hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
98b676c6ffb8b51257419f26f075d660ab1d31f9.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by heidies on 7/1/18.
//
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#include <math.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if(error != hipSuccess){ \
printf("Error: %s %d, ", __FILE__, __LINE__); \
printf("code: %d, reason %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}
/* Compare a host-computed reference array against the GPU result.
 * Prints the first mismatch (if any) and a one-line verdict to stdout.
 * hostRef: values computed by the CPU reference path
 * gpuRef : values copied back from the device
 * N      : number of elements to compare
 */
void checkResult(float *hostRef, float *gpuRef, const int N){
    double epsilon = 1.0e-8;
    int match = 1;
    for (int i = 0; i < N; ++ i){
        /* fabs, not abs: the integer abs() truncates the float difference
         * toward zero, so every mismatch smaller than 1.0 was invisible. */
        if (fabs((double)hostRef[i] - (double)gpuRef[i]) > epsilon){
            match = 0;
            printf("Arrays do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
            break;
        }
    }
    if (match) printf("Arrays match. \n\n");
}
/* Fill ip[0..size) with pseudo-random floats in [0.0, 25.5].
 * The generator is seeded only once per process: the original re-seeded
 * with time() on every call, so two calls within the same second (as in
 * main, which initializes h_A and h_B back to back) produced identical
 * arrays. */
void initialData(float *ip, int size){
    static int seeded = 0;
    if (!seeded){
        time_t t;
        srand((unsigned )time(&t));
        seeded = 1;
    }
    for (int i = 0; i < size; ++ i){
        ip[i] = (float)(rand() & 0xFF) / 10.0f;
    }
}
/* CPU reference: element-wise vector addition, C[i] = A[i] + B[i] for i in [0, N). */
void sumArraysOnHost(float *A, float *B, float *C, const int N){
    int idx = 0;
    while (idx < N){
        C[idx] = A[idx] + B[idx];
        ++idx;
    }
}
/* GPU element-wise vector add: C[i] = A[i] + B[i], one thread per element.
 * Expects a 1-D launch; the i < N guard protects the tail block when N is
 * not a multiple of blockDim.x. */
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
/* Wall-clock time in seconds with microsecond resolution (gettimeofday). */
double cpuSecond(){
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double)now.tv_sec;
    seconds += (double)now.tv_usec * 1e-6;
    return seconds;
}
/* Driver: add two random 2^24-element vectors on both CPU and GPU, time
 * each path, and verify the results agree.
 * Every HIP API call and the kernel launch are now checked with the CHECK
 * macro (the original silently ignored hipMalloc/hipMemcpy/launch errors). */
int main(int argc, char **argv){
    printf("%s Starting...\n", argv[0]);
    int dev = 0;
    hipDeviceProp_t deviceProp;
    CHECK(hipGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(hipSetDevice(dev));

    int nElem = 1 << 24;
    printf("Vector size %d\n", nElem);
    size_t nBytes = nElem * sizeof(float);

    /* Host buffers; check malloc results before touching them. */
    float *h_A = (float *)malloc(nBytes);
    float *h_B = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef = (float *)malloc(nBytes);
    if (!h_A || !h_B || !hostRef || !gpuRef){
        fprintf(stderr, "host allocation of %zu bytes failed\n", nBytes);
        exit(1);
    }

    double iStart, iElaps;
    iStart = cpuSecond();
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    iElaps = cpuSecond() - iStart;
    printf("Initial data time elapsed: %f sec\n", iElaps);

    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);

    iStart = cpuSecond();
    sumArraysOnHost(h_A, h_B, hostRef, nElem);
    iElaps = cpuSecond() - iStart;
    printf("sumArraysOnHost time elapsed: %f sec\n", iElaps);

    float *d_A, *d_B, *d_C;
    CHECK(hipMalloc((float **)&d_A, nBytes));
    CHECK(hipMalloc((float **)&d_B, nBytes));
    CHECK(hipMalloc((float **)&d_C, nBytes));
    CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
    CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));

    int iLen = 1024;
    dim3 block(iLen);
    dim3 grid((nElem + block.x - 1) / block.x);  /* ceil-div so the tail is covered */

    iStart = cpuSecond();
    hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem);
    CHECK(hipGetLastError());       /* catch launch-configuration errors */
    CHECK(hipDeviceSynchronize());  /* launch is async: wait before reading the timer */
    iElaps = cpuSecond() - iStart;

    CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
    printf("sumArraysOnGPU <<<%d, %d>>> time elapsed %f" \
    " sec\n", grid.x, block.x, iElaps);

    checkResult(hostRef, gpuRef, nElem);

    CHECK(hipFree(d_A));
    CHECK(hipFree(d_B));
    CHECK(hipFree(d_C));
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);
    return 0;
}
| 98b676c6ffb8b51257419f26f075d660ab1d31f9.cu | //
// Created by heidies on 7/1/18.
//
#include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#include <math.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error != cudaSuccess){ \
printf("Error: %s %d, ", __FILE__, __LINE__); \
printf("code: %d, reason %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
/* Compare a host-computed reference array against the GPU result.
 * Prints the first mismatch (if any) and a one-line verdict to stdout.
 * hostRef: values computed by the CPU reference path
 * gpuRef : values copied back from the device
 * N      : number of elements to compare
 */
void checkResult(float *hostRef, float *gpuRef, const int N){
    double epsilon = 1.0e-8;
    int match = 1;
    for (int i = 0; i < N; ++ i){
        /* fabs, not abs: the integer abs() truncates the float difference
         * toward zero, so every mismatch smaller than 1.0 was invisible. */
        if (fabs((double)hostRef[i] - (double)gpuRef[i]) > epsilon){
            match = 0;
            printf("Arrays do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
            break;
        }
    }
    if (match) printf("Arrays match. \n\n");
}
/* Fill ip[0..size) with pseudo-random floats in [0.0, 25.5].
 * The generator is seeded only once per process: the original re-seeded
 * with time() on every call, so two calls within the same second (as in
 * main, which initializes h_A and h_B back to back) produced identical
 * arrays. */
void initialData(float *ip, int size){
    static int seeded = 0;
    if (!seeded){
        time_t t;
        srand((unsigned )time(&t));
        seeded = 1;
    }
    for (int i = 0; i < size; ++ i){
        ip[i] = (float)(rand() & 0xFF) / 10.0f;
    }
}
/* CPU reference: element-wise vector addition, C[i] = A[i] + B[i] for i in [0, N). */
void sumArraysOnHost(float *A, float *B, float *C, const int N){
    int idx = 0;
    while (idx < N){
        C[idx] = A[idx] + B[idx];
        ++idx;
    }
}
/* GPU element-wise vector add: C[i] = A[i] + B[i], one thread per element.
 * Expects a 1-D launch; the i < N guard protects the tail block when N is
 * not a multiple of blockDim.x. */
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
/* Wall-clock time in seconds with microsecond resolution (gettimeofday). */
double cpuSecond(){
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double)now.tv_sec;
    seconds += (double)now.tv_usec * 1e-6;
    return seconds;
}
/* Driver: add two random 2^24-element vectors on both CPU and GPU, time
 * each path, and verify the results agree.
 * Every CUDA API call and the kernel launch are now checked with the CHECK
 * macro (the original silently ignored cudaMalloc/cudaMemcpy/launch errors). */
int main(int argc, char **argv){
    printf("%s Starting...\n", argv[0]);
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));

    int nElem = 1 << 24;
    printf("Vector size %d\n", nElem);
    size_t nBytes = nElem * sizeof(float);

    /* Host buffers; check malloc results before touching them. */
    float *h_A = (float *)malloc(nBytes);
    float *h_B = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef = (float *)malloc(nBytes);
    if (!h_A || !h_B || !hostRef || !gpuRef){
        fprintf(stderr, "host allocation of %zu bytes failed\n", nBytes);
        exit(1);
    }

    double iStart, iElaps;
    iStart = cpuSecond();
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    iElaps = cpuSecond() - iStart;
    printf("Initial data time elapsed: %f sec\n", iElaps);

    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);

    iStart = cpuSecond();
    sumArraysOnHost(h_A, h_B, hostRef, nElem);
    iElaps = cpuSecond() - iStart;
    printf("sumArraysOnHost time elapsed: %f sec\n", iElaps);

    float *d_A, *d_B, *d_C;
    CHECK(cudaMalloc((float **)&d_A, nBytes));
    CHECK(cudaMalloc((float **)&d_B, nBytes));
    CHECK(cudaMalloc((float **)&d_C, nBytes));
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));

    int iLen = 1024;
    dim3 block(iLen);
    dim3 grid((nElem + block.x - 1) / block.x);  /* ceil-div so the tail is covered */

    iStart = cpuSecond();
    sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C, nElem);
    CHECK(cudaGetLastError());       /* catch launch-configuration errors */
    CHECK(cudaDeviceSynchronize());  /* launch is async: wait before reading the timer */
    iElaps = cpuSecond() - iStart;

    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
    printf("sumArraysOnGPU <<<%d, %d>>> time elapsed %f" \
    " sec\n", grid.x, block.x, iElaps);

    checkResult(hostRef, gpuRef, nElem);

    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);
    return 0;
}
|
1254902302eab27cefb9fa1b5e0b9db5aa7f800d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Started from glfwminimal-
// and attempting to bring in Thrust OpenGL interop
// following https://gist.github.com/dangets/2926425/download#
//
#include <GLFW/glfw3.h>
#include <cstdlib>
#include <cstdio>
#include <cuda_gl_interop.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
unsigned int g_window_width = 512;
unsigned int g_window_height = 512;
unsigned int g_mesh_width = 256;
unsigned int g_mesh_height = 256;
thrust::device_ptr<float4> dev_ptr;
GLuint vbo;
struct cudaGraphicsResource *vbo_cuda;
float g_anim = 0.0;
// mouse controls
int g_mouse_old_x, g_mouse_old_y;
int g_mouse_buttons = 0;
float g_rotate_x = 0.0, g_rotate_y = 0.0;
float g_translate_z = -3.0;
/* Functor mapped over linear vertex indices [0, width*height): converts
 * index i to grid coordinates (x, y), then to uv in [-1, 1]^2, and returns
 * a float4 vertex whose y component is an animated sine/cosine surface
 * evaluated at phase `time`. Used as the thrust::transform op in
 * display_thrust(). */
struct sine_wave
{
/* NOTE(review): the initializer list order (width, height, time) differs
 * from the declaration order below (time, width, height). Members are
 * initialized in declaration order, and each initializer is independent,
 * so this is only a -Wreorder warning, not a bug. */
sine_wave(unsigned int w, unsigned int h, float t)
:
width(w),
height(h),
time(t)
{
}
/* Callable from host or device code. */
__host__ __device__
float4 operator()(unsigned int i)
{
unsigned int x = i % width;
unsigned int y = i / width;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
// write output vertex
return make_float4(u, w, v, 1.0f);
}
float time;
unsigned int width, height;
};
static void error_callback(int error, const char* description)
{
fputs(description, stderr);
}
static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
}
/* Create and fill the vertex VBO: g_mesh_width x g_mesh_height vertices,
 * each a float4 (x, y, 0, 1) with x,y normalized to [0, 1).
 * `size` is in BYTES (sizeof(float4) per vertex). */
void init()
{
    //hipGLSetGLDevice(0);
    unsigned int size = g_mesh_width * g_mesh_height * sizeof(float4);
    printf("init size:%d width:%d height:%d \n", size, g_mesh_width, g_mesh_height);
    /* `size` counts bytes, so the staging buffer needs size/sizeof(float)
     * floats. (The original did `new float[size]` — 4x more than needed —
     * and never freed it.) */
    float* data = new float[size / sizeof(float)] ;
    unsigned int index(0);
    for(unsigned ix=0 ; ix < g_mesh_width ; ix++){
        for(unsigned iy=0 ; iy < g_mesh_height ; iy++){
            index = iy*g_mesh_width + ix ;
            data[index*4+0] = float(ix)/float(g_mesh_width) ;
            data[index*4+1] = float(iy)/float(g_mesh_height) ;
            data[index*4+2] = 0.f ;
            data[index*4+3] = 1.f ;
        }
    }
    // create vbo
    glGenBuffers(1, &vbo);
    // bind, initialize (GL copies `size` bytes out of data), unbind
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, size, data, GL_DYNAMIC_DRAW); // target, size, data, usage
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    delete [] data;   /* GL owns its copy now; plug the leak */
    // register buffer object with CUDA
    //hipGraphicsGLRegisterBuffer(&vbo_cuda, vbo, hipGraphicsMapFlagsWriteDiscard);
}
void setup_view(float ratio)
{
glClear(GL_COLOR_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-ratio, ratio, -1.f, 1.f, 1.f, -1.f);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
void display_triangles()
{
glRotatef((float) glfwGetTime() * 50.f, 0.f, 0.f, 1.f);
glBegin(GL_TRIANGLES);
glColor3f(1.f, 0.f, 0.f);
glVertex3f(-0.6f, -0.4f, 0.f);
glColor3f(0.f, 1.f, 0.f);
glVertex3f(0.6f, -0.4f, 0.f);
glColor3f(0.f, 0.f, 1.f);
glVertex3f(0.f, 0.6f, 0.f);
glEnd();
}
void display_thrust(bool thrust)
{
if(thrust)
{
hipGraphicsMapResources(1, &vbo_cuda, 0);
float4 *raw_ptr;
size_t buf_size;
hipGraphicsResourceGetMappedPointer((void **)&raw_ptr, &buf_size, vbo_cuda);
dev_ptr = thrust::device_pointer_cast(raw_ptr);
// transform the mesh
thrust::counting_iterator<int> first(0);
thrust::counting_iterator<int> last(g_mesh_width * g_mesh_height);
thrust::transform(first, last, dev_ptr, sine_wave(g_mesh_width, g_mesh_height, g_anim));
hipGraphicsUnmapResources(1, &vbo_cuda, 0);
}
glTranslatef(0.0, 0.0, g_translate_z);
glRotatef(g_rotate_x, 1.0, 0.0, 0.0);
glRotatef(g_rotate_y, 0.0, 1.0, 0.0);
// render from the vbo
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexPointer(4, GL_FLOAT, 0, 0); // size, type, stride, pointer
glEnableClientState(GL_VERTEX_ARRAY);
glColor3f(1.0, 0.0, 0.0);
glDrawArrays(GL_LINES, 0, g_mesh_width * g_mesh_height); // mode, first, count
glDisableClientState(GL_VERTEX_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, 0);
//glutSwapBuffers();
//glutPostRedisplay();
g_anim += 0.001;
}
int main(int argc, char** argv)
{
char mode = argc > 1 && strlen(argv[1]) > 0 ? argv[1][0] : 'A' ;
const char* msg = NULL ;
switch(mode)
{
case 'A':msg="mode A : display_triangles " ; break ;
case 'B':msg="mode B : display_thrust(true) " ; break ;
case 'C':msg="mode C : display_thrust(false) " ; break ;
default:msg="unknown mode " ; break ;
}
printf(" %s : %s \n", argv[0], msg );
GLFWwindow* window;
glfwSetErrorCallback(error_callback);
if (!glfwInit())
exit(EXIT_FAILURE);
window = glfwCreateWindow(g_window_width, g_window_height, msg, NULL, NULL);
if (!window)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
glfwMakeContextCurrent(window);
glfwSwapInterval(1);
init();
glfwSetKeyCallback(window, key_callback);
while (!glfwWindowShouldClose(window))
{
float ratio;
int width, height;
glfwGetFramebufferSize(window, &width, &height);
ratio = width / (float) height;
glViewport(0, 0, width, height);
setup_view(ratio);
switch(mode)
{
case 'A':display_triangles() ;break;
case 'B':display_thrust(true) ;break;
case 'C':display_thrust(false) ;break;
}
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
| 1254902302eab27cefb9fa1b5e0b9db5aa7f800d.cu | /*
* Copyright (c) 2019 Opticks Team. All Rights Reserved.
*
* This file is part of Opticks
* (see https://bitbucket.org/simoncblyth/opticks).
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Started from glfwminimal-
// and attempting to bring in Thrust OpenGL interop
// following https://gist.github.com/dangets/2926425/download#
//
#include <GLFW/glfw3.h>
#include <cstdlib>
#include <cstdio>
#include <cuda_gl_interop.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
unsigned int g_window_width = 512;
unsigned int g_window_height = 512;
unsigned int g_mesh_width = 256;
unsigned int g_mesh_height = 256;
thrust::device_ptr<float4> dev_ptr;
GLuint vbo;
struct cudaGraphicsResource *vbo_cuda;
float g_anim = 0.0;
// mouse controls
int g_mouse_old_x, g_mouse_old_y;
int g_mouse_buttons = 0;
float g_rotate_x = 0.0, g_rotate_y = 0.0;
float g_translate_z = -3.0;
struct sine_wave
{
sine_wave(unsigned int w, unsigned int h, float t)
:
width(w),
height(h),
time(t)
{
}
__host__ __device__
float4 operator()(unsigned int i)
{
unsigned int x = i % width;
unsigned int y = i / width;
// calculate uv coordinates
float u = x / (float) width;
float v = y / (float) height;
u = u*2.0f - 1.0f;
v = v*2.0f - 1.0f;
// calculate simple sine wave pattern
float freq = 4.0f;
float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
// write output vertex
return make_float4(u, w, v, 1.0f);
}
float time;
unsigned int width, height;
};
static void error_callback(int error, const char* description)
{
fputs(description, stderr);
}
static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
}
/* Create and fill the vertex VBO: g_mesh_width x g_mesh_height vertices,
 * each a float4 (x, y, 0, 1) with x,y normalized to [0, 1).
 * `size` is in BYTES (sizeof(float4) per vertex). */
void init()
{
    //cudaGLSetGLDevice(0);
    unsigned int size = g_mesh_width * g_mesh_height * sizeof(float4);
    printf("init size:%d width:%d height:%d \n", size, g_mesh_width, g_mesh_height);
    /* `size` counts bytes, so the staging buffer needs size/sizeof(float)
     * floats. (The original did `new float[size]` — 4x more than needed —
     * and never freed it.) */
    float* data = new float[size / sizeof(float)] ;
    unsigned int index(0);
    for(unsigned ix=0 ; ix < g_mesh_width ; ix++){
        for(unsigned iy=0 ; iy < g_mesh_height ; iy++){
            index = iy*g_mesh_width + ix ;
            data[index*4+0] = float(ix)/float(g_mesh_width) ;
            data[index*4+1] = float(iy)/float(g_mesh_height) ;
            data[index*4+2] = 0.f ;
            data[index*4+3] = 1.f ;
        }
    }
    // create vbo
    glGenBuffers(1, &vbo);
    // bind, initialize (GL copies `size` bytes out of data), unbind
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, size, data, GL_DYNAMIC_DRAW); // target, size, data, usage
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    delete [] data;   /* GL owns its copy now; plug the leak */
    // register buffer object with CUDA
    //cudaGraphicsGLRegisterBuffer(&vbo_cuda, vbo, cudaGraphicsMapFlagsWriteDiscard);
}
void setup_view(float ratio)
{
glClear(GL_COLOR_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-ratio, ratio, -1.f, 1.f, 1.f, -1.f);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
void display_triangles()
{
glRotatef((float) glfwGetTime() * 50.f, 0.f, 0.f, 1.f);
glBegin(GL_TRIANGLES);
glColor3f(1.f, 0.f, 0.f);
glVertex3f(-0.6f, -0.4f, 0.f);
glColor3f(0.f, 1.f, 0.f);
glVertex3f(0.6f, -0.4f, 0.f);
glColor3f(0.f, 0.f, 1.f);
glVertex3f(0.f, 0.6f, 0.f);
glEnd();
}
void display_thrust(bool thrust)
{
if(thrust)
{
cudaGraphicsMapResources(1, &vbo_cuda, 0);
float4 *raw_ptr;
size_t buf_size;
cudaGraphicsResourceGetMappedPointer((void **)&raw_ptr, &buf_size, vbo_cuda);
dev_ptr = thrust::device_pointer_cast(raw_ptr);
// transform the mesh
thrust::counting_iterator<int> first(0);
thrust::counting_iterator<int> last(g_mesh_width * g_mesh_height);
thrust::transform(first, last, dev_ptr, sine_wave(g_mesh_width, g_mesh_height, g_anim));
cudaGraphicsUnmapResources(1, &vbo_cuda, 0);
}
glTranslatef(0.0, 0.0, g_translate_z);
glRotatef(g_rotate_x, 1.0, 0.0, 0.0);
glRotatef(g_rotate_y, 0.0, 1.0, 0.0);
// render from the vbo
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexPointer(4, GL_FLOAT, 0, 0); // size, type, stride, pointer
glEnableClientState(GL_VERTEX_ARRAY);
glColor3f(1.0, 0.0, 0.0);
glDrawArrays(GL_LINES, 0, g_mesh_width * g_mesh_height); // mode, first, count
glDisableClientState(GL_VERTEX_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, 0);
//glutSwapBuffers();
//glutPostRedisplay();
g_anim += 0.001;
}
int main(int argc, char** argv)
{
char mode = argc > 1 && strlen(argv[1]) > 0 ? argv[1][0] : 'A' ;
const char* msg = NULL ;
switch(mode)
{
case 'A':msg="mode A : display_triangles " ; break ;
case 'B':msg="mode B : display_thrust(true) " ; break ;
case 'C':msg="mode C : display_thrust(false) " ; break ;
default:msg="unknown mode " ; break ;
}
printf(" %s : %s \n", argv[0], msg );
GLFWwindow* window;
glfwSetErrorCallback(error_callback);
if (!glfwInit())
exit(EXIT_FAILURE);
window = glfwCreateWindow(g_window_width, g_window_height, msg, NULL, NULL);
if (!window)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
glfwMakeContextCurrent(window);
glfwSwapInterval(1);
init();
glfwSetKeyCallback(window, key_callback);
while (!glfwWindowShouldClose(window))
{
float ratio;
int width, height;
glfwGetFramebufferSize(window, &width, &height);
ratio = width / (float) height;
glViewport(0, 0, width, height);
setup_view(ratio);
switch(mode)
{
case 'A':display_triangles() ;break;
case 'B':display_thrust(true) ;break;
case 'C':display_thrust(false) ;break;
}
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
|
3021e1ed2f32200f2b37d6994716a7460f94c0ac.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* FILENAME: test_image_pyrdown.cu
*
* AUTHORS: Sun Yucheng START DATE: Tuesday September 14th 2021
*
* LAST MODIFIED: Friday, September 17th 2021, 10:11:13 am
*
* CONTACT: yucheng.sun@smartmore.com
*******************************************************************************/
#define CATCH_CONFIG_MAIN
#include <cudaop/cudaop.h>
#include <catch2/catch.hpp>
#include <chrono>
#include <opencv2/opencv.hpp>
#include "macro.h"
#include "utils.h"
TEST_CASE("PyrDown", "[pyrdown]") {
using namespace smartmore::cudaop;
const int img_h = 3100;
const int img_w = 2600;
constexpr int dst_img_h = (img_h + 1) / 2;
constexpr int dst_img_w = (img_w + 1) / 2;
std::vector<unsigned char> pixels(img_h * img_w);
smartmore::RandomInt8Vector(pixels);
cv::Mat mat(img_h, img_w, CV_8UC1, pixels.data());
unsigned char *src = nullptr;
unsigned char *dst = nullptr;
CUDA_CHECK(hipMalloc(&src, img_h * img_w));
CUDA_CHECK(hipMalloc(&dst, dst_img_h * dst_img_w));
CUDA_CHECK(hipMemcpy(src, mat.data, img_h * img_w, hipMemcpyHostToDevice));
for (int i = 0; i < 10; i++) {
smartmore::Clock clk("Pyrdown-kInt8-cuda: ");
ImagePyrDown<ImageType::kGRAY, DataType::kInt8, BorderType::kReplicate>(src, dst, img_h, img_w);
}
cv::Mat out(dst_img_h, dst_img_w, CV_8UC1);
CUDA_CHECK(hipMemcpy(out.data, dst, dst_img_h * dst_img_w, hipMemcpyDeviceToHost));
CUDA_CHECK_AND_FREE(src);
CUDA_CHECK_AND_FREE(dst);
cv::Mat reference;
for (int i = 0; i < 10; i++) {
smartmore::Clock clk("Pyrdown-kInt8-OpenCV: ");
cv::pyrDown(mat, reference, cv::Size(dst_img_w, dst_img_h), cv::BORDER_REPLICATE);
}
cv::Mat diff = reference - out;
int max_diff = 0;
for (int i = 0; i < dst_img_h * dst_img_w; i++) {
max_diff = max_diff < diff.data[i] ? diff.data[i] : max_diff;
}
REQUIRE(max_diff <= 1);
}
| 3021e1ed2f32200f2b37d6994716a7460f94c0ac.cu | /*******************************************************************************
* FILENAME: test_image_pyrdown.cu
*
* AUTHORS: Sun Yucheng START DATE: Tuesday September 14th 2021
*
* LAST MODIFIED: Friday, September 17th 2021, 10:11:13 am
*
* CONTACT: yucheng.sun@smartmore.com
*******************************************************************************/
#define CATCH_CONFIG_MAIN
#include <cudaop/cudaop.h>
#include <catch2/catch.hpp>
#include <chrono>
#include <opencv2/opencv.hpp>
#include "macro.h"
#include "utils.h"
TEST_CASE("PyrDown", "[pyrdown]") {
using namespace smartmore::cudaop;
const int img_h = 3100;
const int img_w = 2600;
constexpr int dst_img_h = (img_h + 1) / 2;
constexpr int dst_img_w = (img_w + 1) / 2;
std::vector<unsigned char> pixels(img_h * img_w);
smartmore::RandomInt8Vector(pixels);
cv::Mat mat(img_h, img_w, CV_8UC1, pixels.data());
unsigned char *src = nullptr;
unsigned char *dst = nullptr;
CUDA_CHECK(cudaMalloc(&src, img_h * img_w));
CUDA_CHECK(cudaMalloc(&dst, dst_img_h * dst_img_w));
CUDA_CHECK(cudaMemcpy(src, mat.data, img_h * img_w, cudaMemcpyHostToDevice));
for (int i = 0; i < 10; i++) {
smartmore::Clock clk("Pyrdown-kInt8-cuda: ");
ImagePyrDown<ImageType::kGRAY, DataType::kInt8, BorderType::kReplicate>(src, dst, img_h, img_w);
}
cv::Mat out(dst_img_h, dst_img_w, CV_8UC1);
CUDA_CHECK(cudaMemcpy(out.data, dst, dst_img_h * dst_img_w, cudaMemcpyDeviceToHost));
CUDA_CHECK_AND_FREE(src);
CUDA_CHECK_AND_FREE(dst);
cv::Mat reference;
for (int i = 0; i < 10; i++) {
smartmore::Clock clk("Pyrdown-kInt8-OpenCV: ");
cv::pyrDown(mat, reference, cv::Size(dst_img_w, dst_img_h), cv::BORDER_REPLICATE);
}
cv::Mat diff = reference - out;
int max_diff = 0;
for (int i = 0; i < dst_img_h * dst_img_w; i++) {
max_diff = max_diff < diff.data[i] ? diff.data[i] : max_diff;
}
REQUIRE(max_diff <= 1);
}
|
a453dff390669937328629377438d51e2db9eba7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "linalg/map_then_reduce.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
/* Naive reference map-reduce kernel: each in-range thread maps one input
 * element and atomically accumulates the result into the single scalar
 * *out. Intended only as a correctness baseline for mapThenSumReduce;
 * every thread contends on the same address, which is slow but acceptable
 * for a test reference. */
template <typename Type, typename MapOp>
__global__ void naiveMapReduceKernel(Type *out, const Type *in, size_t len,
MapOp map) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
// myAtomicAdd is a project helper (defined outside this chunk)
myAtomicAdd(out, map(in[idx]));
}
}
/* Host launcher for the naive reference reduction: one thread per element,
 * 64 threads per block, grid sized by ceiling division (ceildiv is a
 * project helper). Only launch errors are checked here; execution errors
 * surface at the next synchronizing call. */
template <typename Type, typename MapOp>
void naiveMapReduce(Type *out, const Type *in, size_t len, MapOp map) {
static const int TPB = 64;
int nblks = ceildiv(len, (size_t)TPB);
hipLaunchKernelGGL(( naiveMapReduceKernel<Type, MapOp>), dim3(nblks), dim3(TPB), 0, 0, out, in, len, map);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename T>
struct MapReduceInputs {
T tolerance;
size_t len;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const MapReduceInputs<T> &dims) {
return os;
}
// Or else, we get the following compilation error
// for an extended __device__ lambda cannot have private or protected access
// within its class
template <typename T>
void mapReduceLaunch(T *out_ref, T *out, const T *in, size_t len) {
auto op = [] __device__(T in) { return in; };
naiveMapReduce(out_ref, in, len, op);
mapThenSumReduce(out, len, op, 0, in);
}
template <typename T>
class MapReduceTest : public ::testing::TestWithParam<MapReduceInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MapReduceInputs<T>>::GetParam();
Random::Rng<T> r(params.seed);
auto len = params.len;
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0));
mapReduceLaunch(out_ref, out, in, len);
}
void TearDown() override {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
}
protected:
MapReduceInputs<T> params;
T *in, *out_ref, *out;
};
const std::vector<MapReduceInputs<float>> inputsf = {
{0.001f, 1024 * 1024, 1234ULL}};
typedef MapReduceTest<float> MapReduceTestF;
TEST_P(MapReduceTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MapReduceTests, MapReduceTestF,
::testing::ValuesIn(inputsf));
const std::vector<MapReduceInputs<double>> inputsd = {
{0.000001, 1024 * 1024, 1234ULL}};
typedef MapReduceTest<double> MapReduceTestD;
TEST_P(MapReduceTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MapReduceTests, MapReduceTestD,
::testing::ValuesIn(inputsd));
} // end namespace LinAlg
} // end namespace MLCommon
| a453dff390669937328629377438d51e2db9eba7.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "linalg/map_then_reduce.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename Type, typename MapOp>
__global__ void naiveMapReduceKernel(Type *out, const Type *in, size_t len,
MapOp map) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
myAtomicAdd(out, map(in[idx]));
}
}
template <typename Type, typename MapOp>
void naiveMapReduce(Type *out, const Type *in, size_t len, MapOp map) {
static const int TPB = 64;
int nblks = ceildiv(len, (size_t)TPB);
naiveMapReduceKernel<Type, MapOp><<<nblks, TPB>>>(out, in, len, map);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename T>
struct MapReduceInputs {
T tolerance;
size_t len;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const MapReduceInputs<T> &dims) {
return os;
}
// Or else, we get the following compilation error
// for an extended __device__ lambda cannot have private or protected access
// within its class
template <typename T>
void mapReduceLaunch(T *out_ref, T *out, const T *in, size_t len) {
auto op = [] __device__(T in) { return in; };
naiveMapReduce(out_ref, in, len, op);
mapThenSumReduce(out, len, op, 0, in);
}
template <typename T>
class MapReduceTest : public ::testing::TestWithParam<MapReduceInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MapReduceInputs<T>>::GetParam();
Random::Rng<T> r(params.seed);
auto len = params.len;
allocate(in, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in, len, T(-1.0), T(1.0));
mapReduceLaunch(out_ref, out, in, len);
}
void TearDown() override {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
}
protected:
MapReduceInputs<T> params;
T *in, *out_ref, *out;
};
const std::vector<MapReduceInputs<float>> inputsf = {
{0.001f, 1024 * 1024, 1234ULL}};
typedef MapReduceTest<float> MapReduceTestF;
TEST_P(MapReduceTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MapReduceTests, MapReduceTestF,
::testing::ValuesIn(inputsf));
const std::vector<MapReduceInputs<double>> inputsd = {
{0.000001, 1024 * 1024, 1234ULL}};
typedef MapReduceTest<double> MapReduceTestD;
TEST_P(MapReduceTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MapReduceTests, MapReduceTestD,
::testing::ValuesIn(inputsd));
} // end namespace LinAlg
} // end namespace MLCommon
|
afc72af69178cccace3ac71a1a74e6d15f61b470.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include "string.h"
#define DEFAULT_THRESHOLD 4000
#define BLOCK_SIZE_X 32
#define BLOCK_SIZE_Y 32
/* Read a binary (P6) PPM image.
 * Returns a malloc'd array of 3 ints per pixel (r,g,b expanded from
 * bytes), or NULL on any failure. xsize/ysize/maxval are output params
 * filled from the header.
 * NOTE(review): assumes the whole header fits in the first 1000 bytes and
 * maxval <= 255 for the returned data (the 16-bit branch only resizes the
 * read buffer, the expansion loop below still copies single bytes) —
 * confirm against the inputs actually used. */
unsigned int *read_ppm( char *filename, int & xsize, int & ysize, int & maxval ){
if ( !filename || filename[0] == '\0') {
fprintf(stderr, "read_ppm but no file name\n");
return NULL; // fail
}
fprintf(stderr, "read_ppm( %s )\n", filename);
int fd = open( filename, O_RDONLY);
if (fd == -1){
fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
return NULL; // fail
}
/* Slurp the first ~1000 bytes; the header is parsed out of this buffer. */
char chars[1024];
int num = read(fd, chars, 1000);
if (chars[0] != 'P' || chars[1] != '6'){
fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
return NULL;
}
unsigned int width, height, maxvalue;
char *ptr = chars+3; // P 6 newline
if (*ptr == '#'){ // comment line!
ptr = 1 + strstr(ptr, "\n");
}
num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
xsize = width;
ysize = height;
maxval = maxvalue;
/* Output array: one int per channel (3 per pixel). */
unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int) * 3);
if (!pic) {
fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
return NULL; // fail but return
}
// allocate buffer to read the rest of the file into
int bufsize = 3 * width * height * sizeof(unsigned char);
if (maxval > 255) bufsize *= 2;
/* NOTE(review): buf is never freed — it leaks on the success path. */
unsigned char *buf = (unsigned char *)malloc( bufsize );
if (!buf) {
fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
return NULL; // fail but return
}
// TODO really read
char duh[80];
char *line = chars;
// find the start of the pixel data. no doubt stupid
/* NOTE(review): the "\0" in these format strings is redundant — string
 * literals are already NUL-terminated; locating the start of pixel data
 * by searching for the decimal text of width/height/maxval can misfire
 * if the same digit sequence appears earlier in the header. */
sprintf(duh, "%d\0", xsize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", ysize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", maxval);
line = strstr(line, duh);
/* NOTE(review): `line - chars` is a ptrdiff_t printed with %d — works on
 * common ABIs but is not strictly portable. */
fprintf(stderr, "%s found at offset %d\n", duh, line - chars);
line += strlen(duh) + 1;
long offset = line - chars;
lseek(fd, offset, SEEK_SET); // move to the correct offset
long numread = read(fd, buf, bufsize);
/* NOTE(review): bufsize is an int passed for a %ld specifier — a
 * format/argument mismatch. */
fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize);
close(fd);
/* Widen each byte to an int in the output array. */
int pixels = xsize * ysize * 3;
for (int i=0; i<pixels; i++) pic[i] = (int) buf[i];
return pic; // success
}
/* Write an RGB image as a binary (P6) PPM file.
 * pic holds 3 ints per pixel (r, g, b), each expected in [0, maxval].
 * Exits the process on failure to open the output file. */
void write_ppm( const char *filename, int xsize, int ysize, int maxval, int *pic)
{
    /* "wb": P6 is a binary format; text mode would corrupt it on platforms
     * that translate newlines. */
    FILE *fp = fopen(filename, "wb");
    if (!fp) {
        /* The original fprintf had a %s but passed no argument (undefined
         * behavior); supply the filename. */
        fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename);
        exit(-1);
    }
    fprintf(fp, "P6\n");
    fprintf(fp, "%d %d\n%d\n", xsize, ysize, maxval);
    int numpix = xsize * ysize * 3;
    /* Emit raw bytes, one channel at a time. */
    for (int i = 0; i < numpix; i += 3) {
        fprintf(fp, "%c%c%c", (unsigned char) pic[i], (unsigned char) pic[i+1], (unsigned char) pic[i+2]);
    }
    fclose(fp);
}
/* Kernel: copy the image while flagging "green-dominant" pixels.
 * Pixels where g > r && g > b are rewritten as pure red (255, 0, 0) and
 * counted via one atomicAdd per hit on the shared counter *count.
 * Layout: 3 ints per pixel, row-major; one thread per pixel, 2-D launch. */
__global__ void gcount_baseline(unsigned int *pic, int *result, int xsize, int ysize, int *count){
    int j = 3*(blockIdx.x*blockDim.x + threadIdx.x); // int-offset of the pixel within its row (3 ints/pixel)
    int i = blockIdx.y*blockDim.y + threadIdx.y;     // row
    int offset = i*xsize*3 + j;                      // index of the red channel
    /* Bounds check must be a conjunction: the original used ||, so a
     * thread out of range in one dimension (but in range in the other)
     * still read and wrote past the image. */
    if( j < xsize*3 && i < ysize){
        int r = pic[offset];
        int g = pic[offset+1];
        int b = pic[offset+2];
        if(g > r && g > b){
            r = 255;
            g = 0;
            b = 0;
            atomicAdd(count, 1);
        }
        result[offset] = r;
        result[offset+1] = g;
        result[offset+2] = b;
    }
}
// Abort with a diagnostic if the most recent HIP runtime call or kernel
// launch left an error behind; "task" labels the step being checked.
void checkCudaError(const char* task){
    hipError_t status = hipGetLastError();
    if (status == hipSuccess) {
        // fprintf(stderr, "Success! Completed \"%s\"!\n", task);
        return;
    }
    fprintf(stderr, "Oops! (error code %s happened at \"%s\")!\n", hipGetErrorString(status), task);
    exit(EXIT_FAILURE);
}
/*
 * Driver: read a PPM, run the green-pixel-counting kernel on the GPU, time it
 * with events, and write the recolored image to result.ppm.
 * Usage: prog | prog <threshold> | prog <file> <threshold>
 * (thresh is parsed for CLI compatibility but unused by this kernel.)
 */
int main( int argc, char **argv ){   // BUG FIX: implicit int return type is invalid C++
    int thresh = DEFAULT_THRESHOLD;
    char *filename = (char *)"img/img_1.ppm";
    if (argc > 1) {
        if (argc == 3) { // filename AND threshold
            filename = strdup( argv[1]);
            thresh = atoi( argv[2] );
        }
        if (argc == 2) { // default file but specified threshhold
            thresh = atoi( argv[1] );
        }
        fprintf(stderr, "file %s threshold %d\n", filename, thresh);
    }
    int xsize, ysize, maxval;
    unsigned int *pic = read_ppm( filename, xsize, ysize, maxval );
    if (!pic) { // robustness: read_ppm returns NULL on any failure
        fprintf(stderr, "could not read %s\n", filename);
        return EXIT_FAILURE;
    }
    int numbytes = xsize * ysize * 3 * sizeof( int );
    hipEvent_t start_event, stop_event;
    float elapsed_time_par;
    int *result = (int *) malloc( numbytes );
    unsigned int *d_pic = NULL;
    int *d_result = NULL;
    hipMalloc((void **) &d_pic, numbytes);
    checkCudaError("allocate d_pic");
    hipMalloc((void **) &d_result, numbytes);
    checkCudaError("allocate d_result");
    hipMemcpy(d_pic, pic, xsize * ysize * sizeof(unsigned int) * 3 , hipMemcpyHostToDevice);
    checkCudaError("copy d_pic");
    // Launch the CUDA Kernel: one thread per pixel, ceil-div grid.
    dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y);
    dim3 grid(ceil(xsize/ (float)BLOCK_SIZE_X ), ceil(ysize/ (float)BLOCK_SIZE_Y ));
    hipEventCreate(&start_event);
    hipEventCreate(&stop_event);
    hipEventRecord(start_event, 0);
    int *num_pix_found;
    hipMallocManaged(&num_pix_found, sizeof(int)); // was magic "4"
    *num_pix_found = 0;
    // Launch kernel function
    hipLaunchKernelGGL(( gcount_baseline), dim3(grid), dim3(block), 0, 0, d_pic, d_result, xsize, ysize, num_pix_found);
    checkCudaError("kernel launch");
    hipEventRecord(stop_event, 0);
    hipEventSynchronize(stop_event); // also makes num_pix_found safe to read on host
    hipEventElapsedTime(&elapsed_time_par, start_event, stop_event);
    fprintf(stderr, "Parallel Runtime: %f ms\n", elapsed_time_par);
    hipMemcpy(result, d_result, numbytes, hipMemcpyDeviceToHost);
    checkCudaError("copy d_result");
    write_ppm( "result.ppm", xsize, ysize, 255, result);
    fprintf(stderr, "num_pix_found = %d\n", *num_pix_found);
    // Cleanup (the original leaked every allocation and both events).
    hipEventDestroy(start_event);
    hipEventDestroy(stop_event);
    hipFree(num_pix_found);
    hipFree(d_pic);
    hipFree(d_result);
    free(result);
    free(pic);
    return 0;
}
| afc72af69178cccace3ac71a1a74e6d15f61b470.cu | #include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include "string.h"
#define DEFAULT_THRESHOLD 4000
#define BLOCK_SIZE_X 32
#define BLOCK_SIZE_Y 32
/*
 * read_ppm: load a binary (P6) PPM image.
 * On success returns a malloc'd array of width*height*3 unsigned ints (one
 * channel byte per int) and fills xsize/ysize/maxval; returns NULL on any
 * failure. Caller owns (free()s) the returned buffer.
 * NOTE(review): the header scan only handles a comment line directly after
 * "P6"; comments between the dimension fields are not supported — unchanged.
 */
unsigned int *read_ppm( char *filename, int & xsize, int & ysize, int & maxval ){
    if ( !filename || filename[0] == '\0') {
        fprintf(stderr, "read_ppm but no file name\n");
        return NULL; // fail
    }
    fprintf(stderr, "read_ppm( %s )\n", filename);
    int fd = open( filename, O_RDONLY);
    if (fd == -1){
        fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
        return NULL; // fail
    }
    char chars[1024];
    int num = read(fd, chars, 1000);
    // Robustness: also reject short/failed reads, which previously left
    // chars[] uninitialized before the magic-number check.
    if (num < 2 || chars[0] != 'P' || chars[1] != '6'){
        fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
        close(fd); // BUG FIX: descriptor leaked on this path
        return NULL;
    }
    unsigned int width, height, maxvalue;
    char *ptr = chars+3; // P 6 newline
    if (*ptr == '#'){ // comment line!
        ptr = 1 + strstr(ptr, "\n");
    }
    num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
    fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
    xsize = width;
    ysize = height;
    maxval = maxvalue;
    unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int) * 3);
    if (!pic) {
        fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
        close(fd); // BUG FIX: descriptor leaked on this path
        return NULL; // fail but return
    }
    // allocate buffer to read the rest of the file into
    // (2 bytes per channel when maxval > 255, per the PPM spec)
    int bufsize = 3 * width * height * sizeof(unsigned char);
    if (maxval > 255) bufsize *= 2;
    unsigned char *buf = (unsigned char *)malloc( bufsize );
    if (!buf) {
        fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
        free(pic); // BUG FIX: pic leaked on this path
        close(fd);
        return NULL; // fail but return
    }
    // Locate the start of the pixel data by re-finding each textual header
    // field and stepping past it plus its single whitespace separator.
    char duh[80];
    char *line = chars;
    sprintf(duh, "%d", xsize); // dropped the redundant "\0" in the format
    line = strstr(line, duh);
    line += strlen(duh) + 1;
    sprintf(duh, "%d", ysize);
    line = strstr(line, duh);
    line += strlen(duh) + 1;
    sprintf(duh, "%d", maxval);
    line = strstr(line, duh);
    // BUG FIX: a ptrdiff_t was printed with %d; cast to long for %ld.
    fprintf(stderr, "%s found at offset %ld\n", duh, (long)(line - chars));
    line += strlen(duh) + 1;
    long offset = line - chars;
    lseek(fd, offset, SEEK_SET); // move to the correct offset
    long numread = read(fd, buf, bufsize);
    // BUG FIX: bufsize is an int; passing it to %ld was undefined behavior.
    fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, (long)bufsize);
    close(fd);
    int pixels = xsize * ysize * 3;
    for (int i=0; i<pixels; i++) pic[i] = (int) buf[i];
    free(buf); // BUG FIX: read buffer leaked on success
    return pic; // success
}
/*
 * write_ppm: write an interleaved-RGB image (3 ints per pixel, one channel
 * byte per int) to "filename" as a binary (P6) PPM file.
 * Exits the process if the file cannot be opened.
 */
void write_ppm( const char *filename, int xsize, int ysize, int maxval, int *pic)
{
    // "wb": P6 pixel data is binary; text mode would corrupt it on platforms
    // that translate line endings.
    FILE *fp = fopen(filename, "wb");
    if (!fp) {
        // BUG FIX: the original format string had a %s conversion but no
        // matching argument (undefined behavior); pass filename.
        fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename);
        exit(-1);
    }
    fprintf(fp, "P6\n");
    fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
    int numpix = xsize * ysize * 3;
    for (int i=0; i<numpix; i+=3) {
        fprintf(fp, "%c%c%c", (unsigned char) pic[i], (unsigned char) pic[i+1], (unsigned char) pic[i+2]);
    }
    fclose(fp);
}
// Kernel: for each pixel where green strictly dominates both red and blue,
// recolor the output pixel to pure red and atomically bump *count.
// Expected launch: 2-D grid, thread (x,y) <-> pixel (col,row); pic and result
// hold interleaved RGB, 3 ints per pixel.
__global__ void gcount_baseline(unsigned int *pic, int *result, int xsize, int ysize, int *count){
    int j = 3*(blockIdx.x*blockDim.x + threadIdx.x); // col (offset of red within the row)
    int i = blockIdx.y*blockDim.y + threadIdx.y; // row
    int offset = i*xsize*3 + j; // location of red value
    // BUG FIX: the guard used || which let threads with an out-of-range row
    // OR column through, causing out-of-bounds reads/writes on the partial
    // edge blocks; both coordinates must be in range.
    if( j < xsize*3 && i < ysize){
        int r = pic[offset];
        int g = pic[offset+1];
        int b = pic[offset+2];
        if(g > r && g > b){
            r = 255;
            g = 0;
            b = 0;
            atomicAdd(count, 1);
        }
        result[offset] = r;
        result[offset+1] = g;
        result[offset+2] = b;
    }
}
// Abort with a diagnostic if the most recent CUDA runtime call or kernel
// launch left an error behind; "task" labels the step being checked.
void checkCudaError(const char* task){
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess) {
        // fprintf(stderr, "Success! Completed \"%s\"!\n", task);
        return;
    }
    fprintf(stderr, "Oops! (error code %s happened at \"%s\")!\n", cudaGetErrorString(status), task);
    exit(EXIT_FAILURE);
}
/*
 * Driver: read a PPM, run the green-pixel-counting kernel on the GPU, time it
 * with events, and write the recolored image to result.ppm.
 * Usage: prog | prog <threshold> | prog <file> <threshold>
 * (thresh is parsed for CLI compatibility but unused by this kernel.)
 */
int main( int argc, char **argv ){   // BUG FIX: implicit int return type is invalid C++
    int thresh = DEFAULT_THRESHOLD;
    char *filename = (char *)"img/img_1.ppm";
    if (argc > 1) {
        if (argc == 3) { // filename AND threshold
            filename = strdup( argv[1]);
            thresh = atoi( argv[2] );
        }
        if (argc == 2) { // default file but specified threshhold
            thresh = atoi( argv[1] );
        }
        fprintf(stderr, "file %s threshold %d\n", filename, thresh);
    }
    int xsize, ysize, maxval;
    unsigned int *pic = read_ppm( filename, xsize, ysize, maxval );
    if (!pic) { // robustness: read_ppm returns NULL on any failure
        fprintf(stderr, "could not read %s\n", filename);
        return EXIT_FAILURE;
    }
    int numbytes = xsize * ysize * 3 * sizeof( int );
    cudaEvent_t start_event, stop_event;
    float elapsed_time_par;
    int *result = (int *) malloc( numbytes );
    unsigned int *d_pic = NULL;
    int *d_result = NULL;
    cudaMalloc((void **) &d_pic, numbytes);
    checkCudaError("allocate d_pic");
    cudaMalloc((void **) &d_result, numbytes);
    checkCudaError("allocate d_result");
    cudaMemcpy(d_pic, pic, xsize * ysize * sizeof(unsigned int) * 3 , cudaMemcpyHostToDevice);
    checkCudaError("copy d_pic");
    // Launch the CUDA Kernel: one thread per pixel, ceil-div grid.
    dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y);
    dim3 grid(ceil(xsize/ (float)BLOCK_SIZE_X ), ceil(ysize/ (float)BLOCK_SIZE_Y ));
    cudaEventCreate(&start_event);
    cudaEventCreate(&stop_event);
    cudaEventRecord(start_event, 0);
    int *num_pix_found;
    cudaMallocManaged(&num_pix_found, sizeof(int)); // was magic "4"
    *num_pix_found = 0;
    // Launch kernel function
    gcount_baseline<<<grid, block>>>(d_pic, d_result, xsize, ysize, num_pix_found);
    checkCudaError("kernel launch");
    cudaEventRecord(stop_event, 0);
    cudaEventSynchronize(stop_event); // also makes num_pix_found safe to read on host
    cudaEventElapsedTime(&elapsed_time_par, start_event, stop_event);
    fprintf(stderr, "Parallel Runtime: %f ms\n", elapsed_time_par);
    cudaMemcpy(result, d_result, numbytes, cudaMemcpyDeviceToHost);
    checkCudaError("copy d_result");
    write_ppm( "result.ppm", xsize, ysize, 255, result);
    fprintf(stderr, "num_pix_found = %d\n", *num_pix_found);
    // Cleanup (the original leaked every allocation and both events).
    cudaEventDestroy(start_event);
    cudaEventDestroy(stop_event);
    cudaFree(num_pix_found);
    cudaFree(d_pic);
    cudaFree(d_result);
    free(result);
    free(pic);
    return 0;
}
|
ba1110bfd30d18691a9b6f4b039bc2999b2a46b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<job1.h>
void prepHostMem(void **,int,size_t);
void prepDeviceMem(void **,void **,int,int);
void freeDeviceMem(float **,int);
void kernel1Run(float **,float **);
void kernel2Run(float **,float **);
void kernel3Run(float **,float **);
/**
* Host main Routine
*/
int main(void){
// 50000 floats => size = 200000 bytes per vector.
int numElements = 50000;
size_t size = numElements * sizeof(float);
hipError_t err;
// list_host / list_device are reallocated fresh for each kernel segment;
// the kernelNRun helpers free the individual arrays and device buffers,
// main only frees the pointer lists themselves.
float **list_host,**list_device;
/*************************************************************************************************
* process_kernel1 segment
**************************************************************************************************/
printf("Preparing for process_kernel1.\n");
/*****************************************************************************/
//Host memory preparation
list_host = (float**)malloc(3*sizeof(float*));
prepHostMem((void**)list_host,3,size);
// NOTE(review): this loop is a no-op (casts float* back to float*) — dead code.
for(int a=0;a<3;a++)list_host[a]=(float*)list_host[a];
// Initialize the host input vectors
for (int i = 0; i < numElements; i++){
list_host[0][i] = rand()/(float)RAND_MAX;
list_host[1][i] = rand()/(float)RAND_MAX;
}
/*****************************************************************************/
/*****************************************************************************/
//Device arrays
// NOTE(review): prepDeviceMem sizes its hipMalloc/hipMemcpy with
// sizeof(pointer), not "size" — only ~2 floats per vector ever reach the
// device. TODO: pass the element count/size through.
list_device=(float**)malloc(3*sizeof(float*));
prepDeviceMem((void**)list_host,(void**)list_device,3,2);
/*****************************************************************************/
/*****************************************************************************/
//process_kernel1
kernel1Run(list_host,list_device);
//Freeing host and device list
free(list_host);
free(list_device);
printf("Job process_kernel1 finished.\n");
/*****************************************************************************/
/*************************************************************************************************
* process_kernel2 segment
**************************************************************************************************/
printf("Preparing for process_kernel2.\n");
/*****************************************************************************/
//Host memory preparation
list_host = (float**)malloc(2*sizeof(float*));
prepHostMem((void**)list_host,2,size);
for(int a=0;a<2;a++)list_host[a]=(float*)list_host[a];
// Initialize the host input vectors
for (int i = 0; i < numElements; i++){
list_host[0][i] = rand()/(float)RAND_MAX;
}
/*****************************************************************************/
/*****************************************************************************/
//Device arrays
list_device=(float**)malloc(2*sizeof(float*));
prepDeviceMem((void**)list_host,(void**)list_device,2,1);
/*****************************************************************************/
/*****************************************************************************/
//process_kernel1
kernel2Run(list_host,list_device);
//Freeing host and device list
free(list_host);
free(list_device);
printf("Job process_kernel2 finished.\n");
/*****************************************************************************/
/*************************************************************************************************
* process_kernel3 segment
**************************************************************************************************/
printf("Preparing for process_kernel3.\n");
/*****************************************************************************/
//Host memory preparation
list_host = (float**)malloc(2*sizeof(float*));
prepHostMem((void**)list_host,2,size);
for(int a=0;a<2;a++)list_host[a]=(float*)list_host[a];
// Initialize the host input vectors
for (int i = 0; i < numElements; i++){
list_host[0][i] = rand()/(float)RAND_MAX;
}
/*****************************************************************************/
/*****************************************************************************/
//Device arrays
list_device=(float**)malloc(2*sizeof(float*));
prepDeviceMem((void**)list_host,(void**)list_device,2,1);
/*****************************************************************************/
/*****************************************************************************/
//process_kernel1
kernel3Run(list_host,list_device);
//Freeing host and device list
free(list_host);
free(list_device);
printf("Job process_kernel3 finished.\n");
/*****************************************************************************/
/*****************************************************************************/
// Reset the device and exit
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
// NOTE(review): this is the only place "err" is checked in main.
err = hipDeviceReset();
if (err != hipSuccess){
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
/*****************************************************************************/
printf("All jobs finished.\n");
fflush(stdout);
return 0;
}
/**
*This function mallocs a list of host arrays each of size "size".
*The list is supposed to be of length "l".
*/
// Fill list_host[0..l-1] with freshly malloc'd buffers of "size" bytes each.
// Prints progress to stdout and aborts the process if any allocation fails.
void prepHostMem(void **list_host,int l,size_t size){
    printf("Allocating host memory...");
    int idx = 0;
    while (idx < l) {
        void *block = malloc(size);
        if (block == NULL) {
            fprintf(stderr,"Failed to allocate host vectors!\n");
            printf("Fail.\n");
            exit(EXIT_FAILURE);
        }
        list_host[idx] = block;
        ++idx;
    }
    printf("OK.\n");
}
/**
*This function mallocs a list of device arrays each corresponding to a host array.
*The function not only cudaMallocs, but also copies memory from host array to device array.
*Thus it takes list of host and device arrays. It cudaMallocs "l1" number of device arrays
*and copies "l2" number of host arrays to device arrays.
*/
// Allocates l1 device buffers into list_device and copies the first l2 host
// buffers from list_host to the device.
// BUG: sizeof(list_host[a]) is sizeof(void*) (typically 8 bytes), NOT the
// buffer size — both the allocation and the copy move only a pointer's worth
// of data. The real element size must be passed in to fix this (interface
// change); flagged here rather than silently altered.
void prepDeviceMem(void **list_host,void **list_device,int l1,int l2){
printf("Preparing device memory:\n");
hipError_t err = hipSuccess;
printf("Allcating device memory...");
//Allocating device memory
for(int a=0;a<l1;a++){
err = hipMalloc((void **)&list_device[a], sizeof(list_host[a])); // BUG: sizeof(pointer), see above
if (err != hipSuccess){
fprintf(stderr, "Failed to allocate device vector %d (error code %s)!\n", a+1,hipGetErrorString(err));
printf("Fail.\n");
exit(EXIT_FAILURE);
}
}
printf("OK.\n");
//Copying from host to device
printf("Copying to device memory...");
//Allocating device memory
for(int a=0;a<l2;a++){
err = hipMemcpy(list_device[a], list_host[a], sizeof(list_host[a]), hipMemcpyHostToDevice); // BUG: sizeof(pointer)
if (err != hipSuccess){
fprintf(stderr, "Failed to copy vector %d from host to device (error code %s)!\n", a+1, hipGetErrorString(err));
printf("Fail.\n");
exit(EXIT_FAILURE);
}
}
printf("OK.\n");
}
/**
*This function frees a list of device arrays.
*The list is supposed to be of length "l".
*/
// Release l device buffers previously obtained with hipMalloc.
// Exits with a diagnostic if any hipFree reports an error.
void freeDeviceMem(float **list_device,int l){
    printf("Freeing device memory...");
    for (int idx = 0; idx < l; ++idx) {
        hipError_t status = hipFree(list_device[idx]);
        if (status == hipSuccess) continue;
        fprintf(stderr, "Failed to free device vector %d (error code %s)!\n", idx+1, hipGetErrorString(status));
        printf("Fail.\n");
        exit(EXIT_FAILURE);
    }
    printf("OK.\n");
}
/**
*This function encapsulates the run of process_kernel1.
*It takes the list of host and device arrays required to run.
*This functions completes the GPU run and also does the testing part.
*/
// Launches process_kernel1 (C[i] = sin(A[i]) + cos(B[i]) per the verification
// loop below), copies the result back, verifies it, then frees device and
// host buffers.
// BUG: "size" below is sizeof(float*) (8 bytes), so numElements becomes 2 —
// only the first couple of elements are transferred and verified, not the
// full 50000-element vectors allocated by main. Needs the real size passed
// in (interface change); flagged rather than silently altered.
void kernel1Run(float **list_host,float **list_device){
printf("CUDA kernel launch with (4,2,2) blocks of (32,32,1) threads.\n");
hipError_t err;
int l=3;
int size = sizeof(list_device[0]); // BUG: sizeof(pointer), see above
int numElements = size/sizeof(float);
dim3 X(4,2,2);
dim3 Y(32,32,1);
hipLaunchKernelGGL(( process_kernel1), dim3(X), dim3(Y), 0, 0, list_device[0], list_device[1], list_device[2], size);
err = hipGetLastError();
if (err != hipSuccess){
fprintf(stderr, "Failed to launch process1 kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Kernel successfully returned from device.\n");
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copying output data from the CUDA device to the host memory...");
err = hipMemcpy((void*)list_host[2], (void*)list_device[2], size, hipMemcpyDeviceToHost);
if (err != hipSuccess){
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
printf("Fail.\n");
exit(EXIT_FAILURE);
}
printf("OK.\n");
// Verify that the result vector is correct
printf("Verifying results...");
for (int i = 0; i < numElements; ++i){
if (fabs(sin(list_host[0][i]) + cos(list_host[1][i]) - list_host[2][i]) > 1e-5){
fprintf(stderr, "Result verification failed at element %d!\n", i);
printf("Fail.\n");
exit(EXIT_FAILURE);
}
}
printf("OK [Test Passed].\n");
//Free device global memory
freeDeviceMem(list_device,l);
// Free host memory (the individual vectors; main frees the list itself)
printf("Freeing host memory...");
for(int a=0;a<l;a++)free(list_host[a]);
printf("OK.\n");
}
/**
*This function encapsulates the run of process_kernel2.
*It takes the list of host and device arrays required to run.
*This functions completes the GPU run and also does the testing part.
*/
// Launches process_kernel2 (B[i] = log(A[i]) per the verification loop
// below), copies the result back, verifies it, then frees device and host
// buffers.
// BUG: "size" below is sizeof(float*) (8 bytes), so numElements becomes 2 —
// only the first couple of elements are transferred and verified. Needs the
// real size passed in (interface change); flagged rather than silently
// altered.
void kernel2Run(float **list_host,float **list_device){
int size = sizeof(list_device[0]); // BUG: sizeof(pointer), see above
int numElements = size/sizeof(float);
int blockz = 16;
int gridy = (int)ceil((float)numElements/(8*8*blockz));
printf("CUDA kernel launch with (2,%d,1) blocks of (8,8,%d) threads.\n",gridy,blockz);
hipError_t err;
int l=2;
dim3 X(2,gridy,1);
dim3 Y(8,8,blockz);
hipLaunchKernelGGL(( process_kernel2), dim3(X), dim3(Y), 0, 0, list_device[0], list_device[1], size);
err = hipGetLastError();
if (err != hipSuccess){
// NOTE(review): message says "process1" but this launches process_kernel2.
fprintf(stderr, "Failed to launch process1 kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Kernel successfully returned from device.\n");
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copying output data from the CUDA device to the host memory...");
err = hipMemcpy((void*)list_host[1], (void*)list_device[1], size, hipMemcpyDeviceToHost);
if (err != hipSuccess){
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
printf("Fail.\n");
exit(EXIT_FAILURE);
}
printf("OK.\n");
// Verify that the result vector is correct
printf("Verifying results...");
for (int i = 0; i < numElements; ++i){
if (fabs(log(list_host[0][i]) - list_host[1][i]) > 1e-5){
fprintf(stderr, "Result verification failed at element %d!\n", i);
printf("Fail.\n");
exit(EXIT_FAILURE);
}
}
printf("OK [Test Passed].\n");
//Free device global memory
freeDeviceMem(list_device,l);
// Free host memory (the individual vectors; main frees the list itself)
printf("Freeing host memory...");
for(int a=0;a<l;a++)free(list_host[a]);
printf("OK.\n");
}
/**
*This function encapsulates the run of process_kernel3.
*It takes the list of host and device arrays required to run.
*This functions completes the GPU run and also does the testing part.
*/
// Launches process_kernel3 (B[i] = sqrt(A[i]) per the verification loop
// below), copies the result back, verifies it, then frees device and host
// buffers.
// BUG: "size" below is sizeof(float*) (8 bytes), so numElements becomes 2 —
// only the first couple of elements are transferred and verified. Needs the
// real size passed in (interface change); flagged rather than silently
// altered.
void kernel3Run(float **list_host,float **list_device){
int size = sizeof(list_device[0]); // BUG: sizeof(pointer), see above
int numElements = size/sizeof(float);
int blocky = 4;
int gridx = (int)ceil((float)numElements/(128*blocky));
printf("CUDA kernel launch with (%d,1,1) blocks of (128,%d,1) threads.\n",gridx,blocky);
hipError_t err;
int l=2;
dim3 X(gridx,1,1);
dim3 Y(128,blocky,1);
hipLaunchKernelGGL(( process_kernel3), dim3(X), dim3(Y), 0, 0, list_device[0], list_device[1], size);
err = hipGetLastError();
if (err != hipSuccess){
// NOTE(review): message says "process1" but this launches process_kernel3.
fprintf(stderr, "Failed to launch process1 kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Kernel successfully returned from device.\n");
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copying output data from the CUDA device to the host memory...");
err = hipMemcpy((void*)list_host[1], (void*)list_device[1], size, hipMemcpyDeviceToHost);
if (err != hipSuccess){
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
printf("Fail.\n");
exit(EXIT_FAILURE);
}
printf("OK.\n");
// Verify that the result vector is correct
printf("Verifying results...");
for (int i = 0; i < numElements; ++i){
if (fabs(sqrt(list_host[0][i]) - list_host[1][i]) > 1e-5){
fprintf(stderr, "Result verification failed at element %d!\n", i);
printf("Fail.\n");
exit(EXIT_FAILURE);
}
}
printf("OK [Test Passed].\n");
//Free device global memory
freeDeviceMem(list_device,l);
// Free host memory (the individual vectors; main frees the list itself)
printf("Freeing host memory...");
for(int a=0;a<l;a++)free(list_host[a]);
printf("OK.\n");
}
| ba1110bfd30d18691a9b6f4b039bc2999b2a46b0.cu | #include<job1.h>
void prepHostMem(void **,int,size_t);
void prepDeviceMem(void **,void **,int,int);
void freeDeviceMem(float **,int);
void kernel1Run(float **,float **);
void kernel2Run(float **,float **);
void kernel3Run(float **,float **);
/**
* Host main Routine
*/
int main(void){
// 50000 floats => size = 200000 bytes per vector.
int numElements = 50000;
size_t size = numElements * sizeof(float);
cudaError_t err;
// list_host / list_device are reallocated fresh for each kernel segment;
// the kernelNRun helpers free the individual arrays and device buffers,
// main only frees the pointer lists themselves.
float **list_host,**list_device;
/*************************************************************************************************
* process_kernel1 segment
**************************************************************************************************/
printf("Preparing for process_kernel1.\n");
/*****************************************************************************/
//Host memory preparation
list_host = (float**)malloc(3*sizeof(float*));
prepHostMem((void**)list_host,3,size);
// NOTE(review): this loop is a no-op (casts float* back to float*) — dead code.
for(int a=0;a<3;a++)list_host[a]=(float*)list_host[a];
// Initialize the host input vectors
for (int i = 0; i < numElements; i++){
list_host[0][i] = rand()/(float)RAND_MAX;
list_host[1][i] = rand()/(float)RAND_MAX;
}
/*****************************************************************************/
/*****************************************************************************/
//Device arrays
// NOTE(review): prepDeviceMem sizes its cudaMalloc/cudaMemcpy with
// sizeof(pointer), not "size" — only ~2 floats per vector ever reach the
// device. TODO: pass the element count/size through.
list_device=(float**)malloc(3*sizeof(float*));
prepDeviceMem((void**)list_host,(void**)list_device,3,2);
/*****************************************************************************/
/*****************************************************************************/
//process_kernel1
kernel1Run(list_host,list_device);
//Freeing host and device list
free(list_host);
free(list_device);
printf("Job process_kernel1 finished.\n");
/*****************************************************************************/
/*************************************************************************************************
* process_kernel2 segment
**************************************************************************************************/
printf("Preparing for process_kernel2.\n");
/*****************************************************************************/
//Host memory preparation
list_host = (float**)malloc(2*sizeof(float*));
prepHostMem((void**)list_host,2,size);
for(int a=0;a<2;a++)list_host[a]=(float*)list_host[a];
// Initialize the host input vectors
for (int i = 0; i < numElements; i++){
list_host[0][i] = rand()/(float)RAND_MAX;
}
/*****************************************************************************/
/*****************************************************************************/
//Device arrays
list_device=(float**)malloc(2*sizeof(float*));
prepDeviceMem((void**)list_host,(void**)list_device,2,1);
/*****************************************************************************/
/*****************************************************************************/
//process_kernel1
kernel2Run(list_host,list_device);
//Freeing host and device list
free(list_host);
free(list_device);
printf("Job process_kernel2 finished.\n");
/*****************************************************************************/
/*************************************************************************************************
* process_kernel3 segment
**************************************************************************************************/
printf("Preparing for process_kernel3.\n");
/*****************************************************************************/
//Host memory preparation
list_host = (float**)malloc(2*sizeof(float*));
prepHostMem((void**)list_host,2,size);
for(int a=0;a<2;a++)list_host[a]=(float*)list_host[a];
// Initialize the host input vectors
for (int i = 0; i < numElements; i++){
list_host[0][i] = rand()/(float)RAND_MAX;
}
/*****************************************************************************/
/*****************************************************************************/
//Device arrays
list_device=(float**)malloc(2*sizeof(float*));
prepDeviceMem((void**)list_host,(void**)list_device,2,1);
/*****************************************************************************/
/*****************************************************************************/
//process_kernel1
kernel3Run(list_host,list_device);
//Freeing host and device list
free(list_host);
free(list_device);
printf("Job process_kernel3 finished.\n");
/*****************************************************************************/
/*****************************************************************************/
// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
// NOTE(review): this is the only place "err" is checked in main.
err = cudaDeviceReset();
if (err != cudaSuccess){
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
/*****************************************************************************/
printf("All jobs finished.\n");
fflush(stdout);
return 0;
}
/**
*This function mallocs a list of host arrays each of size "size".
*The list is supposed to be of length "l".
*/
// Fill list_host[0..l-1] with freshly malloc'd buffers of "size" bytes each.
// Prints progress to stdout and aborts the process if any allocation fails.
void prepHostMem(void **list_host,int l,size_t size){
    printf("Allocating host memory...");
    int idx = 0;
    while (idx < l) {
        void *block = malloc(size);
        if (block == NULL) {
            fprintf(stderr,"Failed to allocate host vectors!\n");
            printf("Fail.\n");
            exit(EXIT_FAILURE);
        }
        list_host[idx] = block;
        ++idx;
    }
    printf("OK.\n");
}
/**
*This function mallocs a list of device arrays each corresponding to a host array.
*The function not only cudaMallocs, but also copies memory from host array to device array.
*Thus it takes list of host and device arrays. It cudaMallocs "l1" number of device arrays
*and copies "l2" number of host arrays to device arrays.
*/
// Allocates l1 device buffers into list_device and copies the first l2 host
// buffers from list_host to the device.
// BUG: sizeof(list_host[a]) is sizeof(void*) (typically 8 bytes), NOT the
// buffer size — both the allocation and the copy move only a pointer's worth
// of data. The real element size must be passed in to fix this (interface
// change); flagged here rather than silently altered.
void prepDeviceMem(void **list_host,void **list_device,int l1,int l2){
printf("Preparing device memory:\n");
cudaError_t err = cudaSuccess;
printf("Allcating device memory...");
//Allocating device memory
for(int a=0;a<l1;a++){
err = cudaMalloc((void **)&list_device[a], sizeof(list_host[a])); // BUG: sizeof(pointer), see above
if (err != cudaSuccess){
fprintf(stderr, "Failed to allocate device vector %d (error code %s)!\n", a+1,cudaGetErrorString(err));
printf("Fail.\n");
exit(EXIT_FAILURE);
}
}
printf("OK.\n");
//Copying from host to device
printf("Copying to device memory...");
//Allocating device memory
for(int a=0;a<l2;a++){
err = cudaMemcpy(list_device[a], list_host[a], sizeof(list_host[a]), cudaMemcpyHostToDevice); // BUG: sizeof(pointer)
if (err != cudaSuccess){
fprintf(stderr, "Failed to copy vector %d from host to device (error code %s)!\n", a+1, cudaGetErrorString(err));
printf("Fail.\n");
exit(EXIT_FAILURE);
}
}
printf("OK.\n");
}
/**
*This function frees a list of device arrays.
*The list is supposed to be of length "l".
*/
// Release l device buffers previously obtained with cudaMalloc.
// Exits with a diagnostic if any cudaFree reports an error.
void freeDeviceMem(float **list_device,int l){
    printf("Freeing device memory...");
    for (int idx = 0; idx < l; ++idx) {
        cudaError_t status = cudaFree(list_device[idx]);
        if (status == cudaSuccess) continue;
        fprintf(stderr, "Failed to free device vector %d (error code %s)!\n", idx+1, cudaGetErrorString(status));
        printf("Fail.\n");
        exit(EXIT_FAILURE);
    }
    printf("OK.\n");
}
/**
*This function encapsulates the run of process_kernel1.
*It takes the list of host and device arrays required to run.
*This functions completes the GPU run and also does the testing part.
*/
// Launches process_kernel1 (C[i] = sin(A[i]) + cos(B[i]) per the verification
// loop below), copies the result back, verifies it, then frees device and
// host buffers.
// BUG: "size" below is sizeof(float*) (8 bytes), so numElements becomes 2 —
// only the first couple of elements are transferred and verified, not the
// full 50000-element vectors allocated by main. Needs the real size passed
// in (interface change); flagged rather than silently altered.
void kernel1Run(float **list_host,float **list_device){
printf("CUDA kernel launch with (4,2,2) blocks of (32,32,1) threads.\n");
cudaError_t err;
int l=3;
int size = sizeof(list_device[0]); // BUG: sizeof(pointer), see above
int numElements = size/sizeof(float);
dim3 X(4,2,2);
dim3 Y(32,32,1);
process_kernel1<<<X, Y>>>(list_device[0], list_device[1], list_device[2], size);
err = cudaGetLastError();
if (err != cudaSuccess){
fprintf(stderr, "Failed to launch process1 kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Kernel successfully returned from device.\n");
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copying output data from the CUDA device to the host memory...");
err = cudaMemcpy((void*)list_host[2], (void*)list_device[2], size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess){
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
printf("Fail.\n");
exit(EXIT_FAILURE);
}
printf("OK.\n");
// Verify that the result vector is correct
printf("Verifying results...");
for (int i = 0; i < numElements; ++i){
if (fabs(sin(list_host[0][i]) + cos(list_host[1][i]) - list_host[2][i]) > 1e-5){
fprintf(stderr, "Result verification failed at element %d!\n", i);
printf("Fail.\n");
exit(EXIT_FAILURE);
}
}
printf("OK [Test Passed].\n");
//Free device global memory
freeDeviceMem(list_device,l);
// Free host memory (the individual vectors; main frees the list itself)
printf("Freeing host memory...");
for(int a=0;a<l;a++)free(list_host[a]);
printf("OK.\n");
}
/**
*This function encapsulates the run of process_kernel2.
*It takes the list of host and device arrays required to run.
*This functions completes the GPU run and also does the testing part.
*/
// Launches process_kernel2 (B[i] = log(A[i]) per the verification loop
// below), copies the result back, verifies it, then frees device and host
// buffers.
// BUG: "size" below is sizeof(float*) (8 bytes), so numElements becomes 2 —
// only the first couple of elements are transferred and verified. Needs the
// real size passed in (interface change); flagged rather than silently
// altered.
void kernel2Run(float **list_host,float **list_device){
int size = sizeof(list_device[0]); // BUG: sizeof(pointer), see above
int numElements = size/sizeof(float);
int blockz = 16;
int gridy = (int)ceil((float)numElements/(8*8*blockz));
printf("CUDA kernel launch with (2,%d,1) blocks of (8,8,%d) threads.\n",gridy,blockz);
cudaError_t err;
int l=2;
dim3 X(2,gridy,1);
dim3 Y(8,8,blockz);
process_kernel2<<<X, Y>>>(list_device[0], list_device[1], size);
err = cudaGetLastError();
if (err != cudaSuccess){
// NOTE(review): message says "process1" but this launches process_kernel2.
fprintf(stderr, "Failed to launch process1 kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Kernel successfully returned from device.\n");
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copying output data from the CUDA device to the host memory...");
err = cudaMemcpy((void*)list_host[1], (void*)list_device[1], size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess){
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
printf("Fail.\n");
exit(EXIT_FAILURE);
}
printf("OK.\n");
// Verify that the result vector is correct
printf("Verifying results...");
for (int i = 0; i < numElements; ++i){
if (fabs(log(list_host[0][i]) - list_host[1][i]) > 1e-5){
fprintf(stderr, "Result verification failed at element %d!\n", i);
printf("Fail.\n");
exit(EXIT_FAILURE);
}
}
printf("OK [Test Passed].\n");
//Free device global memory
freeDeviceMem(list_device,l);
// Free host memory (the individual vectors; main frees the list itself)
printf("Freeing host memory...");
for(int a=0;a<l;a++)free(list_host[a]);
printf("OK.\n");
}
/**
*This function encapsulates the run of process_kernel3.
*It takes the list of host and device arrays required to run.
*This functions completes the GPU run and also does the testing part.
*/
// Launches process_kernel3 (B[i] = sqrt(A[i]) per the verification loop
// below), copies the result back, verifies it, then frees device and host
// buffers.
// BUG: "size" below is sizeof(float*) (8 bytes), so numElements becomes 2 —
// only the first couple of elements are transferred and verified. Needs the
// real size passed in (interface change); flagged rather than silently
// altered.
void kernel3Run(float **list_host,float **list_device){
int size = sizeof(list_device[0]); // BUG: sizeof(pointer), see above
int numElements = size/sizeof(float);
int blocky = 4;
int gridx = (int)ceil((float)numElements/(128*blocky));
printf("CUDA kernel launch with (%d,1,1) blocks of (128,%d,1) threads.\n",gridx,blocky);
cudaError_t err;
int l=2;
dim3 X(gridx,1,1);
dim3 Y(128,blocky,1);
process_kernel3<<<X, Y>>>(list_device[0], list_device[1], size);
err = cudaGetLastError();
if (err != cudaSuccess){
// NOTE(review): message says "process1" but this launches process_kernel3.
fprintf(stderr, "Failed to launch process1 kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Kernel successfully returned from device.\n");
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copying output data from the CUDA device to the host memory...");
err = cudaMemcpy((void*)list_host[1], (void*)list_device[1], size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess){
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
printf("Fail.\n");
exit(EXIT_FAILURE);
}
printf("OK.\n");
// Verify that the result vector is correct
printf("Verifying results...");
for (int i = 0; i < numElements; ++i){
if (fabs(sqrt(list_host[0][i]) - list_host[1][i]) > 1e-5){
fprintf(stderr, "Result verification failed at element %d!\n", i);
printf("Fail.\n");
exit(EXIT_FAILURE);
}
}
printf("OK [Test Passed].\n");
//Free device global memory
freeDeviceMem(list_device,l);
// Free host memory (the individual vectors; main frees the list itself)
printf("Freeing host memory...");
for(int a=0;a<l;a++)free(list_host[a]);
printf("OK.\n");
}
|
08425e9849002240ccb2a6b2ff8012451636b3be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
// Converts one RGBA pixel per thread to a single greyscale byte using the
// NTSC luma weights. The host launches one block per image row with one
// thread per column.
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
    int current_column = threadIdx.x;
    int current_row = blockIdx.x;
    // Fixed: use numCols (the image's actual row stride) rather than
    // blockDim.x, so the pixel address stays correct even if the block is
    // wider than the image. Under the current launch (blockDim.x == numCols)
    // this is numerically identical.
    int index = current_row * numCols + current_column;
    if(current_column < numCols && current_row < numRows){
        uchar4 colorPixel = rgbaImage[index];
        // I = .299f*R + .587f*G + .114f*B; assumes x=R, y=G, z=B channel
        // order -- TODO confirm against the image loader.
        float intensity = .299f * colorPixel.x + .587f * colorPixel.y + .114f * colorPixel.z;
        // Implicit float -> unsigned char truncation, as in the reference code.
        greyImage[index] = intensity;
    }
}
// Host-side launcher: one block per image row, one thread per column.
// NOTE(review): a block is limited to 1024 threads, so this launch fails for
// images wider than 1024 pixels -- verify input sizes or tile the kernel.
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
const dim3 blockSize(numCols, 1, 1);
const dim3 gridSize(numRows, 1, 1);
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
// Block until the kernel finishes so any asynchronous launch/execution error
// is surfaced by the error check on this line.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| 08425e9849002240ccb2a6b2ff8012451636b3be.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
#include "utils.h"
__global__
// Converts one RGBA pixel per thread to a single greyscale byte using the
// NTSC luma weights. The host launches one block per image row with one
// thread per column.
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
    int current_column = threadIdx.x;
    int current_row = blockIdx.x;
    // Fixed: use numCols (the image's actual row stride) rather than
    // blockDim.x, so the pixel address stays correct even if the block is
    // wider than the image. Under the current launch (blockDim.x == numCols)
    // this is numerically identical.
    int index = current_row * numCols + current_column;
    if(current_column < numCols && current_row < numRows){
        uchar4 colorPixel = rgbaImage[index];
        // I = .299f*R + .587f*G + .114f*B; assumes x=R, y=G, z=B channel
        // order -- TODO confirm against the image loader.
        float intensity = .299f * colorPixel.x + .587f * colorPixel.y + .114f * colorPixel.z;
        // Implicit float -> unsigned char truncation, as in the reference code.
        greyImage[index] = intensity;
    }
}
// Host-side launcher: one block per image row, one thread per column.
// NOTE(review): a block is limited to 1024 threads, so this launch fails for
// images wider than 1024 pixels -- verify input sizes or tile the kernel.
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
const dim3 blockSize(numCols, 1, 1);
const dim3 gridSize(numRows, 1, 1);
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
// Block until the kernel finishes so any asynchronous launch/execution error
// is surfaced by the error check on this line.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
e6e4ca06d60437785b9ef5ccd13147a87b721bc3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2020 Konduit, K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <sgazeos@gmail.com>
//
#include <system/op_boilerplate.h>
#include <array/NDArray.h>
#include <execution/Threads.h>
#include <helpers/ConstantTadHelper.h>
#include "../triangular_solve.h"
namespace sd {
namespace ops {
namespace helpers {
/*
* lower triangular process for system of linear equations
* x_1 = b_1/a_1,1
* x_2 = (b_2 - a_2,1 * x_1) / a_2,2
* x_3 = (b_3 - a_3,1 * x_1 - a_3,2 * x_2) / a_3,3
* ...
* x_M = (b_M - a_M,1 * x_1 - ... a_M,M-1 * x_M-1)/ a_M,M
*
* output == x
* a == leftInput
* b == rightInput
*
* */
/*
 * Forward substitution for a lower-triangular system A*x = b, one right-hand
 * side column at a time:
 *   x_r = (b_r - sum_{k < r} a_{r,k} * x_k) / a_{r,r}
 * When unitOnDiag is true the diagonal entries are assumed to be 1 and the
 * final division is skipped. All element addresses go through the shape
 * descriptors, so non-contiguous views are handled.
 */
template <typename T>
static _CUDA_HD void lowerTriangularSolve(T const* leftInput, Nd4jLong const* leftInputShape,
                                          T const* rightInput, Nd4jLong const* rightInputShape,
                                          bool const unitOnDiag, T* output, const Nd4jLong* outputShape,
                                          Nd4jLong rows, Nd4jLong cols) {
    for (auto row = 0; row < rows; row++) {
        for (auto col = 0; col < cols; col++) {
            Nd4jLong rhsPos[] = {row, col};
            Nd4jLong diagPos[] = {row, row};
            auto diagIndex = shape::getOffset(leftInputShape, diagPos, 0);
            auto rhsIndex = shape::getOffset(rightInputShape, rhsPos, 0);
            auto outIndex = shape::getOffset(outputShape, rhsPos, 0);
            // Accumulate b_row minus the contribution of the already-solved rows.
            auto acc = rightInput[rhsIndex];
            for (auto k = 0; k < row; k++) {
                Nd4jLong solvedPos[] = {k, col};
                Nd4jLong matPos[] = {row, k};
                acc -= leftInput[shape::getOffset(leftInputShape, matPos, 0)] * output[shape::getOffset(outputShape, solvedPos, 0)];
            }
            output[outIndex] = unitOnDiag ? acc : acc / leftInput[diagIndex];
        }
    }
}
/*
* upper triangular process for system of linear equations
* x_M = b_M/a_M,M
* x_M-1 = (b_M-1 - a_M-1,M-2 * x_M) / a_M-1,M-1
* x_M-2 = (b_M-2 - a_M-2,M-3 * x_M-2 - a_M-2,M-1 * x_M) / a_3,3
* ...
* x_1 = (b_1 - a_1,2 * x_2 - ... a_1,M * x_M)/ a_1,1
*
* output == x
* a == leftInput
* b == rightInput
*
* */
/*
 * Back substitution for an upper-triangular system A*x = b, one right-hand
 * side column at a time, iterating rows from last to first:
 *   x_r = (b_r - sum_{k > r} a_{r,k} * x_k) / a_{r,r}
 * When unitOnDiag is true the diagonal entries are assumed to be 1 and the
 * final division is skipped. All element addresses go through the shape
 * descriptors, so non-contiguous views are handled.
 */
template <typename T>
static _CUDA_HD void upperTriangularSolve(T const* leftInput, Nd4jLong const* leftInputShape,
                                          T const* rightInput, Nd4jLong const* rightInputShape, bool const unitOnDiag, T* output,
                                          const Nd4jLong* outputShape, Nd4jLong rows, Nd4jLong cols) {
    for (auto rowPlusOne = rows; rowPlusOne > 0; rowPlusOne--) {
        const auto row = rowPlusOne - 1;
        for (auto col = 0; col < cols; col++) {
            Nd4jLong rhsPos[] = {row, col};
            Nd4jLong diagPos[] = {row, row};
            auto diagIndex = shape::getOffset(leftInputShape, diagPos, 0);
            auto rhsIndex = shape::getOffset(rightInputShape, rhsPos, 0);
            auto outIndex = shape::getOffset(outputShape, rhsPos, 0);
            // Accumulate b_row minus the contribution of the already-solved rows below.
            auto acc = rightInput[rhsIndex];
            for (auto k = rowPlusOne; k < rows; k++) {
                Nd4jLong solvedPos[] = {k, col};
                Nd4jLong matPos[] = {row, k};
                acc -= leftInput[shape::getOffset(leftInputShape, matPos, 0)] * output[shape::getOffset(outputShape, solvedPos, 0)];
            }
            output[outIndex] = unitOnDiag ? acc : acc / leftInput[diagIndex];
        }
    }
}
// Batched triangular solve: every TAD (trailing 2D slice) is an independent
// linear system. Threads grid-stride over the batch; each thread solves one
// whole system sequentially via lower/upperTriangularSolve.
template <typename T>
static __global__ void triangularSolveKernel(T const* leftInput, Nd4jLong const* leftPartShape,
T const* rightInput, Nd4jLong const* rightPartShape, bool const lower, bool const unitsOnDiag, T* output,
const Nd4jLong* outputShape, const Nd4jLong* tadLeftShape, const Nd4jLong* tadLeftOffset, const Nd4jLong* tadRightShape,
const Nd4jLong* tadRightOffset, const Nd4jLong* tadOutputShape, const Nd4jLong* tadOutputOffset, Nd4jLong batchNum) {
// Matrix extents are identical for every batch element; computed once per
// block by thread 0 and shared.
__shared__ Nd4jLong rows;
__shared__ Nd4jLong cols;
if (threadIdx.x == 0) {
rows = shape::sizeAt(leftPartShape, -2);
cols = shape::sizeAt(rightPartShape, -1);
}
// Barrier so every thread sees rows/cols before using them.
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto stop = batchNum;
auto increment = blockDim.x * gridDim.x;
// Grid-stride loop over batch elements; each TAD offset selects one slice.
for (auto i = start; i < stop; i += increment) {
auto pLeftPart = leftInput + tadLeftOffset[i];
auto pRightPart = rightInput + tadRightOffset[i];
auto pOutputPart = output + tadOutputOffset[i];
if (lower) {
lowerTriangularSolve<T>(pLeftPart, tadLeftShape, pRightPart, tadRightShape, unitsOnDiag, pOutputPart, tadOutputShape, rows, cols);
} else {
upperTriangularSolve<T>(pLeftPart, tadLeftShape, pRightPart, tadRightShape, unitsOnDiag, pOutputPart, tadOutputShape, rows, cols);
}
}
}
// Host wrapper: builds TAD descriptors for the trailing {-2,-1} 2D slices of
// each operand and launches triangularSolveKernel on the context's stream.
template <typename T>
static int triangularSolveFunctor_(sd::LaunchContext * context, NDArray* leftInput, NDArray* rightInput,
bool lower, bool unitsOnDiag, NDArray* output) {
// Make sure device buffers are current before the kernel touches them.
NDArray::prepareSpecialUse({output}, {leftInput, rightInput});
auto leftTads = ConstantTadHelper::getInstance().tadForDimensions(leftInput->shapeInfo(), {-2, -1});
auto rightTads = ConstantTadHelper::getInstance().tadForDimensions(rightInput->shapeInfo(), {-2, -1});
auto outputTads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-2, -1});
auto stream = context->getCudaStream();
T const* leftBuf = reinterpret_cast<T const*>(leftInput->specialBuffer());
T const* rightBuf = reinterpret_cast<T const*>(rightInput->specialBuffer());
T* outputBuf = reinterpret_cast<T*>(output->specialBuffer());
// NOTE(review): fixed 128x128 launch; 256 bytes of dynamic shared memory are
// requested although the kernel only declares static __shared__ variables --
// presumably harmless, but worth confirming.
hipLaunchKernelGGL(( triangularSolveKernel<T>), dim3(128), dim3(128), 256, *stream, leftBuf, leftInput->specialShapeInfo(),
rightBuf, rightInput->specialShapeInfo(), lower, unitsOnDiag, outputBuf, output->specialShapeInfo(),
leftTads.specialShapeInfo(), leftTads.specialOffsets(), rightTads.specialShapeInfo(),
rightTads.specialOffsets(), outputTads.specialShapeInfo(), outputTads.specialOffsets(),
leftTads.numberOfTads());
// Mark buffers as updated on device.
NDArray::registerSpecialUse({output}, {leftInput, rightInput});
return Status::OK();
}
/// triangularSolve2D - 2D implementation of triangularSolveFunctor
/// \tparam T - type of NDArray output
/// \param context - launch context pointer
/// \param leftInput - T matrix of equation Tx = b
/// \param rightInput - b vector of equation Tx = b
/// \param lower - lower or upper triangular matrix
/// \param unitsOnDiag - solve for case when only units (1.0) on diagonal is assumed
/// \param output - output vector (x on equation Tx = b)
///
// 2D case simply delegates to the batched functor (a single 2D array is a
// batch of one TAD); the commented-out block below is the retired host-side
// fallback kept for reference.
template <typename T>
void triangularSolve2D(sd::LaunchContext* context, const NDArray& leftInput, const NDArray& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output) {
triangularSolveFunctor_<T>(context, const_cast<NDArray*>(&leftInput), const_cast<NDArray*>(&rightInput), lower, unitsOnDiag, &output);
// leftInput.syncToHost(); rightInput.syncToHost(); output.syncToHost();
// T const* pLeftPart = (T const*)leftInput.getBuffer();
// T const* pRightPart = (T const*)rightInput.getBuffer();
// T* pOutputPart = (T*)output.buffer();
// auto rows = leftInput.rows();
// auto cols = leftInput.columns();
// if (lower) {
// lowerTriangularSolve<T>(pLeftPart, leftInput.shapeInfo(), pRightPart, rightInput.shapeInfo(), unitsOnDiag, pOutputPart, output.shapeInfo(), rows, cols);
// } else {
// upperTriangularSolve<T>(pLeftPart, leftInput.shapeInfo(), pRightPart, rightInput.shapeInfo(), unitsOnDiag, pOutputPart, output.shapeInfo(), rows, cols);
// }
// output.syncToDevice();
}
// Explicit instantiations for all float types.
BUILD_SINGLE_TEMPLATE(template void triangularSolve2D, (sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output), FLOAT_TYPES);
// template void triangularSolve2D<float>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output);
// template void triangularSolve2D<bfloat16>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output);
// template void triangularSolve2D<float16>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output);
// template void triangularSolve2D<double>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output);
// Public entry point: dispatches to the concrete triangularSolveFunctor_<T>
// instantiation selected by the left input's data type (float types only).
int triangularSolveFunctor(sd::LaunchContext * context, NDArray* leftInput, NDArray* rightInput, bool lower, bool unitsOnDiag, NDArray* output) {
BUILD_SINGLE_SELECTOR(leftInput->dataType(), return triangularSolveFunctor_, (context, leftInput, rightInput, lower, unitsOnDiag, output), FLOAT_NATIVE);
}
// Mirrors the upper triangle of each batched matrix into the lower triangle
// of the output (transpose of the upper part). blockIdx.x walks the batch;
// threadIdx.x / threadIdx.y tile the (row, column <= row) index space.
template <typename T>
static __global__ void upperAdjointKernel(T const* input, T* output,
                                          Nd4jLong batchSize, Nd4jLong rows, Nd4jLong columns,
                                          Nd4jLong const* inputTads, Nd4jLong const* inputOffsets, Nd4jLong const* outputTads, Nd4jLong const* outputOffsets) {
    for (auto batch = blockIdx.x; batch < batchSize; batch += gridDim.x) {
        T const* src = input + inputOffsets[batch];
        T* dst = output + outputOffsets[batch];
        for (auto row = threadIdx.x; row < rows; row += blockDim.x) {
            for (auto col = threadIdx.y; col <= row; col += blockDim.y) {
                // output[row][col] = input[col][row]
                Nd4jLong dstPos[] = {row, col};
                Nd4jLong srcPos[] = {col, row};
                dst[shape::getOffset(outputTads, dstPos)] = src[shape::getOffset(inputTads, srcPos)];
            }
        }
    }
}
// Mirrors the lower triangle of each batched matrix into the upper triangle
// of the output (transpose of the lower part). blockIdx.x walks the batch;
// threadIdx.x / threadIdx.y tile the (row, column >= row) index space.
template <typename T>
static __global__ void lowerAdjointKernel(T const* input, T* output,
                                          Nd4jLong batchSize, Nd4jLong rows, Nd4jLong columns,
                                          Nd4jLong const* inputTads, Nd4jLong const* inputOffsets, Nd4jLong const* outputTads, Nd4jLong const* outputOffsets) {
    for (auto batch = blockIdx.x; batch < batchSize; batch += gridDim.x) {
        T const* src = input + inputOffsets[batch];
        T* dst = output + outputOffsets[batch];
        for (auto row = threadIdx.x; row < rows; row += blockDim.x) {
            for (auto col = row + threadIdx.y; col < columns; col += blockDim.y) {
                // output[row][col] = input[col][row]
                Nd4jLong dstPos[] = {row, col};
                Nd4jLong srcPos[] = {col, row};
                dst[shape::getOffset(outputTads, dstPos)] = src[shape::getOffset(inputTads, srcPos)];
            }
        }
    }
}
// Launches the triangle-transpose kernel over all batched 2D slices of the
// input. NOTE(review): the block is launched as dim3(256) == (256,1,1), so
// threadIdx.y is always 0 and blockDim.y is 1 -- the kernels' y-tiling
// degenerates to a serial inner loop per row; confirm whether a 2D block was
// intended.
template <typename T>
static void adjointTriangularMatrix_(sd::LaunchContext* context, NDArray const* input, bool const lower,
NDArray* output) {
// TADs describe each trailing {-2,-1} 2D matrix within the batched arrays.
auto inputTads = ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {-2, -1});
auto outputTads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-2, -1});
auto stream = context->getCudaStream();
auto inputBuf = reinterpret_cast<T const*>(input->specialBuffer());
auto outputBuf = reinterpret_cast<T*>(output->specialBuffer());
auto rows = input->sizeAt(-2);
auto columns = input->sizeAt(-1);
if (lower) {
hipLaunchKernelGGL(( lowerAdjointKernel<T>), dim3(128), dim3(256), 256, *stream, inputBuf, outputBuf, outputTads.numberOfTads(), rows, columns, inputTads.specialShapeInfo(), inputTads.specialOffsets(), outputTads.specialShapeInfo(), outputTads.specialOffsets());
} else {
hipLaunchKernelGGL(( upperAdjointKernel<T>), dim3(128), dim3(256), 256, *stream, inputBuf, outputBuf, outputTads.numberOfTads(), rows, columns, inputTads.specialShapeInfo(), inputTads.specialOffsets(), outputTads.specialShapeInfo(), outputTads.specialOffsets());
}
}
// Public entry point: dispatches adjointTriangularMatrix_<T> on the input's
// data type (float types only).
void adjointMatrix(sd::LaunchContext* context, NDArray const* input, bool const lower, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), adjointTriangularMatrix_, (context, input, lower, output), FLOAT_NATIVE);
}
/*
//////////////////////////////////////////////////////////////////////////
template <typename T>
void triangularSolve2D(sd::LaunchContext* context, NDArray const& A, NDArray const& b, bool const lower, bool const unitsOnDiag, NDArray& x) {
if(A.rankOf() != 2)
throw std::runtime_error("triangularSolve2D: input matrix A must be 2D !");
int temp;
const bool isBvector = b.isCommonVector(temp);
const bool isXvector = x.isCommonVector(temp);
if(A.sizeAt(0) != (isBvector ? b.lengthOf() : b.sizeAt(0)))
throw std::runtime_error("triangularSolve2D: A and b must have the same number of rows !");
if(A.sizeAt(1) != (isXvector ? x.lengthOf() : x.sizeAt(0)))
throw std::runtime_error("triangularSolve2D: columns number of array A must be equal to rows number of array x !");
if(isBvector) {
if(lower) {
for (int i = 0; i < A.sizeAt(0); ++i) {
T sum = b.t<T>(i);
for (int j = 0; j < i; ++j)
sum -= A.t<T>(i,j) * x.t<T>(j);
x.r<T>(i) = unitsOnDiag ? sum : sum / A.t<T>(i,i);
}
}
else {
for (int i = A.sizeAt(0) - 1; i >= 0; --i) {
T sum = b.t<T>(i);
for (int j = i + 1; j < A.sizeAt(1); ++j)
sum -= A.t<T>(i,j) * x.t<T>(j);
x.r<T>(i) = unitsOnDiag ? sum : sum / A.t<T>(i,i);
}
}
}
else {
if(lower) {
for (int bCol = 0; bCol < b.sizeAt(1); ++bCol) {
for (int i = 0; i < A.sizeAt(0); ++i) {
T sum = b.t<T>(i, bCol);
for (int j = 0; j < i; ++j)
sum -= A.t<T>(i,j) * x.t<T>(j, bCol);
x.r<T>(i, bCol) = unitsOnDiag ? sum : sum / A.t<T>(i,i);
}
}
}
else {
for (int bCol = 0; bCol < b.sizeAt(1); ++bCol) {
for (int i = A.sizeAt(0) - 1; i >= 0; --i) {
T sum = b.t<T>(i, bCol);
for (int j = i + 1; j < A.sizeAt(1); ++j)
sum -= A.t<T>(i,j) * x.t<T>(j, bCol);
x.r<T>(i, bCol) = unitsOnDiag ? sum : sum / A.t<T>(i,i);
}
}
}
}
}
BUILD_SINGLE_TEMPLATE(template void triangularSolve2D, (sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output), FLOAT_TYPES);
*/
}
}
}
| e6e4ca06d60437785b9ef5ccd13147a87b721bc3.cu | /*******************************************************************************
* Copyright (c) 2020 Konduit, K.K.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <sgazeos@gmail.com>
//
#include <system/op_boilerplate.h>
#include <array/NDArray.h>
#include <execution/Threads.h>
#include <helpers/ConstantTadHelper.h>
#include "../triangular_solve.h"
namespace sd {
namespace ops {
namespace helpers {
/*
* lower triangular process for system of linear equations
* x_1 = b_1/a_1,1
* x_2 = (b_2 - a_2,1 * x_1) / a_2,2
* x_3 = (b_3 - a_3,1 * x_1 - a_3,2 * x_2) / a_3,3
* ...
* x_M = (b_M - a_M,1 * x_1 - ... a_M,M-1 * x_M-1)/ a_M,M
*
* output == x
* a == leftInput
* b == rightInput
*
* */
/*
 * Forward substitution for a lower-triangular system A*x = b, one right-hand
 * side column at a time:
 *   x_r = (b_r - sum_{k < r} a_{r,k} * x_k) / a_{r,r}
 * When unitOnDiag is true the diagonal entries are assumed to be 1 and the
 * final division is skipped. All element addresses go through the shape
 * descriptors, so non-contiguous views are handled.
 */
template <typename T>
static _CUDA_HD void lowerTriangularSolve(T const* leftInput, Nd4jLong const* leftInputShape,
                                          T const* rightInput, Nd4jLong const* rightInputShape,
                                          bool const unitOnDiag, T* output, const Nd4jLong* outputShape,
                                          Nd4jLong rows, Nd4jLong cols) {
    for (auto row = 0; row < rows; row++) {
        for (auto col = 0; col < cols; col++) {
            Nd4jLong rhsPos[] = {row, col};
            Nd4jLong diagPos[] = {row, row};
            auto diagIndex = shape::getOffset(leftInputShape, diagPos, 0);
            auto rhsIndex = shape::getOffset(rightInputShape, rhsPos, 0);
            auto outIndex = shape::getOffset(outputShape, rhsPos, 0);
            // Accumulate b_row minus the contribution of the already-solved rows.
            auto acc = rightInput[rhsIndex];
            for (auto k = 0; k < row; k++) {
                Nd4jLong solvedPos[] = {k, col};
                Nd4jLong matPos[] = {row, k};
                acc -= leftInput[shape::getOffset(leftInputShape, matPos, 0)] * output[shape::getOffset(outputShape, solvedPos, 0)];
            }
            output[outIndex] = unitOnDiag ? acc : acc / leftInput[diagIndex];
        }
    }
}
/*
* upper triangular process for system of linear equations
* x_M = b_M/a_M,M
* x_M-1 = (b_M-1 - a_M-1,M-2 * x_M) / a_M-1,M-1
* x_M-2 = (b_M-2 - a_M-2,M-3 * x_M-2 - a_M-2,M-1 * x_M) / a_3,3
* ...
* x_1 = (b_1 - a_1,2 * x_2 - ... a_1,M * x_M)/ a_1,1
*
* output == x
* a == leftInput
* b == rightInput
*
* */
/*
 * Back substitution for an upper-triangular system A*x = b, one right-hand
 * side column at a time, iterating rows from last to first:
 *   x_r = (b_r - sum_{k > r} a_{r,k} * x_k) / a_{r,r}
 * When unitOnDiag is true the diagonal entries are assumed to be 1 and the
 * final division is skipped. All element addresses go through the shape
 * descriptors, so non-contiguous views are handled.
 */
template <typename T>
static _CUDA_HD void upperTriangularSolve(T const* leftInput, Nd4jLong const* leftInputShape,
                                          T const* rightInput, Nd4jLong const* rightInputShape, bool const unitOnDiag, T* output,
                                          const Nd4jLong* outputShape, Nd4jLong rows, Nd4jLong cols) {
    for (auto rowPlusOne = rows; rowPlusOne > 0; rowPlusOne--) {
        const auto row = rowPlusOne - 1;
        for (auto col = 0; col < cols; col++) {
            Nd4jLong rhsPos[] = {row, col};
            Nd4jLong diagPos[] = {row, row};
            auto diagIndex = shape::getOffset(leftInputShape, diagPos, 0);
            auto rhsIndex = shape::getOffset(rightInputShape, rhsPos, 0);
            auto outIndex = shape::getOffset(outputShape, rhsPos, 0);
            // Accumulate b_row minus the contribution of the already-solved rows below.
            auto acc = rightInput[rhsIndex];
            for (auto k = rowPlusOne; k < rows; k++) {
                Nd4jLong solvedPos[] = {k, col};
                Nd4jLong matPos[] = {row, k};
                acc -= leftInput[shape::getOffset(leftInputShape, matPos, 0)] * output[shape::getOffset(outputShape, solvedPos, 0)];
            }
            output[outIndex] = unitOnDiag ? acc : acc / leftInput[diagIndex];
        }
    }
}
// Batched triangular solve: every TAD (trailing 2D slice) is an independent
// linear system. Threads grid-stride over the batch; each thread solves one
// whole system sequentially via lower/upperTriangularSolve.
template <typename T>
static __global__ void triangularSolveKernel(T const* leftInput, Nd4jLong const* leftPartShape,
T const* rightInput, Nd4jLong const* rightPartShape, bool const lower, bool const unitsOnDiag, T* output,
const Nd4jLong* outputShape, const Nd4jLong* tadLeftShape, const Nd4jLong* tadLeftOffset, const Nd4jLong* tadRightShape,
const Nd4jLong* tadRightOffset, const Nd4jLong* tadOutputShape, const Nd4jLong* tadOutputOffset, Nd4jLong batchNum) {
// Matrix extents are identical for every batch element; computed once per
// block by thread 0 and shared.
__shared__ Nd4jLong rows;
__shared__ Nd4jLong cols;
if (threadIdx.x == 0) {
rows = shape::sizeAt(leftPartShape, -2);
cols = shape::sizeAt(rightPartShape, -1);
}
// Barrier so every thread sees rows/cols before using them.
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto stop = batchNum;
auto increment = blockDim.x * gridDim.x;
// Grid-stride loop over batch elements; each TAD offset selects one slice.
for (auto i = start; i < stop; i += increment) {
auto pLeftPart = leftInput + tadLeftOffset[i];
auto pRightPart = rightInput + tadRightOffset[i];
auto pOutputPart = output + tadOutputOffset[i];
if (lower) {
lowerTriangularSolve<T>(pLeftPart, tadLeftShape, pRightPart, tadRightShape, unitsOnDiag, pOutputPart, tadOutputShape, rows, cols);
} else {
upperTriangularSolve<T>(pLeftPart, tadLeftShape, pRightPart, tadRightShape, unitsOnDiag, pOutputPart, tadOutputShape, rows, cols);
}
}
}
// Host wrapper: builds TAD descriptors for the trailing {-2,-1} 2D slices of
// each operand and launches triangularSolveKernel on the context's stream.
template <typename T>
static int triangularSolveFunctor_(sd::LaunchContext * context, NDArray* leftInput, NDArray* rightInput,
bool lower, bool unitsOnDiag, NDArray* output) {
// Make sure device buffers are current before the kernel touches them.
NDArray::prepareSpecialUse({output}, {leftInput, rightInput});
auto leftTads = ConstantTadHelper::getInstance().tadForDimensions(leftInput->shapeInfo(), {-2, -1});
auto rightTads = ConstantTadHelper::getInstance().tadForDimensions(rightInput->shapeInfo(), {-2, -1});
auto outputTads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-2, -1});
auto stream = context->getCudaStream();
T const* leftBuf = reinterpret_cast<T const*>(leftInput->specialBuffer());
T const* rightBuf = reinterpret_cast<T const*>(rightInput->specialBuffer());
T* outputBuf = reinterpret_cast<T*>(output->specialBuffer());
// NOTE(review): fixed 128x128 launch; 256 bytes of dynamic shared memory are
// requested although the kernel only declares static __shared__ variables --
// presumably harmless, but worth confirming.
triangularSolveKernel<T><<<128, 128, 256, *stream>>>(leftBuf, leftInput->specialShapeInfo(),
rightBuf, rightInput->specialShapeInfo(), lower, unitsOnDiag, outputBuf, output->specialShapeInfo(),
leftTads.specialShapeInfo(), leftTads.specialOffsets(), rightTads.specialShapeInfo(),
rightTads.specialOffsets(), outputTads.specialShapeInfo(), outputTads.specialOffsets(),
leftTads.numberOfTads());
// Mark buffers as updated on device.
NDArray::registerSpecialUse({output}, {leftInput, rightInput});
return Status::OK();
}
/// triangularSolve2D - 2D implementation of triangularSolveFunctor
/// \tparam T - type of NDArray output
/// \param context - launch context pointer
/// \param leftInput - T matrix of equation Tx = b
/// \param rightInput - b vector of equation Tx = b
/// \param lower - lower or upper triangular matrix
/// \param unitsOnDiag - solve for case when only units (1.0) on diagonal is assumed
/// \param output - output vector (x on equation Tx = b)
///
// 2D case simply delegates to the batched functor (a single 2D array is a
// batch of one TAD); the commented-out block below is the retired host-side
// fallback kept for reference.
template <typename T>
void triangularSolve2D(sd::LaunchContext* context, const NDArray& leftInput, const NDArray& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output) {
triangularSolveFunctor_<T>(context, const_cast<NDArray*>(&leftInput), const_cast<NDArray*>(&rightInput), lower, unitsOnDiag, &output);
// leftInput.syncToHost(); rightInput.syncToHost(); output.syncToHost();
// T const* pLeftPart = (T const*)leftInput.getBuffer();
// T const* pRightPart = (T const*)rightInput.getBuffer();
// T* pOutputPart = (T*)output.buffer();
// auto rows = leftInput.rows();
// auto cols = leftInput.columns();
// if (lower) {
// lowerTriangularSolve<T>(pLeftPart, leftInput.shapeInfo(), pRightPart, rightInput.shapeInfo(), unitsOnDiag, pOutputPart, output.shapeInfo(), rows, cols);
// } else {
// upperTriangularSolve<T>(pLeftPart, leftInput.shapeInfo(), pRightPart, rightInput.shapeInfo(), unitsOnDiag, pOutputPart, output.shapeInfo(), rows, cols);
// }
// output.syncToDevice();
}
// Explicit instantiations for all float types.
BUILD_SINGLE_TEMPLATE(template void triangularSolve2D, (sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output), FLOAT_TYPES);
// template void triangularSolve2D<float>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output);
// template void triangularSolve2D<bfloat16>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output);
// template void triangularSolve2D<float16>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output);
// template void triangularSolve2D<double>(sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output);
// Public entry point: dispatches to the concrete triangularSolveFunctor_<T>
// instantiation selected by the left input's data type (float types only).
int triangularSolveFunctor(sd::LaunchContext * context, NDArray* leftInput, NDArray* rightInput, bool lower, bool unitsOnDiag, NDArray* output) {
BUILD_SINGLE_SELECTOR(leftInput->dataType(), return triangularSolveFunctor_, (context, leftInput, rightInput, lower, unitsOnDiag, output), FLOAT_NATIVE);
}
// Mirrors the upper triangle of each batched matrix into the lower triangle
// of the output (transpose of the upper part). blockIdx.x walks the batch;
// threadIdx.x / threadIdx.y tile the (row, column <= row) index space.
template <typename T>
static __global__ void upperAdjointKernel(T const* input, T* output,
                                          Nd4jLong batchSize, Nd4jLong rows, Nd4jLong columns,
                                          Nd4jLong const* inputTads, Nd4jLong const* inputOffsets, Nd4jLong const* outputTads, Nd4jLong const* outputOffsets) {
    for (auto batch = blockIdx.x; batch < batchSize; batch += gridDim.x) {
        T const* src = input + inputOffsets[batch];
        T* dst = output + outputOffsets[batch];
        for (auto row = threadIdx.x; row < rows; row += blockDim.x) {
            for (auto col = threadIdx.y; col <= row; col += blockDim.y) {
                // output[row][col] = input[col][row]
                Nd4jLong dstPos[] = {row, col};
                Nd4jLong srcPos[] = {col, row};
                dst[shape::getOffset(outputTads, dstPos)] = src[shape::getOffset(inputTads, srcPos)];
            }
        }
    }
}
// Mirrors the lower triangle of each batched matrix into the upper triangle
// of the output (transpose of the lower part). blockIdx.x walks the batch;
// threadIdx.x / threadIdx.y tile the (row, column >= row) index space.
template <typename T>
static __global__ void lowerAdjointKernel(T const* input, T* output,
                                          Nd4jLong batchSize, Nd4jLong rows, Nd4jLong columns,
                                          Nd4jLong const* inputTads, Nd4jLong const* inputOffsets, Nd4jLong const* outputTads, Nd4jLong const* outputOffsets) {
    for (auto batch = blockIdx.x; batch < batchSize; batch += gridDim.x) {
        T const* src = input + inputOffsets[batch];
        T* dst = output + outputOffsets[batch];
        for (auto row = threadIdx.x; row < rows; row += blockDim.x) {
            for (auto col = row + threadIdx.y; col < columns; col += blockDim.y) {
                // output[row][col] = input[col][row]
                Nd4jLong dstPos[] = {row, col};
                Nd4jLong srcPos[] = {col, row};
                dst[shape::getOffset(outputTads, dstPos)] = src[shape::getOffset(inputTads, srcPos)];
            }
        }
    }
}
// Launches the triangle-transpose kernel over all batched 2D slices of the
// input. NOTE(review): the block is launched as <<<128, 256>>> == (256,1,1)
// threads, so threadIdx.y is always 0 and blockDim.y is 1 -- the kernels'
// y-tiling degenerates to a serial inner loop per row; confirm whether a 2D
// block was intended.
template <typename T>
static void adjointTriangularMatrix_(sd::LaunchContext* context, NDArray const* input, bool const lower,
NDArray* output) {
// TADs describe each trailing {-2,-1} 2D matrix within the batched arrays.
auto inputTads = ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), {-2, -1});
auto outputTads = ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), {-2, -1});
auto stream = context->getCudaStream();
auto inputBuf = reinterpret_cast<T const*>(input->specialBuffer());
auto outputBuf = reinterpret_cast<T*>(output->specialBuffer());
auto rows = input->sizeAt(-2);
auto columns = input->sizeAt(-1);
if (lower) {
lowerAdjointKernel<T><<<128, 256, 256, *stream>>>(inputBuf, outputBuf, outputTads.numberOfTads(), rows, columns, inputTads.specialShapeInfo(), inputTads.specialOffsets(), outputTads.specialShapeInfo(), outputTads.specialOffsets());
} else {
upperAdjointKernel<T><<<128, 256, 256, *stream>>>(inputBuf, outputBuf, outputTads.numberOfTads(), rows, columns, inputTads.specialShapeInfo(), inputTads.specialOffsets(), outputTads.specialShapeInfo(), outputTads.specialOffsets());
}
}
// Public entry point: dispatches adjointTriangularMatrix_<T> on the input's
// data type (float types only).
void adjointMatrix(sd::LaunchContext* context, NDArray const* input, bool const lower, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), adjointTriangularMatrix_, (context, input, lower, output), FLOAT_NATIVE);
}
/*
//////////////////////////////////////////////////////////////////////////
template <typename T>
void triangularSolve2D(sd::LaunchContext* context, NDArray const& A, NDArray const& b, bool const lower, bool const unitsOnDiag, NDArray& x) {
if(A.rankOf() != 2)
throw std::runtime_error("triangularSolve2D: input matrix A must be 2D !");
int temp;
const bool isBvector = b.isCommonVector(temp);
const bool isXvector = x.isCommonVector(temp);
if(A.sizeAt(0) != (isBvector ? b.lengthOf() : b.sizeAt(0)))
throw std::runtime_error("triangularSolve2D: A and b must have the same number of rows !");
if(A.sizeAt(1) != (isXvector ? x.lengthOf() : x.sizeAt(0)))
throw std::runtime_error("triangularSolve2D: columns number of array A must be equal to rows number of array x !");
if(isBvector) {
if(lower) {
for (int i = 0; i < A.sizeAt(0); ++i) {
T sum = b.t<T>(i);
for (int j = 0; j < i; ++j)
sum -= A.t<T>(i,j) * x.t<T>(j);
x.r<T>(i) = unitsOnDiag ? sum : sum / A.t<T>(i,i);
}
}
else {
for (int i = A.sizeAt(0) - 1; i >= 0; --i) {
T sum = b.t<T>(i);
for (int j = i + 1; j < A.sizeAt(1); ++j)
sum -= A.t<T>(i,j) * x.t<T>(j);
x.r<T>(i) = unitsOnDiag ? sum : sum / A.t<T>(i,i);
}
}
}
else {
if(lower) {
for (int bCol = 0; bCol < b.sizeAt(1); ++bCol) {
for (int i = 0; i < A.sizeAt(0); ++i) {
T sum = b.t<T>(i, bCol);
for (int j = 0; j < i; ++j)
sum -= A.t<T>(i,j) * x.t<T>(j, bCol);
x.r<T>(i, bCol) = unitsOnDiag ? sum : sum / A.t<T>(i,i);
}
}
}
else {
for (int bCol = 0; bCol < b.sizeAt(1); ++bCol) {
for (int i = A.sizeAt(0) - 1; i >= 0; --i) {
T sum = b.t<T>(i, bCol);
for (int j = i + 1; j < A.sizeAt(1); ++j)
sum -= A.t<T>(i,j) * x.t<T>(j, bCol);
x.r<T>(i, bCol) = unitsOnDiag ? sum : sum / A.t<T>(i,i);
}
}
}
}
}
BUILD_SINGLE_TEMPLATE(template void triangularSolve2D, (sd::LaunchContext* context, NDArray const& leftInput, NDArray const& rightInput, bool const lower, bool const unitsOnDiag, NDArray& output), FLOAT_TYPES);
*/
}
}
}
|
ea88705f867d51021dfef7a5cee382cc830c1e21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This file is distributed under the MIT license.
// See the LICENSE file for details.
#include <thrust/device_vector.h>
#include <visionaray/math/norm.h>
#include <visionaray/math/vector.h>
#include <visionaray/texture/texture.h>
#include <visionaray/aligned_vector.h>
#include <gtest/gtest.h>
using namespace visionaray;
//-------------------------------------------------------------------------------------------------
// host texture typedefs
//
using h_texture_R8 = visionaray::texture< unorm<8> , 1>;
using h_texture_RG8 = visionaray::texture<vector<2, unorm<8>>, 1>;
using h_texture_RGB8 = visionaray::texture<vector<3, unorm<8>>, 1>;
using h_texture_RGBA8 = visionaray::texture<vector<4, unorm<8>>, 1>;
//-------------------------------------------------------------------------------------------------
// device texture typedefs
//
using d_texture_R8 = cuda_texture< unorm<8> , 1>;
using d_texture_RG8 = cuda_texture< vector<2, unorm<8>>, 1>;
using d_texture_RGB8 = cuda_texture< vector<3, unorm<8>>, 1>;
using d_texture_RGBA8 = cuda_texture< vector<4, unorm<8>>, 1>;
using d_texture_R32F = cuda_texture< float , 1>;
using d_texture_RG32F = cuda_texture< vector<2, float> , 1>;
//-------------------------------------------------------------------------------------------------
// device texture ref typedefs
//
using d_texture_ref_R8 = cuda_texture_ref< unorm<8> , 1>;
using d_texture_ref_RG8 = cuda_texture_ref<vector<2, unorm<8>>, 1>;
using d_texture_ref_RGB8 = cuda_texture_ref<vector<3, unorm<8>>, 1>;
using d_texture_ref_RGBA8 = cuda_texture_ref<vector<4, unorm<8>>, 1>;
using d_texture_ref_R32F = cuda_texture_ref< float , 1>;
using d_texture_ref_RG32F = cuda_texture_ref<vector<2, float > , 1>;
//-------------------------------------------------------------------------------------------------
// sampler kernel
//
// Kernel that serially samples a 1-D texture at each coordinate in `coords`
// and stores the fetched texel in `result`.  Intended to be launched with a
// single thread (<<<1,1>>>): the loop itself covers all n lookups, so no
// threadIdx-based indexing is needed and results are deterministic.
template <typename T, typename FloatT>
__global__ void sample1D(
    cuda_texture_ref<T, 1> tex,   // device texture reference (non-owning)
    FloatT* coords,               // n sampling coordinates (device memory)
    T* result,                    // n output texels (device memory)
    size_t n
)
{
    for (size_t i = 0; i < n; ++i)
    {
        result[i] = tex1D(tex, coords[i]);
    }
}
//-------------------------------------------------------------------------------------------------
// Make index based on tex coord and address mode
//
// Map a (possibly out-of-range) normalized texture coordinate to a texel
// index, emulating the given address mode with nearest-neighbor convention:
// the coordinate is scaled by the texture size and truncated.
// Returns -1 for an unknown address mode.
// NOTE(review): the Mirror branch handles a single reflection period only,
// which suffices for the coordinate range exercised by these tests.
int make_index(float coord, int texsize, tex_address_mode address_mode)
{
    int idx = static_cast<int>(coord * texsize);

    switch (address_mode)
    {
    case Clamp:
        return max(0, min(idx, texsize - 1));

    case Wrap:
        while (idx < 0)
        {
            idx += texsize;
        }
        return idx % texsize;

    case Mirror:
        if (idx < 0)
        {
            idx = -(idx + 1);                // reflect about 0
        }
        else if (idx >= texsize)
        {
            idx = 2 * texsize - idx - 1;     // reflect about texsize
        }
        return idx;

    default:
        return -1;
    }
}
//-------------------------------------------------------------------------------------------------
// Raw data
//
// Test fixture pairing a host R8 texture with its device copy so that host
// and device lookups can be compared texel-for-texel.  Holds TexSize texels
// of data and NumCoords probe coordinates (defined below; they include
// out-of-range values to exercise every address mode).
struct sampler_R8
{
    enum { TexSize = 8 };     // number of texels in the 1-D texture
    enum { NumCoords = 22 };  // number of probe coordinates

    sampler_R8()
        : d_coords(h_coords, h_coords + NumCoords)
        , d_result(NumCoords)
        , h_texture(TexSize)
    {
        h_texture.reset( reinterpret_cast<unorm<8> const*>(data) );
        reset();
    }

    // Propagate a new address mode to the host texture, then rebuild the
    // device copy (the device texture is a snapshot of the host texture).
    void set_address_mode(tex_address_mode address_mode)
    {
        h_texture.set_address_mode(address_mode);
        reset();
    }

    // Propagate a new filter mode, then rebuild the device copy.
    void set_filter_mode(tex_filter_mode filter_mode)
    {
        h_texture.set_filter_mode(filter_mode);
        reset();
    }

    // Re-upload the host texture to the device and refresh the texture ref.
    void reset()
    {
        d_texture = d_texture_R8(h_texture);
        d_texture_ref = d_texture_ref_R8(d_texture);
    }

    //-------------------------------------------------------------------------
    // Sample into member arrays
    //
    void sample()
    {
        // Single-thread launch: sample1D loops over all coordinates itself.
        // NOTE(review): the launch result is not checked; a launch failure
        // would only surface at the synchronizing thrust copy below.
        hipLaunchKernelGGL(( sample1D), dim3(1), dim3(1), 0, 0,
            d_texture_ref,
            thrust::raw_pointer_cast(d_coords.data()),
            thrust::raw_pointer_cast(d_result.data()),
            d_coords.size()
        );
        h_result = thrust::host_vector<unorm<8>>(d_result);
    }

    //-------------------------------------------------------------------------
    // Sample w/o using the member arrays
    //
    void sample(
        thrust::host_vector<float> const& coords,
        thrust::host_vector<unorm<8>>& result
    )
    {
        thrust::device_vector<float> c(coords);
        thrust::device_vector<unorm<8>> r(result.size());
        hipLaunchKernelGGL(( sample1D), dim3(1), dim3(1), 0, 0,
            d_texture_ref,
            thrust::raw_pointer_cast(c.data()),
            thrust::raw_pointer_cast(r.data()),
            c.size()
        );
        result = thrust::host_vector<unorm<8>>(r);
    }

    static unsigned char const data[TexSize];      // raw texel values
    static float const h_coords[NumCoords];        // probe coordinates

    thrust::device_vector<float> d_coords;         // probe coords on device
    thrust::device_vector<unorm<8>> d_result;      // device lookup results
    thrust::host_vector<unorm<8>> h_result;        // d_result copied back to host

    h_texture_R8 h_texture;          // reference host texture
    d_texture_R8 d_texture;          // device copy (owns device resources)
    d_texture_ref_R8 d_texture_ref;  // non-owning ref passed to kernels
};
// Texel values: an increasing ramp with the maximum repeated at the end so
// linear filtering between the last two texels is exercised as well.
unsigned char const sampler_R8::data[] = { 0, 16, 32, 64, 96, 128, 255, 255 };

// Probe coordinates in units of (texel index / TexSize); deliberately
// includes out-of-range values to exercise each address mode.
float const sampler_R8::h_coords[] = {
    -3.0f / sampler_R8::TexSize, // underflow
    -2.0f / sampler_R8::TexSize,
    -1.0f / sampler_R8::TexSize,
     0.0f / sampler_R8::TexSize, // valid [0.0..1.0) coords
     0.5f / sampler_R8::TexSize,
     1.0f / sampler_R8::TexSize,
     1.5f / sampler_R8::TexSize,
     2.0f / sampler_R8::TexSize,
     2.5f / sampler_R8::TexSize,
     3.0f / sampler_R8::TexSize,
     3.5f / sampler_R8::TexSize,
     4.0f / sampler_R8::TexSize,
     4.5f / sampler_R8::TexSize,
     5.0f / sampler_R8::TexSize,
     5.5f / sampler_R8::TexSize,
     6.0f / sampler_R8::TexSize,
     6.5f / sampler_R8::TexSize,
     7.0f / sampler_R8::TexSize,
     8.0f / sampler_R8::TexSize - 1.0f / sampler_R8::TexSize, // last valid value
     8.0f / sampler_R8::TexSize, // overflow
     9.0f / sampler_R8::TexSize,
    10.0f / sampler_R8::TexSize
};
//-------------------------------------------------------------------------------------------------
// Print lots of samples for debugging
//
// Debugging aid (not referenced by the tests below): prints num_coords
// evenly spaced samples from [0,1) to stdout, taken either through the
// device path (Sampler::sample overload) or through host tex1D lookups.
// NOTE(review): the result buffer is fixed to unorm<8>, so this helper only
// fits R8-style samplers such as sampler_R8.
template <typename Sampler>
void generate_samples(Sampler& sampler, bool on_device = true, int num_coords = 1024)
{
    thrust::host_vector<float> coords(num_coords);
    thrust::host_vector<unorm<8>> result(num_coords);
    for (int i = 0; i < num_coords; ++i)
    {
        coords[i] = static_cast<float>(i) / num_coords;
    }
    if (on_device)
    {
        sampler.sample(coords, result);
    }
    else
    {
        for (int i = 0; i < num_coords; ++i)
        {
            result[i] = (float)tex1D(sampler.h_texture, coords[i]);
        }
    }
    for (auto r : result)
    {
        std::cout << static_cast<float>(r) << '\n';
    }
}
//-------------------------------------------------------------------------------------------------
// Test CUDA 1-D textures
// Test that device texture lookups and host texture lookups produce equal results
//
TEST(TextureCU, Tex1DR8NormalizedFloatNearest)
{
    //-------------------------------------------------------------------------
    // R8, normalized float, nearest neighbor
    //
    // With nearest-neighbor filtering the expected texel can be computed
    // independently via make_index(), so both the host lookup and the device
    // lookup are checked against that reference for every address mode.
    sampler_R8 sampler;
    sampler.set_filter_mode(Nearest);
    // address mode clamp
    sampler.set_address_mode(Clamp);
    sampler.sample();
    for (int i = 0; i < sampler.NumCoords; ++i)
    {
        auto index = make_index(sampler.h_coords[i], sampler.TexSize, Clamp);
        float expected = sampler.data[index] / 255.0f; // unorm<8> -> float
        // host
        EXPECT_FLOAT_EQ(expected, tex1D(sampler.h_texture, sampler.h_coords[i]));
        // device
        EXPECT_FLOAT_EQ(expected, sampler.h_result[i]);
    }
    // address mode wrap
    sampler.set_address_mode(Wrap);
    sampler.sample();
    for (int i = 0; i < sampler.NumCoords; ++i)
    {
        auto index = make_index(sampler.h_coords[i], sampler.TexSize, Wrap);
        float expected = sampler.data[index] / 255.0f;
        // host
        EXPECT_FLOAT_EQ(expected, tex1D(sampler.h_texture, sampler.h_coords[i]));
        // device
        EXPECT_FLOAT_EQ(expected, sampler.h_result[i]);
    }
    // address mode mirror
    sampler.set_address_mode(Mirror);
    sampler.sample();
    for (int i = 0; i < sampler.NumCoords; ++i)
    {
        auto index = make_index(sampler.h_coords[i], sampler.TexSize, Mirror);
        float expected = sampler.data[index] / 255.0f;
        // host
        EXPECT_FLOAT_EQ(expected, tex1D(sampler.h_texture, sampler.h_coords[i]));
        // device
        EXPECT_FLOAT_EQ(expected, sampler.h_result[i]);
    }
}
TEST(TextureCU, Tex1DR8NormalizedFloatLinear)
{
    //-------------------------------------------------------------------------
    // R8, normalized float, linear
    //
    // With linear filtering there is no simple closed-form expected value,
    // so the host tex1D lookup serves as the reference and the test only
    // verifies that the device lookup matches it.  (The unused make_index
    // calls the nearest-neighbor test needed were removed here.)
    sampler_R8 sampler;
    sampler.set_filter_mode(Linear);
    // address mode clamp
    sampler.set_address_mode(Clamp);
    sampler.sample();
    for (int i = 0; i < sampler.NumCoords; ++i)
    {
        float expected = tex1D(sampler.h_texture, sampler.h_coords[i]);
        // check if CPU lookup matches GPU lookup
        EXPECT_FLOAT_EQ(expected, sampler.h_result[i]);
    }
    // address mode wrap
    sampler.set_address_mode(Wrap);
    sampler.sample();
    for (int i = 0; i < sampler.NumCoords; ++i)
    {
        float expected = tex1D(sampler.h_texture, sampler.h_coords[i]);
        // check if CPU lookup matches GPU lookup
        EXPECT_FLOAT_EQ(expected, sampler.h_result[i]);
    }
    // address mode mirror
    sampler.set_address_mode(Mirror);
    sampler.sample();
    for (int i = 0; i < sampler.NumCoords; ++i)
    {
        float expected = tex1D(sampler.h_texture, sampler.h_coords[i]);
        // check if CPU lookup matches GPU lookup
        EXPECT_FLOAT_EQ(expected, sampler.h_result[i]);
    }
}
| ea88705f867d51021dfef7a5cee382cc830c1e21.cu | // This file is distributed under the MIT license.
// See the LICENSE file for details.
#include <thrust/device_vector.h>
#include <visionaray/math/norm.h>
#include <visionaray/math/vector.h>
#include <visionaray/texture/texture.h>
#include <visionaray/aligned_vector.h>
#include <gtest/gtest.h>
using namespace visionaray;
//-------------------------------------------------------------------------------------------------
// host texture typedefs
//
using h_texture_R8 = visionaray::texture< unorm<8> , 1>;
using h_texture_RG8 = visionaray::texture<vector<2, unorm<8>>, 1>;
using h_texture_RGB8 = visionaray::texture<vector<3, unorm<8>>, 1>;
using h_texture_RGBA8 = visionaray::texture<vector<4, unorm<8>>, 1>;
//-------------------------------------------------------------------------------------------------
// device texture typedefs
//
using d_texture_R8 = cuda_texture< unorm<8> , 1>;
using d_texture_RG8 = cuda_texture< vector<2, unorm<8>>, 1>;
using d_texture_RGB8 = cuda_texture< vector<3, unorm<8>>, 1>;
using d_texture_RGBA8 = cuda_texture< vector<4, unorm<8>>, 1>;
using d_texture_R32F = cuda_texture< float , 1>;
using d_texture_RG32F = cuda_texture< vector<2, float> , 1>;
//-------------------------------------------------------------------------------------------------
// device texture ref typedefs
//
using d_texture_ref_R8 = cuda_texture_ref< unorm<8> , 1>;
using d_texture_ref_RG8 = cuda_texture_ref<vector<2, unorm<8>>, 1>;
using d_texture_ref_RGB8 = cuda_texture_ref<vector<3, unorm<8>>, 1>;
using d_texture_ref_RGBA8 = cuda_texture_ref<vector<4, unorm<8>>, 1>;
using d_texture_ref_R32F = cuda_texture_ref< float , 1>;
using d_texture_ref_RG32F = cuda_texture_ref<vector<2, float > , 1>;
//-------------------------------------------------------------------------------------------------
// sampler kernel
//
// Kernel that serially samples a 1-D texture at each coordinate in `coords`
// and stores the fetched texel in `result`.  Intended to be launched with a
// single thread (<<<1,1>>>): the loop itself covers all n lookups, so no
// threadIdx-based indexing is needed and results are deterministic.
template <typename T, typename FloatT>
__global__ void sample1D(
    cuda_texture_ref<T, 1> tex,   // device texture reference (non-owning)
    FloatT* coords,               // n sampling coordinates (device memory)
    T* result,                    // n output texels (device memory)
    size_t n
)
{
    for (size_t i = 0; i < n; ++i)
    {
        result[i] = tex1D(tex, coords[i]);
    }
}
//-------------------------------------------------------------------------------------------------
// Make index based on tex coord and address mode
//
// Map a (possibly out-of-range) normalized texture coordinate to a texel
// index, emulating the given address mode with nearest-neighbor convention:
// the coordinate is scaled by the texture size and truncated.
// Returns -1 for an unknown address mode.
// NOTE(review): the Mirror branch handles a single reflection period only,
// which suffices for the coordinate range exercised by these tests.
int make_index(float coord, int texsize, tex_address_mode address_mode)
{
    int idx = static_cast<int>(coord * texsize);

    switch (address_mode)
    {
    case Clamp:
        return max(0, min(idx, texsize - 1));

    case Wrap:
        while (idx < 0)
        {
            idx += texsize;
        }
        return idx % texsize;

    case Mirror:
        if (idx < 0)
        {
            idx = -(idx + 1);                // reflect about 0
        }
        else if (idx >= texsize)
        {
            idx = 2 * texsize - idx - 1;     // reflect about texsize
        }
        return idx;

    default:
        return -1;
    }
}
//-------------------------------------------------------------------------------------------------
// Raw data
//
// Test fixture pairing a host R8 texture with its device copy so that host
// and device lookups can be compared texel-for-texel.  Holds TexSize texels
// of data and NumCoords probe coordinates (defined below; they include
// out-of-range values to exercise every address mode).
struct sampler_R8
{
    enum { TexSize = 8 };     // number of texels in the 1-D texture
    enum { NumCoords = 22 };  // number of probe coordinates

    sampler_R8()
        : d_coords(h_coords, h_coords + NumCoords)
        , d_result(NumCoords)
        , h_texture(TexSize)
    {
        h_texture.reset( reinterpret_cast<unorm<8> const*>(data) );
        reset();
    }

    // Propagate a new address mode to the host texture, then rebuild the
    // device copy (the device texture is a snapshot of the host texture).
    void set_address_mode(tex_address_mode address_mode)
    {
        h_texture.set_address_mode(address_mode);
        reset();
    }

    // Propagate a new filter mode, then rebuild the device copy.
    void set_filter_mode(tex_filter_mode filter_mode)
    {
        h_texture.set_filter_mode(filter_mode);
        reset();
    }

    // Re-upload the host texture to the device and refresh the texture ref.
    void reset()
    {
        d_texture = d_texture_R8(h_texture);
        d_texture_ref = d_texture_ref_R8(d_texture);
    }

    //-------------------------------------------------------------------------
    // Sample into member arrays
    //
    void sample()
    {
        // Single-thread launch: sample1D loops over all coordinates itself.
        // NOTE(review): the launch result is not checked; a launch failure
        // would only surface at the synchronizing thrust copy below.
        sample1D<<<1, 1>>>(
            d_texture_ref,
            thrust::raw_pointer_cast(d_coords.data()),
            thrust::raw_pointer_cast(d_result.data()),
            d_coords.size()
        );
        h_result = thrust::host_vector<unorm<8>>(d_result);
    }

    //-------------------------------------------------------------------------
    // Sample w/o using the member arrays
    //
    void sample(
        thrust::host_vector<float> const& coords,
        thrust::host_vector<unorm<8>>& result
    )
    {
        thrust::device_vector<float> c(coords);
        thrust::device_vector<unorm<8>> r(result.size());
        sample1D<<<1, 1>>>(
            d_texture_ref,
            thrust::raw_pointer_cast(c.data()),
            thrust::raw_pointer_cast(r.data()),
            c.size()
        );
        result = thrust::host_vector<unorm<8>>(r);
    }

    static unsigned char const data[TexSize];      // raw texel values
    static float const h_coords[NumCoords];        // probe coordinates

    thrust::device_vector<float> d_coords;         // probe coords on device
    thrust::device_vector<unorm<8>> d_result;      // device lookup results
    thrust::host_vector<unorm<8>> h_result;        // d_result copied back to host

    h_texture_R8 h_texture;          // reference host texture
    d_texture_R8 d_texture;          // device copy (owns device resources)
    d_texture_ref_R8 d_texture_ref;  // non-owning ref passed to kernels
};
// Texel values: an increasing ramp with the maximum repeated at the end so
// linear filtering between the last two texels is exercised as well.
unsigned char const sampler_R8::data[] = { 0, 16, 32, 64, 96, 128, 255, 255 };

// Probe coordinates in units of (texel index / TexSize); deliberately
// includes out-of-range values to exercise each address mode.
float const sampler_R8::h_coords[] = {
    -3.0f / sampler_R8::TexSize, // underflow
    -2.0f / sampler_R8::TexSize,
    -1.0f / sampler_R8::TexSize,
     0.0f / sampler_R8::TexSize, // valid [0.0..1.0) coords
     0.5f / sampler_R8::TexSize,
     1.0f / sampler_R8::TexSize,
     1.5f / sampler_R8::TexSize,
     2.0f / sampler_R8::TexSize,
     2.5f / sampler_R8::TexSize,
     3.0f / sampler_R8::TexSize,
     3.5f / sampler_R8::TexSize,
     4.0f / sampler_R8::TexSize,
     4.5f / sampler_R8::TexSize,
     5.0f / sampler_R8::TexSize,
     5.5f / sampler_R8::TexSize,
     6.0f / sampler_R8::TexSize,
     6.5f / sampler_R8::TexSize,
     7.0f / sampler_R8::TexSize,
     8.0f / sampler_R8::TexSize - 1.0f / sampler_R8::TexSize, // last valid value
     8.0f / sampler_R8::TexSize, // overflow
     9.0f / sampler_R8::TexSize,
    10.0f / sampler_R8::TexSize
};
//-------------------------------------------------------------------------------------------------
// Print lots of samples for debugging
//
// Debugging aid (not referenced by the tests below): prints num_coords
// evenly spaced samples from [0,1) to stdout, taken either through the
// device path (Sampler::sample overload) or through host tex1D lookups.
// NOTE(review): the result buffer is fixed to unorm<8>, so this helper only
// fits R8-style samplers such as sampler_R8.
template <typename Sampler>
void generate_samples(Sampler& sampler, bool on_device = true, int num_coords = 1024)
{
    thrust::host_vector<float> coords(num_coords);
    thrust::host_vector<unorm<8>> result(num_coords);
    for (int i = 0; i < num_coords; ++i)
    {
        coords[i] = static_cast<float>(i) / num_coords;
    }
    if (on_device)
    {
        sampler.sample(coords, result);
    }
    else
    {
        for (int i = 0; i < num_coords; ++i)
        {
            result[i] = (float)tex1D(sampler.h_texture, coords[i]);
        }
    }
    for (auto r : result)
    {
        std::cout << static_cast<float>(r) << '\n';
    }
}
//-------------------------------------------------------------------------------------------------
// Test CUDA 1-D textures
// Test that device texture lookups and host texture lookups produce equal results
//
TEST(TextureCU, Tex1DR8NormalizedFloatNearest)
{
    //-------------------------------------------------------------------------
    // R8, normalized float, nearest neighbor
    //
    // With nearest-neighbor filtering the expected texel can be computed
    // independently via make_index(), so both the host lookup and the device
    // lookup are checked against that reference for every address mode.
    sampler_R8 sampler;
    sampler.set_filter_mode(Nearest);
    // address mode clamp
    sampler.set_address_mode(Clamp);
    sampler.sample();
    for (int i = 0; i < sampler.NumCoords; ++i)
    {
        auto index = make_index(sampler.h_coords[i], sampler.TexSize, Clamp);
        float expected = sampler.data[index] / 255.0f; // unorm<8> -> float
        // host
        EXPECT_FLOAT_EQ(expected, tex1D(sampler.h_texture, sampler.h_coords[i]));
        // device
        EXPECT_FLOAT_EQ(expected, sampler.h_result[i]);
    }
    // address mode wrap
    sampler.set_address_mode(Wrap);
    sampler.sample();
    for (int i = 0; i < sampler.NumCoords; ++i)
    {
        auto index = make_index(sampler.h_coords[i], sampler.TexSize, Wrap);
        float expected = sampler.data[index] / 255.0f;
        // host
        EXPECT_FLOAT_EQ(expected, tex1D(sampler.h_texture, sampler.h_coords[i]));
        // device
        EXPECT_FLOAT_EQ(expected, sampler.h_result[i]);
    }
    // address mode mirror
    sampler.set_address_mode(Mirror);
    sampler.sample();
    for (int i = 0; i < sampler.NumCoords; ++i)
    {
        auto index = make_index(sampler.h_coords[i], sampler.TexSize, Mirror);
        float expected = sampler.data[index] / 255.0f;
        // host
        EXPECT_FLOAT_EQ(expected, tex1D(sampler.h_texture, sampler.h_coords[i]));
        // device
        EXPECT_FLOAT_EQ(expected, sampler.h_result[i]);
    }
}
TEST(TextureCU, Tex1DR8NormalizedFloatLinear)
{
    //-------------------------------------------------------------------------
    // R8, normalized float, linear
    //
    // With linear filtering there is no simple closed-form expected value,
    // so the host tex1D lookup serves as the reference and the test only
    // verifies that the device lookup matches it.  (The unused make_index
    // calls the nearest-neighbor test needed were removed here.)
    sampler_R8 sampler;
    sampler.set_filter_mode(Linear);
    // address mode clamp
    sampler.set_address_mode(Clamp);
    sampler.sample();
    for (int i = 0; i < sampler.NumCoords; ++i)
    {
        float expected = tex1D(sampler.h_texture, sampler.h_coords[i]);
        // check if CPU lookup matches GPU lookup
        EXPECT_FLOAT_EQ(expected, sampler.h_result[i]);
    }
    // address mode wrap
    sampler.set_address_mode(Wrap);
    sampler.sample();
    for (int i = 0; i < sampler.NumCoords; ++i)
    {
        float expected = tex1D(sampler.h_texture, sampler.h_coords[i]);
        // check if CPU lookup matches GPU lookup
        EXPECT_FLOAT_EQ(expected, sampler.h_result[i]);
    }
    // address mode mirror
    sampler.set_address_mode(Mirror);
    sampler.sample();
    for (int i = 0; i < sampler.NumCoords; ++i)
    {
        float expected = tex1D(sampler.h_texture, sampler.h_coords[i]);
        // check if CPU lookup matches GPU lookup
        EXPECT_FLOAT_EQ(expected, sampler.h_result[i]);
    }
}
|
db5d2c3e880acaa52d323f31cbdfb79b9d88e603.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __MARCHING_KERNEL_CU__
#define __MARCHING_KERNEL_CU__
#include "grid.cuh"
#include "grid_utils.cu"
#include "marching.h"
namespace Marching {
namespace Kernel {
using namespace Grid;
////////////////////////////////////////////////////////////////////////
// Returns the scalar "density" of a grid cell used as the marching-cubes
// field: the number of particles in the cell (cellStop - cellStart), or 0
// for out-of-range or empty cells.
// NOTE(review): the `volatile` qualifiers look like a compiler workaround;
// keep them unless verified unnecessary.
__device__ float sampleVolume(
    int3 cell,           // cell coordinate (may be outside the grid)
    GridData &gridData   // per-cell start/stop particle indices
) {
    // out-of-grid cells contribute zero density
    if (cell.x >= cudaGridParams.resolution.x ||
        cell.y >= cudaGridParams.resolution.y ||
        cell.z >= cudaGridParams.resolution.z ||
        cell.x < 0 ||
        cell.y < 0 ||
        cell.z < 0
    ) {
        return 0.0f;
    }
    volatile uint hash = Utils::computeCellHash(cell, cudaGridParams);
    volatile uint cellStart = gridData.cellStart[hash];
    //return (cellStart != EMPTY_CELL_VALUE) ? 1.0f : 0.0f;
    if (cellStart != EMPTY_CELL_VALUE) {
        // density = particle count in the cell
        return (float) gridData.cellStop[hash] - cellStart;
    } else {
        return 0.0f;
    }
}
////////////////////////////////////////////////////////////////////////
// Converts a linear voxel index into 3-D cell coordinates; inverse of
// computeIndex().  The grid is padded by GRID_OFFSET per dimension and the
// result is shifted by -1 so border voxels get negative coordinates.
// NOTE(review): the divisions are done in float; for very large grids the
// rounding could mis-place a voxel -- TODO confirm the index range.
inline __device__ int3 computeVoxelPosition(uint &index) {
    float px = cudaGridParams.resolution.x + GRID_OFFSET;
    float pxy = px * (cudaGridParams.resolution.y + GRID_OFFSET);
    int3 cell;
    cell.z = floor(index / pxy);
    int tmp = index - (cell.z * pxy);
    cell.y = floor(tmp / px);
    cell.x = floor(tmp - cell.y * px);
    cell -= 1;  // undo the +1 padding shift applied in computeIndex()
    return cell;
}
////////////////////////////////////////////////////////////////////////
// Linearizes a (possibly border, i.e. -1-based) cell coordinate into a
// voxel index over the padded grid; inverse of computeVoxelPosition().
inline __device__ uint computeIndex(int3 &cell) {
    // TODO optimize this expression
    float px = cudaGridParams.resolution.x + GRID_OFFSET;
    float pxy = px * (cudaGridParams.resolution.y + GRID_OFFSET);
    return (cell.x + 1) + (cell.y + 1) * px + (cell.z + 1) * pxy;
}
////////////////////////////////////////////////////////////////////////
// compute interpolated vertex along an edge
// Computes the iso-surface crossing (iso value 0.5) along the cell edge
// p0-p1 and returns a world-space position.  (Name keeps the historic
// misspelling because callers use it.)
// NOTE(review): if f0 == f1 the division yields inf/nan; callers compute
// all 12 edges unconditionally but only consume edges selected by the
// triangle table, so unused entries may legitimately hold garbage.
__device__ float3 vertexInterolation(
    float3 p0,   // first edge endpoint, in cell coordinates
    float3 p1,   // second edge endpoint, in cell coordinates
    float f0,    // field value at p0
    float f1     // field value at p1
) {
    // cell coords -> world coords (cell centers)
    p0 = ((p0 + 0.5f) * cudaGridParams.cellSize + cudaGridParams.min);
    p1 = ((p1 + 0.5f) * cudaGridParams.cellSize + cudaGridParams.min);
    float t = (0.5f - f0) / (f1 - f0);  // parametric iso crossing
    return lerp(p0, p1, t) - 1.0f + GRID_OFFSET;
}
////////////////////////////////////////////////////////////////////////
// calculate triangle normal
// Face normal of the triangle (v0, v1, v2).  Intentionally returned
// unnormalized: normalizing is cheaper in the vertex shader.
__device__ float3 calculateNormal(float3 *v0, float3 *v1, float3 *v2) {
    float3 const base = *v0;
    float3 e01 = *v1 - base;
    float3 e02 = *v2 - base;
    return cross(e02, e01);
}
////////////////////////////////////////////////////////////////////////
// Marching-cubes stage 1: per cell, sample the 8 corner densities, derive
// the cube configuration index, and record the number of triangle vertices
// the cell will emit plus an occupancy flag (inputs to the later scans).
// Assumed launch shape (from the cell computation below): blockDim.x spans
// the padded x resolution, gridDim.x/.y span padded y/z -- TODO confirm
// against the host-side launch code.
__global__ void classifyVoxel(
    Marching::VoxelData voxelData,   // out: vertices / occupied / cubeIndex per cell
    Marching::TableData tableData,   // in:  marching-cubes lookup tables
    GridData gridData,               // in:  particle cell ranges (density source)
    uint numCells                    // total padded cell count (bounds guard)
) {
    int hash =
        threadIdx.x +
        __mul24(
            blockDim.x,
            blockIdx.x + __mul24(blockIdx.y, gridDim.x)
        );
    if (hash >= numCells) {
        return;
    }
    // thread/block ids map directly to (x,y,z); padding shifts by GRID_OFFSET
    int3 cell = make_int3(
        threadIdx.x - GRID_OFFSET,
        blockIdx.x - GRID_OFFSET,
        blockIdx.y - GRID_OFFSET
    );
    // densities at the 8 cube corners
    float field[8];
    field[0] = sampleVolume(cell, gridData);
    field[1] = sampleVolume(cell + make_int3(1,0,0), gridData);
    field[2] = sampleVolume(cell + make_int3(1,0,1), gridData);
    field[3] = sampleVolume(cell + make_int3(0,0,1), gridData);
    field[4] = sampleVolume(cell + make_int3(0,1,0), gridData);
    field[5] = sampleVolume(cell + make_int3(1,1,0), gridData);
    field[6] = sampleVolume(cell + make_int3(1,1,1), gridData);
    field[7] = sampleVolume(cell + make_int3(0,1,1), gridData);
    float isoValue = 0.5f;
    // one bit per corner; a set bit means the corner is below the iso value
    // (convention must match tableData's triangle table)
    uint cubeIndex = 0;
    cubeIndex = uint(field[0] < isoValue);
    cubeIndex += uint(field[1] < isoValue) * 2;
    cubeIndex += uint(field[2] < isoValue) * 4;
    cubeIndex += uint(field[3] < isoValue) * 8;
    cubeIndex += uint(field[4] < isoValue) * 16;
    cubeIndex += uint(field[5] < isoValue) * 32;
    cubeIndex += uint(field[6] < isoValue) * 64;
    cubeIndex += uint(field[7] < isoValue) * 128;
    uint numVertices = tableData.numVertices[cubeIndex];
    voxelData.vertices[hash] = numVertices;
    voxelData.occupied[hash] = (numVertices > 0);
    voxelData.cubeIndex[hash] = cubeIndex;
}
////////////////////////////////////////////////////////////////////////
// Marching-cubes stage 2: stream-compact the hashes of occupied cells into
// data.compact, using the exclusive scan of the occupancy flags as the
// destination index (see the worked example below).
__global__ void compactVoxels(Marching::VoxelData data, uint numCells) {
    int hash =
        threadIdx.x +
        __mul24(
            blockDim.x,
            blockIdx.x + __mul24(blockIdx.y, gridDim.x)
        );
    if (hash >= numCells) {
        return;
    }
    // if cell is occupied then get index to compact array
    // from sorted occupiedScan
    // and put hash of cell to compact array
    //
    // Example:
    // H  = [0,1,2,3,4,5] --> hash codes
    // O  = [1,0,1,1,0,1] --> occupied flag
    // OS = [0,1,1,2,3,3] --> occupied scan
    // C  = [0,2,3,5,X,X] --> compacted array
    //
    if (data.occupied[hash]) {
        uint index = data.occupiedScan[hash];
        data.compact[index] = hash;
    }
}
////////////////////////////////////////////////////////////////////////
// Marching-cubes stage 3: one thread per occupied voxel emits the triangle
// vertices (positions + face normals) for that voxel's cube configuration.
// Requires blockDim.x == NTHREADS (the shared vertlist is sized for it).
__global__ void generateTriangles(
    Marching::VertexData vertexData,   // out: positions / normals
    Marching::VoxelData voxelData,     // in:  compact list, cubeIndex, verticesScan
    Marching::TableData tableData,     // in:  marching-cubes lookup tables
    GridData gridData,                 // in:  density source
    uint maxVertices,                  // capacity of the output vertex buffers
    uint activeVoxels,                 // number of occupied voxels
    float3 cellSize                    // NOTE(review): unused; kept for ABI
) {
    int index =
        threadIdx.x +
        __mul24(
            blockDim.x,
            blockIdx.x + __mul24(blockIdx.y, gridDim.x)
        );
    // Clamp instead of returning: excess threads redo the last voxel so that
    // every thread in the block reaches the __syncthreads() below.
    // NOTE(review): if activeVoxels == 0, `activeVoxels - 1` underflows.
    if (index > activeVoxels - 1) {
        index = activeVoxels - 1;
    }
    uint voxel = voxelData.compact[index];
    int3 cell = computeVoxelPosition(voxel);
    float3 p;
    p.x = cell.x;// + cudaGridParams.min.x;
    p.y = cell.y;// + cudaGridParams.min.y;
    p.z = cell.z;// + cudaGridParams.min.z;
    // calculate cell vertex positions
    float3 v[8];
    v[0] = p;
    v[1] = p + make_float3(1, 0, 0);
    v[2] = p + make_float3(1, 0, 1);
    v[3] = p + make_float3(0, 0, 1);
    v[4] = p + make_float3(0, 1, 0);
    v[5] = p + make_float3(1, 1, 0);
    v[6] = p + make_float3(1, 1, 1);
    v[7] = p + make_float3(0, 1, 1);
    // densities at the 8 cube corners (same order as classifyVoxel)
    float field[8];
    field[0] = sampleVolume(cell, gridData);
    field[1] = sampleVolume(cell + make_int3(1,0,0), gridData);
    field[2] = sampleVolume(cell + make_int3(1,0,1), gridData);
    field[3] = sampleVolume(cell + make_int3(0,0,1), gridData);
    field[4] = sampleVolume(cell + make_int3(0,1,0), gridData);
    field[5] = sampleVolume(cell + make_int3(1,1,0), gridData);
    field[6] = sampleVolume(cell + make_int3(1,1,1), gridData);
    field[7] = sampleVolume(cell + make_int3(0,1,1), gridData);
    uint cubeIndex = voxelData.cubeIndex[voxel];
    // use shared memory to avoid using local
    // (all 12 edge interpolations are computed unconditionally; only those
    // selected by the triangle table are read back)
    __shared__ float3 vertlist[12*NTHREADS];
    vertlist[threadIdx.x] =
        vertexInterolation(v[0], v[1], field[0], field[1]);
    vertlist[NTHREADS + threadIdx.x] =
        vertexInterolation(v[1], v[2], field[1], field[2]);
    vertlist[NTHREADS * 2 + threadIdx.x] =
        vertexInterolation(v[2], v[3], field[2], field[3]);
    vertlist[NTHREADS * 3 + threadIdx.x] =
        vertexInterolation(v[3], v[0], field[3], field[0]);
    vertlist[NTHREADS * 4 + threadIdx.x] =
        vertexInterolation(v[4], v[5], field[4], field[5]);
    vertlist[NTHREADS * 5 + threadIdx.x] =
        vertexInterolation(v[5], v[6], field[5], field[6]);
    vertlist[NTHREADS * 6 + threadIdx.x] =
        vertexInterolation(v[6], v[7], field[6], field[7]);
    vertlist[NTHREADS * 7 + threadIdx.x] =
        vertexInterolation(v[7], v[4], field[7], field[4]);
    vertlist[NTHREADS * 8 + threadIdx.x] =
        vertexInterolation(v[0], v[4], field[0], field[4]);
    vertlist[NTHREADS * 9 + threadIdx.x] =
        vertexInterolation(v[1], v[5], field[1], field[5]);
    vertlist[NTHREADS * 10 + threadIdx.x] =
        vertexInterolation(v[2], v[6], field[2], field[6]);
    vertlist[NTHREADS * 11 + threadIdx.x] =
        vertexInterolation(v[3], v[7], field[3], field[7]);
    __syncthreads();
    // output triangle vertices
    uint numVerts = tableData.numVertices[cubeIndex];
    for(int i=0; i<numVerts; i+=3) {
        // NOTE(review): this `index` shadows the outer voxel index above.
        uint index = voxelData.verticesScan[voxel] + i;
        float3 *v[3];
        uint edge;
        edge = tableData.triangles[(cubeIndex*16) + i];
        v[0] = &vertlist[(edge*NTHREADS)+threadIdx.x];
        edge = tableData.triangles[(cubeIndex*16) + i + 1];
        v[1] = &vertlist[(edge*NTHREADS)+threadIdx.x];
        edge = tableData.triangles[(cubeIndex*16) + i + 2];
        v[2] = &vertlist[(edge*NTHREADS)+threadIdx.x];
        // calculate triangle surface normal
        float3 n = calculateNormal(v[0], v[1], v[2]);
        // conservative capacity guard (drops triangles past the buffer end)
        if (index < (maxVertices - 3)) {
            vertexData.positions[index] = make_float4(*v[0], 1.0f);
            vertexData.normals[index] = make_float4(n, 0.0f);
            vertexData.positions[index+1] = make_float4(*v[1], 1.0f);
            vertexData.normals[index+1] = make_float4(n, 0.0f);
            vertexData.positions[index+2] = make_float4(*v[2], 1.0f);
            vertexData.normals[index+2] = make_float4(n, 0.0f);
        }
    }
}
////////////////////////////////////////////////////////////////////////
// Sums the face normals of every output vertex of `cell` that lies on the
// given edge index (0..11).  Used by interpolateNormals to gather the
// contribution of a neighboring cell to a shared edge.
inline __device__ float4 getNormal(
    int3 cell,
    Marching::VertexData &vertexData,
    Marching::VoxelData &voxelData,
    Marching::TableData &tableData,
    uint edge
) {
    uint voxel = computeIndex(cell);
    uint cubeIndex = voxelData.cubeIndex[voxel];
    float4 normal = make_float4(0.0f);
    // scan only the vertices this cube configuration actually emits
    for(int i=0; i<tableData.numVertices[cubeIndex]; i++) {
        if (tableData.triangles[cubeIndex * 16 + i] == edge) {
            uint ind = voxelData.verticesScan[voxel] + i;
            normal += vertexData.normals[ind];
        }
    }
    return normal;
}
////////////////////////////////////////////////////////////////////////
// Overwrites the stored normal of every output vertex of `cell` that lies
// on the given edge index (0..11).  Counterpart of getNormal().
inline __device__ void setNormal(
    int3 cell,
    Marching::VertexData vertexData,
    Marching::VoxelData voxelData,
    Marching::TableData tableData,
    uint edge,
    float4 normal
) {
    uint voxel = computeIndex(cell);
    uint cubeIndex = voxelData.cubeIndex[voxel];
    // Iterate only over the vertices this cube configuration actually emits
    // (matches getNormal); the previous code scanned all 16 triangle-table
    // slots, including unused padding entries past numVertices.
    for(int i=0; i<tableData.numVertices[cubeIndex]; i++) {
        if (tableData.triangles[cubeIndex * 16 + i] == edge) {
            uint ind = voxelData.verticesScan[voxel] + i;
            vertexData.normals[ind] = normal;
        }
    }
}
////////////////////////////////////////////////////////////////////////
// Marching-cubes stage 4: smooth per-vertex normals.  For each occupied
// voxel, accumulate this voxel's face normals per edge, add the normals of
// the 3 adjacent cells sharing each edge (via the adjacency tables), and
// write the result into vertexData.inormals.
__global__ void interpolateNormals(
    Marching::VertexData vertexData,   // in: normals; out: inormals
    Marching::VoxelData voxelData,     // in: compact list, cubeIndex, verticesScan
    Marching::TableData tableData,     // in: triangle + edge-adjacency tables
    GridData gridData,                 // NOTE(review): unused; kept for ABI
    uint maxVertices,                  // NOTE(review): unused; kept for ABI
    uint activeVoxels,                 // number of occupied voxels
    float3 cellSize                    // NOTE(review): unused; kept for ABI
) {
    int index =
        threadIdx.x +
        __mul24(
            blockDim.x,
            blockIdx.x + __mul24(blockIdx.y, gridDim.x)
        );
    // clamp excess threads to the last voxel (redundant but harmless writes)
    if (index > activeVoxels - 1) {
        index = activeVoxels - 1;
    }
    uint voxel = voxelData.compact[index];
    int3 cell = computeVoxelPosition(voxel);
    uint cubeIndex = voxelData.cubeIndex[voxel];
    // per-edge accumulators for this voxel
    float4 normals[12];
    bool usedEdges[12];
    for(int i=0; i<12; i++) {
        normals[i] = make_float4(0.0f);
        usedEdges[i] = false;
    }
    // accumulate this voxel's own face normals per edge
    for(int i=0; i<tableData.numVertices[cubeIndex]; i++) {
        uint edge = tableData.triangles[cubeIndex * 16 + i];
        uint ind = voxelData.verticesScan[voxel] + i;
        normals[edge] += vertexData.normals[ind];
        usedEdges[edge] = true;
    }
    // add contributions from the 3 neighbor cells that share each used edge
    for (int i=0; i<12; i++) {
        if (!usedEdges[i]) {
            continue;
        }
        for (int j=0; j<3; j++) {
            uint edge = tableData.adjacentEdges[i*3 + j];
            int3 adjCell = cell + tableData.adjacentEdgesPos[i*3 + j];
            normals[i] +=
                getNormal(
                    adjCell,
                    vertexData,
                    voxelData,
                    tableData,
                    edge
                );
        }
    }
    // write smoothed normals for every vertex this voxel emitted
    for(int i=0; i<tableData.numVertices[cubeIndex]; i++) {
        uint edge = tableData.triangles[cubeIndex * 16 + i];
        uint ind = voxelData.verticesScan[voxel] + i;
        vertexData.inormals[ind] = normals[edge];
    }
}
////////////////////////////////////////////////////////////////////////
};
};
#endif // __MARCHING_KERNEL_CU__ | db5d2c3e880acaa52d323f31cbdfb79b9d88e603.cu | #ifndef __MARCHING_KERNEL_CU__
#define __MARCHING_KERNEL_CU__
#include "grid.cuh"
#include "grid_utils.cu"
#include "marching.h"
namespace Marching {
namespace Kernel {
using namespace Grid;
////////////////////////////////////////////////////////////////////////
// Returns the scalar "density" of a grid cell used as the marching-cubes
// field: the number of particles in the cell (cellStop - cellStart), or 0
// for out-of-range or empty cells.
// NOTE(review): the `volatile` qualifiers look like a compiler workaround;
// keep them unless verified unnecessary.
__device__ float sampleVolume(
    int3 cell,           // cell coordinate (may be outside the grid)
    GridData &gridData   // per-cell start/stop particle indices
) {
    // out-of-grid cells contribute zero density
    if (cell.x >= cudaGridParams.resolution.x ||
        cell.y >= cudaGridParams.resolution.y ||
        cell.z >= cudaGridParams.resolution.z ||
        cell.x < 0 ||
        cell.y < 0 ||
        cell.z < 0
    ) {
        return 0.0f;
    }
    volatile uint hash = Utils::computeCellHash(cell, cudaGridParams);
    volatile uint cellStart = gridData.cellStart[hash];
    //return (cellStart != EMPTY_CELL_VALUE) ? 1.0f : 0.0f;
    if (cellStart != EMPTY_CELL_VALUE) {
        // density = particle count in the cell
        return (float) gridData.cellStop[hash] - cellStart;
    } else {
        return 0.0f;
    }
}
////////////////////////////////////////////////////////////////////////
// Converts a linear voxel index into 3-D cell coordinates; inverse of
// computeIndex().  The grid is padded by GRID_OFFSET per dimension and the
// result is shifted by -1 so border voxels get negative coordinates.
// NOTE(review): the divisions are done in float; for very large grids the
// rounding could mis-place a voxel -- TODO confirm the index range.
inline __device__ int3 computeVoxelPosition(uint &index) {
    float px = cudaGridParams.resolution.x + GRID_OFFSET;
    float pxy = px * (cudaGridParams.resolution.y + GRID_OFFSET);
    int3 cell;
    cell.z = floor(index / pxy);
    int tmp = index - (cell.z * pxy);
    cell.y = floor(tmp / px);
    cell.x = floor(tmp - cell.y * px);
    cell -= 1;  // undo the +1 padding shift applied in computeIndex()
    return cell;
}
////////////////////////////////////////////////////////////////////////
// Linearizes a (possibly border, i.e. -1-based) cell coordinate into a
// voxel index over the padded grid; inverse of computeVoxelPosition().
inline __device__ uint computeIndex(int3 &cell) {
    // TODO optimize this expression
    float px = cudaGridParams.resolution.x + GRID_OFFSET;
    float pxy = px * (cudaGridParams.resolution.y + GRID_OFFSET);
    return (cell.x + 1) + (cell.y + 1) * px + (cell.z + 1) * pxy;
}
////////////////////////////////////////////////////////////////////////
// compute interpolated vertex along an edge
// Computes the iso-surface crossing (iso value 0.5) along the cell edge
// p0-p1 and returns a world-space position.  (Name keeps the historic
// misspelling because callers use it.)
// NOTE(review): if f0 == f1 the division yields inf/nan; callers compute
// all 12 edges unconditionally but only consume edges selected by the
// triangle table, so unused entries may legitimately hold garbage.
__device__ float3 vertexInterolation(
    float3 p0,   // first edge endpoint, in cell coordinates
    float3 p1,   // second edge endpoint, in cell coordinates
    float f0,    // field value at p0
    float f1     // field value at p1
) {
    // cell coords -> world coords (cell centers)
    p0 = ((p0 + 0.5f) * cudaGridParams.cellSize + cudaGridParams.min);
    p1 = ((p1 + 0.5f) * cudaGridParams.cellSize + cudaGridParams.min);
    float t = (0.5f - f0) / (f1 - f0);  // parametric iso crossing
    return lerp(p0, p1, t) - 1.0f + GRID_OFFSET;
}
////////////////////////////////////////////////////////////////////////
// calculate triangle normal
// Face normal of the triangle (v0, v1, v2).  Intentionally returned
// unnormalized: normalizing is cheaper in the vertex shader.
__device__ float3 calculateNormal(float3 *v0, float3 *v1, float3 *v2) {
    float3 const base = *v0;
    float3 e01 = *v1 - base;
    float3 e02 = *v2 - base;
    return cross(e02, e01);
}
////////////////////////////////////////////////////////////////////////
// Marching-cubes stage 1: per cell, sample the 8 corner densities, derive
// the cube configuration index, and record the number of triangle vertices
// the cell will emit plus an occupancy flag (inputs to the later scans).
// Assumed launch shape (from the cell computation below): blockDim.x spans
// the padded x resolution, gridDim.x/.y span padded y/z -- TODO confirm
// against the host-side launch code.
__global__ void classifyVoxel(
    Marching::VoxelData voxelData,   // out: vertices / occupied / cubeIndex per cell
    Marching::TableData tableData,   // in:  marching-cubes lookup tables
    GridData gridData,               // in:  particle cell ranges (density source)
    uint numCells                    // total padded cell count (bounds guard)
) {
    int hash =
        threadIdx.x +
        __mul24(
            blockDim.x,
            blockIdx.x + __mul24(blockIdx.y, gridDim.x)
        );
    if (hash >= numCells) {
        return;
    }
    // thread/block ids map directly to (x,y,z); padding shifts by GRID_OFFSET
    int3 cell = make_int3(
        threadIdx.x - GRID_OFFSET,
        blockIdx.x - GRID_OFFSET,
        blockIdx.y - GRID_OFFSET
    );
    // densities at the 8 cube corners
    float field[8];
    field[0] = sampleVolume(cell, gridData);
    field[1] = sampleVolume(cell + make_int3(1,0,0), gridData);
    field[2] = sampleVolume(cell + make_int3(1,0,1), gridData);
    field[3] = sampleVolume(cell + make_int3(0,0,1), gridData);
    field[4] = sampleVolume(cell + make_int3(0,1,0), gridData);
    field[5] = sampleVolume(cell + make_int3(1,1,0), gridData);
    field[6] = sampleVolume(cell + make_int3(1,1,1), gridData);
    field[7] = sampleVolume(cell + make_int3(0,1,1), gridData);
    float isoValue = 0.5f;
    // one bit per corner; a set bit means the corner is below the iso value
    // (convention must match tableData's triangle table)
    uint cubeIndex = 0;
    cubeIndex = uint(field[0] < isoValue);
    cubeIndex += uint(field[1] < isoValue) * 2;
    cubeIndex += uint(field[2] < isoValue) * 4;
    cubeIndex += uint(field[3] < isoValue) * 8;
    cubeIndex += uint(field[4] < isoValue) * 16;
    cubeIndex += uint(field[5] < isoValue) * 32;
    cubeIndex += uint(field[6] < isoValue) * 64;
    cubeIndex += uint(field[7] < isoValue) * 128;
    uint numVertices = tableData.numVertices[cubeIndex];
    voxelData.vertices[hash] = numVertices;
    voxelData.occupied[hash] = (numVertices > 0);
    voxelData.cubeIndex[hash] = cubeIndex;
}
////////////////////////////////////////////////////////////////////////
// Stream-compaction kernel: every occupied cell writes its own hash into
// the compact array at the slot given by the exclusive scan of the
// occupancy flags.
//
// Example:
//  H  = [0,1,2,3,4,5] --> hash codes
//  O  = [1,0,1,1,0,1] --> occupied flag
//  OS = [0,1,1,2,3,3] --> occupied scan
//  C  = [0,2,3,5,X,X] --> compacted array
__global__ void compactVoxels(Marching::VoxelData data, uint numCells) {
    const int cellHash =
        threadIdx.x +
        __mul24(blockDim.x, blockIdx.x + __mul24(blockIdx.y, gridDim.x));
    if (cellHash >= numCells) {
        return;
    }
    if (data.occupied[cellHash]) {
        data.compact[data.occupiedScan[cellHash]] = cellHash;
    }
}
////////////////////////////////////////////////////////////////////////
// Triangle-generation pass: one thread per *compacted* (occupied) voxel.
// Interpolates the iso-surface crossing on each of the 12 cube edges into
// shared memory, then emits up to numVerts triangle vertices (positions +
// flat face normals) according to the triangle table.
//
// Shared memory: 12 * NTHREADS * sizeof(float3); layout is edge-major with
// stride NTHREADS, i.e. edge e of thread t lives at vertlist[e*NTHREADS + t].
__global__ void generateTriangles(
    Marching::VertexData vertexData,
    Marching::VoxelData voxelData,
    Marching::TableData tableData,
    GridData gridData,
    uint maxVertices,
    uint activeVoxels,
    float3 cellSize
) {
    int index =
        threadIdx.x +
        __mul24(
            blockDim.x,
            blockIdx.x + __mul24(blockIdx.y, gridDim.x)
        );
    // Clamp out-of-range threads instead of returning early so every
    // thread in the block reaches the __syncthreads() below (a divergent
    // early-return before a barrier would be undefined behavior).
    if (index > activeVoxels - 1) {
        index = activeVoxels - 1;
    }
    uint voxel = voxelData.compact[index];
    int3 cell = computeVoxelPosition(voxel);
    float3 p;
    p.x = cell.x;// + cudaGridParams.min.x;
    p.y = cell.y;// + cudaGridParams.min.y;
    p.z = cell.z;// + cudaGridParams.min.z;
    // calculate cell vertex positions (unit cube corners around p)
    float3 v[8];
    v[0] = p;
    v[1] = p + make_float3(1, 0, 0);
    v[2] = p + make_float3(1, 0, 1);
    v[3] = p + make_float3(0, 0, 1);
    v[4] = p + make_float3(0, 1, 0);
    v[5] = p + make_float3(1, 1, 0);
    v[6] = p + make_float3(1, 1, 1);
    v[7] = p + make_float3(0, 1, 1);
    // Re-sample the field at the 8 corners (same order as classifyVoxel).
    float field[8];
    field[0] = sampleVolume(cell, gridData);
    field[1] = sampleVolume(cell + make_int3(1,0,0), gridData);
    field[2] = sampleVolume(cell + make_int3(1,0,1), gridData);
    field[3] = sampleVolume(cell + make_int3(0,0,1), gridData);
    field[4] = sampleVolume(cell + make_int3(0,1,0), gridData);
    field[5] = sampleVolume(cell + make_int3(1,1,0), gridData);
    field[6] = sampleVolume(cell + make_int3(1,1,1), gridData);
    field[7] = sampleVolume(cell + make_int3(0,1,1), gridData);
    uint cubeIndex = voxelData.cubeIndex[voxel];
    // use shared memory to avoid using local
    // (one interpolated point per cube edge per thread)
    __shared__ float3 vertlist[12*NTHREADS];
    vertlist[threadIdx.x] =
        vertexInterolation(v[0], v[1], field[0], field[1]);
    vertlist[NTHREADS + threadIdx.x] =
        vertexInterolation(v[1], v[2], field[1], field[2]);
    vertlist[NTHREADS * 2 + threadIdx.x] =
        vertexInterolation(v[2], v[3], field[2], field[3]);
    vertlist[NTHREADS * 3 + threadIdx.x] =
        vertexInterolation(v[3], v[0], field[3], field[0]);
    vertlist[NTHREADS * 4 + threadIdx.x] =
        vertexInterolation(v[4], v[5], field[4], field[5]);
    vertlist[NTHREADS * 5 + threadIdx.x] =
        vertexInterolation(v[5], v[6], field[5], field[6]);
    vertlist[NTHREADS * 6 + threadIdx.x] =
        vertexInterolation(v[6], v[7], field[6], field[7]);
    vertlist[NTHREADS * 7 + threadIdx.x] =
        vertexInterolation(v[7], v[4], field[7], field[4]);
    vertlist[NTHREADS * 8 + threadIdx.x] =
        vertexInterolation(v[0], v[4], field[0], field[4]);
    vertlist[NTHREADS * 9 + threadIdx.x] =
        vertexInterolation(v[1], v[5], field[1], field[5]);
    vertlist[NTHREADS * 10 + threadIdx.x] =
        vertexInterolation(v[2], v[6], field[2], field[6]);
    vertlist[NTHREADS * 11 + threadIdx.x] =
        vertexInterolation(v[3], v[7], field[3], field[7]);
    __syncthreads();
    // output triangle vertices
    uint numVerts = tableData.numVertices[cubeIndex];
    for(int i=0; i<numVerts; i+=3) {
        // NOTE(review): this inner `index` (output vertex slot from the
        // vertices scan) shadows the outer thread index — easy to misread.
        uint index = voxelData.verticesScan[voxel] + i;
        float3 *v[3];
        uint edge;
        edge = tableData.triangles[(cubeIndex*16) + i];
        v[0] = &vertlist[(edge*NTHREADS)+threadIdx.x];
        edge = tableData.triangles[(cubeIndex*16) + i + 1];
        v[1] = &vertlist[(edge*NTHREADS)+threadIdx.x];
        edge = tableData.triangles[(cubeIndex*16) + i + 2];
        v[2] = &vertlist[(edge*NTHREADS)+threadIdx.x];
        // calculate triangle surface normal (flat shading; normalized later)
        float3 n = calculateNormal(v[0], v[1], v[2]);
        // Drop triangles that would overflow the output buffers.
        if (index < (maxVertices - 3)) {
            vertexData.positions[index] = make_float4(*v[0], 1.0f);
            vertexData.normals[index] = make_float4(n, 0.0f);
            vertexData.positions[index+1] = make_float4(*v[1], 1.0f);
            vertexData.normals[index+1] = make_float4(n, 0.0f);
            vertexData.positions[index+2] = make_float4(*v[2], 1.0f);
            vertexData.normals[index+2] = make_float4(n, 0.0f);
        }
    }
}
////////////////////////////////////////////////////////////////////////
// Sums the flat normals of every vertex that the voxel at `cell` emitted
// on cube edge `edge`. Returns zero if the voxel contributed nothing on
// that edge. Used to gather neighbour contributions when smoothing normals.
inline __device__ float4 getNormal(
    int3 cell,
    Marching::VertexData &vertexData,
    Marching::VoxelData &voxelData,
    Marching::TableData &tableData,
    uint edge
) {
    const uint voxel = computeIndex(cell);
    const uint cubeIndex = voxelData.cubeIndex[voxel];
    const uint numVerts = tableData.numVertices[cubeIndex];
    const uint scanBase = voxelData.verticesScan[voxel];
    float4 accum = make_float4(0.0f);
    for (uint i = 0; i < numVerts; ++i) {
        if (tableData.triangles[cubeIndex * 16 + i] == edge) {
            accum += vertexData.normals[scanBase + i];
        }
    }
    return accum;
}
////////////////////////////////////////////////////////////////////////
// Overwrites the stored normal of every vertex that the voxel at `cell`
// emitted on cube edge `edge`.
inline __device__ void setNormal(
    int3 cell,
    Marching::VertexData vertexData,
    Marching::VoxelData voxelData,
    Marching::TableData tableData,
    uint edge,
    float4 normal
) {
    uint voxel = computeIndex(cell);
    uint cubeIndex = voxelData.cubeIndex[voxel];
    // Iterate only over the vertices actually emitted for this voxel
    // (consistent with getNormal above). The remaining slots of the
    // 16-entry triangle-table row are padding and were only skipped
    // before because padding never equals a valid edge id.
    for(int i=0; i<tableData.numVertices[cubeIndex]; i++) {
        if (tableData.triangles[cubeIndex * 16 + i] == edge) {
            uint ind = voxelData.verticesScan[voxel] + i;
            vertexData.normals[ind] = normal;
        }
    }
}
////////////////////////////////////////////////////////////////////////
// Normal-smoothing pass: one thread per compacted voxel. Accumulates,
// per cube edge, the flat normals of this voxel's own triangles plus the
// contributions of the three neighbouring voxels sharing that edge, and
// writes the result (un-normalized) into vertexData.inormals.
__global__ void interpolateNormals(
    Marching::VertexData vertexData,
    Marching::VoxelData voxelData,
    Marching::TableData tableData,
    GridData gridData,
    uint maxVertices,
    uint activeVoxels,
    float3 cellSize
) {
    int index =
        threadIdx.x +
        __mul24(
            blockDim.x,
            blockIdx.x + __mul24(blockIdx.y, gridDim.x)
        );
    // Clamp out-of-range threads (same scheme as generateTriangles).
    if (index > activeVoxels - 1) {
        index = activeVoxels - 1;
    }
    uint voxel = voxelData.compact[index];
    int3 cell = computeVoxelPosition(voxel);
    uint cubeIndex = voxelData.cubeIndex[voxel];
    // Per-edge accumulators for the 12 cube edges.
    float4 normals[12];
    bool usedEdges[12];
    for(int i=0; i<12; i++) {
        normals[i] = make_float4(0.0f);
        usedEdges[i] = false;
    }
    // Accumulate this voxel's own face normals per edge.
    for(int i=0; i<tableData.numVertices[cubeIndex]; i++) {
        uint edge = tableData.triangles[cubeIndex * 16 + i];
        uint ind = voxelData.verticesScan[voxel] + i;
        normals[edge] += vertexData.normals[ind];
        usedEdges[edge] = true;
    }
    // Add contributions from the 3 neighbour voxels sharing each used edge
    // (edge ids and relative offsets come from precomputed adjacency tables).
    for (int i=0; i<12; i++) {
        if (!usedEdges[i]) {
            continue;
        }
        for (int j=0; j<3; j++) {
            uint edge = tableData.adjacentEdges[i*3 + j];
            int3 adjCell = cell + tableData.adjacentEdgesPos[i*3 + j];
            normals[i] +=
                getNormal(
                    adjCell,
                    vertexData,
                    voxelData,
                    tableData,
                    edge
                );
        }
    }
    // Write the accumulated normal for every vertex this voxel emitted.
    for(int i=0; i<tableData.numVertices[cubeIndex]; i++) {
        uint edge = tableData.triangles[cubeIndex * 16 + i];
        uint ind = voxelData.verticesScan[voxel] + i;
        vertexData.inormals[ind] = normals[edge];
    }
}
////////////////////////////////////////////////////////////////////////
};
};
#endif // __MARCHING_KERNEL_CU__ |
8a2729653928909bc9e6ca308a1af2e85359db56.hip | // !!! This is a file automatically generated by hipify!!!
#include <opencv2/cudafeatures2d.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// This algorithm is a variant of Union Find (BUF) that uses FindAndCompress in place of
// the plain Find used by UF. FindAndCompress updates the label of the starting pixel at
// every iteration of the root-label search. That is, if the equivalence tree is built as:
// A
// /
// B
// /
// C
// then the first iteration replaces C's label with B, and the next iteration with A.
// This way, if another thread reads our value mid-search it already finds B and saves
// one hop. This performs better than plain UF.
// The minimum block size for both dimensions is 4.
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
using namespace cv;
namespace {
// Walks up the union-find tree from node n and returns the root index.
// Labels are stored 1-based (0 = background), so parent index = label - 1.
// Must never be called on a background pixel.
__device__ unsigned Find(const int *s_buf, unsigned n) {
    for (;;) {
        unsigned label = s_buf[n];
        assert(label > 0);
        unsigned parent = label - 1;
        if (parent == n) {
            return n;
        }
        n = parent;
    }
}
// Walks up the union-find tree from node n, returning the root index.
// While walking, it keeps re-pointing the *starting* node at the newest
// ancestor found, so concurrent readers see a progressively shorter path.
// Must never be called on a background pixel.
__device__ unsigned FindCompress(int *s_buf, unsigned n) {
    const unsigned start = n;
    unsigned label = s_buf[n];
    assert(label > 0);
    unsigned parent = label - 1;
    while (parent != n) {
        n = parent;
        label = s_buf[n];
        s_buf[start] = label;  // write-through compression of the start node
        assert(label > 0);
        parent = label - 1;
    }
    return n;
}
// Merges the trees containing nodes a and b by linking their roots.
// Lock-free: atomicMin attaches the larger root under the smaller one; if
// another thread changed a root concurrently (old value != expected root
// label), the loop retries starting from the value that was read.
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
    bool done;
    do {
        a = Find(s_buf, a);
        b = Find(s_buf, b);
        if (a < b) {
            // Try to make a the parent of b's root.
            int old = atomicMin(s_buf + b, a + 1);
            done = (old == b + 1);  // b was still a root with its own label
            b = old - 1;            // otherwise retry from what we found
        }
        else if (b < a) {
            int old = atomicMin(s_buf + a, b + 1);
            done = (old == a + 1);
            a = old - 1;
        }
        else {
            // Same root: already merged.
            done = true;
        }
    } while (!done);
}
// First phase: connected-components labeling restricted to one block tile.
// Each block stages its tile of the image and a local union-find forest in
// shared memory, merges neighbours, then writes provisional *global*
// labels (root position in the full image, 1-based; 0 = background).
__global__ void LocalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
    unsigned local_row = threadIdx.y;
    unsigned local_col = threadIdx.x;
    unsigned local_index = local_row * BLOCK_COLS + local_col;
    unsigned global_row = blockIdx.y * BLOCK_ROWS + local_row;
    unsigned global_col = blockIdx.x * BLOCK_COLS + local_col;
    unsigned img_index = global_row * img.step + global_col;
    __shared__ int s_buf[BLOCK_ROWS * BLOCK_COLS];
    __shared__ unsigned char s_img[BLOCK_ROWS * BLOCK_COLS];
    bool in_limits = (global_row < img.rows && global_col < img.cols);
    // Every node starts as its own root; out-of-image pixels are staged as
    // foreground (0xFF) but never merged because in_limits guards the work.
    s_buf[local_index] = local_index + 1;
    s_img[local_index] = in_limits ? img[img_index] : 0xFF;
    __syncthreads();
    unsigned char v = s_img[local_index];
    if (in_limits) {
        if (v) {
            // Foreground: merge with the left and upper neighbours.
            if (local_col > 0 && s_img[local_index - 1]) {
                Union(s_buf, local_index, local_index - 1);
            }
            if (local_row > 0 && s_img[local_index - BLOCK_COLS]) {
                Union(s_buf, local_index, local_index - BLOCK_COLS);
            }
        }
        else {
            // Background: connect the diagonal foreground neighbours of the
            // pixel above through this pixel (8-connectivity diagonals).
            if (local_row > 0 && s_img[local_index - BLOCK_COLS]) {
                if (local_col > 0 && s_img[local_index - 1]) {
                    Union(s_buf, local_index - 1, local_index - BLOCK_COLS);
                }
                if (local_col < BLOCK_COLS - 1 && s_img[local_index + 1]) {
                    Union(s_buf, local_index + 1, local_index - BLOCK_COLS);
                }
            }
        }
    }
    __syncthreads();
    if (in_limits) {
        if (v) {
            // Translate the local root into its position in the full image
            // and store it as a 1-based global label.
            unsigned f = FindCompress(s_buf, local_index);
            unsigned f_row = f / BLOCK_COLS;
            unsigned f_col = f % BLOCK_COLS;
            unsigned global_f = (blockIdx.y * BLOCK_ROWS + f_row) * (labels.step / labels.elem_size) + (blockIdx.x * BLOCK_COLS + f_col);
            labels.data[global_row * labels.step / sizeof(int) + global_col] = global_f + 1; // 1-based: 0 is reserved for background
        }
        else {
            labels.data[global_row * labels.step / sizeof(int) + global_col] = 0;
        }
    }
}
// Second phase: merges the per-block union-find forests across block
// borders. Only threads sitting on a tile edge (local_row/col == 0 or
// BLOCK_COLS-1) do cross-border unions, directly on the global label array.
__global__ void GlobalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
    unsigned local_row = threadIdx.y;
    unsigned local_col = threadIdx.x;
    unsigned global_row = blockIdx.y * BLOCK_ROWS + local_row;
    unsigned global_col = blockIdx.x * BLOCK_COLS + local_col;
    unsigned img_index = global_row * img.step + global_col;
    unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col;
    bool in_limits = (global_row < img.rows && global_col < img.cols);
    if (in_limits) {
        unsigned char v = img[img_index];
        if (v) {
            // Foreground on a tile border: merge with the left / upper pixel
            // of the neighbouring tile.
            if (global_col > 0 && local_col == 0 && img[img_index - 1]) {
                Union(labels.data, labels_index, labels_index - 1);
            }
            if (global_row > 0 && local_row == 0 && img[img_index - img.step]) {
                Union(labels.data, labels_index, labels_index - labels.step / sizeof(int));
            }
        }
        else {
            // Background on a tile border: link the diagonal foreground
            // neighbours of the pixel above (8-connectivity across tiles).
            if (global_row > 0 && img[img_index - img.step]) {
                if (global_col > 0 && (local_row == 0 || local_col == 0) && img[img_index - 1]) {
                    Union(labels.data, labels_index - 1, labels_index - labels.step / sizeof(int));
                }
                if ((global_col < img.cols - 1) && (local_row == 0 || local_col == BLOCK_COLS - 1) && img[img_index + 1]) {
                    Union(labels.data, labels_index + 1, labels_index - labels.step / sizeof(int));
                }
            }
        }
    }
}
// Final phase: flattens every foreground pixel's label directly onto its
// root (labels stay 1-based; background pixels keep 0 untouched).
__global__ void PathCompression(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
    unsigned global_row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
    unsigned global_col = blockIdx.x * BLOCK_COLS + threadIdx.x;
    unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col;
    if (global_row < labels.rows && global_col < labels.cols) {
        unsigned char val = img[global_row * img.step + global_col];
        if (val) {
            labels[labels_index] = Find(labels.data, labels_index) + 1;
        }
    }
}
}
// 8-connectivity GPU connected-components labeler: block-local union-find
// (LocalMerge), cross-block merging (GlobalMerge), then root flattening
// (PathCompression).
// NOTE(review): kernel launches are not followed by hipGetLastError();
// launch errors only surface at hipDeviceSynchronize().
class CUDA_UF_InlineCompression : public GpuLabeling2D<CONN_8> {
private:
    dim3 grid_size_;
    dim3 block_size_;
public:
    CUDA_UF_InlineCompression() {}
    // Runs the full three-phase labeling pipeline on d_img_ into d_img_labels_.
    void PerformLabeling() {
        d_img_labels_.create(d_img_.size(), CV_32SC1);
        grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
        block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
        // Phase 1
        // Label pixels locally within each block
        LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
        // Debug image for the first phase
        //cuda::GpuMat d_local_labels;
        //d_img_labels_.copyTo(d_local_labels);
        //PathCompression << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
        //// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
        //Mat1i local_labels(img_.size());
        //d_local_labels.download(local_labels);
        // Phase 2
        // Link the union-find trees of the different blocks together
        GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
        // Debug image for the second phase
        //cuda::GpuMat d_global_labels;
        //d_img_labels_.copyTo(d_global_labels);
        //PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
        //// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
        //Mat1i global_labels(img_.size());
        //d_global_labels.download(global_labels);
        // Phase 3
        // Collapse the union-find trees onto their roots
        PathCompression << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
        hipDeviceSynchronize();
    }
private:
    // Timed allocation of the output label image (benchmark hook).
    double Alloc() {
        perf_.start();
        d_img_labels_.create(d_img_.size(), CV_32SC1);
        perf_.stop();
        return perf_.last();
    }
    // Timed deallocation (empty: GpuMat frees itself; kept for symmetric timing).
    double Dealloc() {
        perf_.start();
        perf_.stop();
        return perf_.last();
    }
    double MemoryTransferHostToDevice() {
        perf_.start();
        d_img_.upload(img_);
        perf_.stop();
        return perf_.last();
    }
    void MemoryTransferDeviceToHost() {
        d_img_labels_.download(img_labels_);
    }
    // Phase 1 only (used by the stepped benchmark path).
    void LocalScan() {
        grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
        block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
        LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
        hipDeviceSynchronize();
    }
    // Phases 2+3 (used by the stepped benchmark path).
    void GlobalScan() {
        GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
        PathCompression << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
        hipDeviceSynchronize();
    }
public:
    // Same pipeline, but records per-step timings into perf_.
    void PerformLabelingWithSteps()
    {
        double alloc_timing = Alloc();
        perf_.start();
        LocalScan();
        perf_.stop();
        perf_.store(Step(StepType::FIRST_SCAN), perf_.last());
        perf_.start();
        GlobalScan();
        perf_.stop();
        perf_.store(Step(StepType::SECOND_SCAN), perf_.last());
        double dealloc_timing = Dealloc();
        perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
    }
};
REGISTER_LABELING(CUDA_UF_InlineCompression);
| 8a2729653928909bc9e6ca308a1af2e85359db56.cu | #include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
// This algorithm is a variant of Union Find (BUF) that uses FindAndCompress in place of
// the plain Find used by UF. FindAndCompress updates the label of the starting pixel at
// every iteration of the root-label search. That is, if the equivalence tree is built as:
// A
// /
// B
// /
// C
// then the first iteration replaces C's label with B, and the next iteration with A.
// This way, if another thread reads our value mid-search it already finds B and saves
// one hop. This performs better than plain UF.
// The minimum block size for both dimensions is 4.
#define BLOCK_ROWS 16
#define BLOCK_COLS 16
using namespace cv;
namespace {
// Walks up the union-find tree from node n and returns the root index.
// Labels are stored 1-based (0 = background), so parent index = label - 1.
// Must never be called on a background pixel.
__device__ unsigned Find(const int *s_buf, unsigned n) {
    for (;;) {
        unsigned label = s_buf[n];
        assert(label > 0);
        unsigned parent = label - 1;
        if (parent == n) {
            return n;
        }
        n = parent;
    }
}
// Walks up the union-find tree from node n, returning the root index.
// While walking, it keeps re-pointing the *starting* node at the newest
// ancestor found, so concurrent readers see a progressively shorter path.
// Must never be called on a background pixel.
__device__ unsigned FindCompress(int *s_buf, unsigned n) {
    const unsigned start = n;
    unsigned label = s_buf[n];
    assert(label > 0);
    unsigned parent = label - 1;
    while (parent != n) {
        n = parent;
        label = s_buf[n];
        s_buf[start] = label;  // write-through compression of the start node
        assert(label > 0);
        parent = label - 1;
    }
    return n;
}
// Merges the trees containing nodes a and b by linking their roots.
// Lock-free: atomicMin attaches the larger root under the smaller one; if
// another thread changed a root concurrently (old value != expected root
// label), the loop retries starting from the value that was read.
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
    bool done;
    do {
        a = Find(s_buf, a);
        b = Find(s_buf, b);
        if (a < b) {
            // Try to make a the parent of b's root.
            int old = atomicMin(s_buf + b, a + 1);
            done = (old == b + 1);  // b was still a root with its own label
            b = old - 1;            // otherwise retry from what we found
        }
        else if (b < a) {
            int old = atomicMin(s_buf + a, b + 1);
            done = (old == a + 1);
            a = old - 1;
        }
        else {
            // Same root: already merged.
            done = true;
        }
    } while (!done);
}
// First phase: connected-components labeling restricted to one block tile.
// Each block stages its tile of the image and a local union-find forest in
// shared memory, merges neighbours, then writes provisional *global*
// labels (root position in the full image, 1-based; 0 = background).
__global__ void LocalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
    unsigned local_row = threadIdx.y;
    unsigned local_col = threadIdx.x;
    unsigned local_index = local_row * BLOCK_COLS + local_col;
    unsigned global_row = blockIdx.y * BLOCK_ROWS + local_row;
    unsigned global_col = blockIdx.x * BLOCK_COLS + local_col;
    unsigned img_index = global_row * img.step + global_col;
    __shared__ int s_buf[BLOCK_ROWS * BLOCK_COLS];
    __shared__ unsigned char s_img[BLOCK_ROWS * BLOCK_COLS];
    bool in_limits = (global_row < img.rows && global_col < img.cols);
    // Every node starts as its own root; out-of-image pixels are staged as
    // foreground (0xFF) but never merged because in_limits guards the work.
    s_buf[local_index] = local_index + 1;
    s_img[local_index] = in_limits ? img[img_index] : 0xFF;
    __syncthreads();
    unsigned char v = s_img[local_index];
    if (in_limits) {
        if (v) {
            // Foreground: merge with the left and upper neighbours.
            if (local_col > 0 && s_img[local_index - 1]) {
                Union(s_buf, local_index, local_index - 1);
            }
            if (local_row > 0 && s_img[local_index - BLOCK_COLS]) {
                Union(s_buf, local_index, local_index - BLOCK_COLS);
            }
        }
        else {
            // Background: connect the diagonal foreground neighbours of the
            // pixel above through this pixel (8-connectivity diagonals).
            if (local_row > 0 && s_img[local_index - BLOCK_COLS]) {
                if (local_col > 0 && s_img[local_index - 1]) {
                    Union(s_buf, local_index - 1, local_index - BLOCK_COLS);
                }
                if (local_col < BLOCK_COLS - 1 && s_img[local_index + 1]) {
                    Union(s_buf, local_index + 1, local_index - BLOCK_COLS);
                }
            }
        }
    }
    __syncthreads();
    if (in_limits) {
        if (v) {
            // Translate the local root into its position in the full image
            // and store it as a 1-based global label.
            unsigned f = FindCompress(s_buf, local_index);
            unsigned f_row = f / BLOCK_COLS;
            unsigned f_col = f % BLOCK_COLS;
            unsigned global_f = (blockIdx.y * BLOCK_ROWS + f_row) * (labels.step / labels.elem_size) + (blockIdx.x * BLOCK_COLS + f_col);
            labels.data[global_row * labels.step / sizeof(int) + global_col] = global_f + 1; // 1-based: 0 is reserved for background
        }
        else {
            labels.data[global_row * labels.step / sizeof(int) + global_col] = 0;
        }
    }
}
// Second phase: merges the per-block union-find forests across block
// borders. Only threads sitting on a tile edge (local_row/col == 0 or
// BLOCK_COLS-1) do cross-border unions, directly on the global label array.
__global__ void GlobalMerge(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
    unsigned local_row = threadIdx.y;
    unsigned local_col = threadIdx.x;
    unsigned global_row = blockIdx.y * BLOCK_ROWS + local_row;
    unsigned global_col = blockIdx.x * BLOCK_COLS + local_col;
    unsigned img_index = global_row * img.step + global_col;
    unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col;
    bool in_limits = (global_row < img.rows && global_col < img.cols);
    if (in_limits) {
        unsigned char v = img[img_index];
        if (v) {
            // Foreground on a tile border: merge with the left / upper pixel
            // of the neighbouring tile.
            if (global_col > 0 && local_col == 0 && img[img_index - 1]) {
                Union(labels.data, labels_index, labels_index - 1);
            }
            if (global_row > 0 && local_row == 0 && img[img_index - img.step]) {
                Union(labels.data, labels_index, labels_index - labels.step / sizeof(int));
            }
        }
        else {
            // Background on a tile border: link the diagonal foreground
            // neighbours of the pixel above (8-connectivity across tiles).
            if (global_row > 0 && img[img_index - img.step]) {
                if (global_col > 0 && (local_row == 0 || local_col == 0) && img[img_index - 1]) {
                    Union(labels.data, labels_index - 1, labels_index - labels.step / sizeof(int));
                }
                if ((global_col < img.cols - 1) && (local_row == 0 || local_col == BLOCK_COLS - 1) && img[img_index + 1]) {
                    Union(labels.data, labels_index + 1, labels_index - labels.step / sizeof(int));
                }
            }
        }
    }
}
// Final phase: flattens every foreground pixel's label directly onto its
// root (labels stay 1-based; background pixels keep 0 untouched).
__global__ void PathCompression(const cuda::PtrStepSzb img, cuda::PtrStepSzi labels) {
    unsigned global_row = blockIdx.y * BLOCK_ROWS + threadIdx.y;
    unsigned global_col = blockIdx.x * BLOCK_COLS + threadIdx.x;
    unsigned labels_index = global_row * (labels.step / labels.elem_size) + global_col;
    if (global_row < labels.rows && global_col < labels.cols) {
        unsigned char val = img[global_row * img.step + global_col];
        if (val) {
            labels[labels_index] = Find(labels.data, labels_index) + 1;
        }
    }
}
}
// 8-connectivity GPU connected-components labeler: block-local union-find
// (LocalMerge), cross-block merging (GlobalMerge), then root flattening
// (PathCompression).
// NOTE(review): kernel launches are not followed by cudaGetLastError();
// launch errors only surface at cudaDeviceSynchronize().
class CUDA_UF_InlineCompression : public GpuLabeling2D<CONN_8> {
private:
    dim3 grid_size_;
    dim3 block_size_;
public:
    CUDA_UF_InlineCompression() {}
    // Runs the full three-phase labeling pipeline on d_img_ into d_img_labels_.
    void PerformLabeling() {
        d_img_labels_.create(d_img_.size(), CV_32SC1);
        grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
        block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
        // Phase 1
        // Label pixels locally within each block
        LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
        // Debug image for the first phase
        //cuda::GpuMat d_local_labels;
        //d_img_labels_.copyTo(d_local_labels);
        //PathCompression << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
        //// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_local_labels);
        //Mat1i local_labels(img_.size());
        //d_local_labels.download(local_labels);
        // Phase 2
        // Link the union-find trees of the different blocks together
        GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
        // Debug image for the second phase
        //cuda::GpuMat d_global_labels;
        //d_img_labels_.copyTo(d_global_labels);
        //PathCompression << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
        //// ZeroBackground << <grid_size_, block_size_ >> > (d_img_, d_global_labels);
        //Mat1i global_labels(img_.size());
        //d_global_labels.download(global_labels);
        // Phase 3
        // Collapse the union-find trees onto their roots
        PathCompression << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
        cudaDeviceSynchronize();
    }
private:
    // Timed allocation of the output label image (benchmark hook).
    double Alloc() {
        perf_.start();
        d_img_labels_.create(d_img_.size(), CV_32SC1);
        perf_.stop();
        return perf_.last();
    }
    // Timed deallocation (empty: GpuMat frees itself; kept for symmetric timing).
    double Dealloc() {
        perf_.start();
        perf_.stop();
        return perf_.last();
    }
    double MemoryTransferHostToDevice() {
        perf_.start();
        d_img_.upload(img_);
        perf_.stop();
        return perf_.last();
    }
    void MemoryTransferDeviceToHost() {
        d_img_labels_.download(img_labels_);
    }
    // Phase 1 only (used by the stepped benchmark path).
    void LocalScan() {
        grid_size_ = dim3((d_img_.cols + BLOCK_COLS - 1) / BLOCK_COLS, (d_img_.rows + BLOCK_ROWS - 1) / BLOCK_ROWS, 1);
        block_size_ = dim3(BLOCK_COLS, BLOCK_ROWS, 1);
        LocalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
        cudaDeviceSynchronize();
    }
    // Phases 2+3 (used by the stepped benchmark path).
    void GlobalScan() {
        GlobalMerge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
        PathCompression << <grid_size_, block_size_ >> > (d_img_, d_img_labels_);
        cudaDeviceSynchronize();
    }
public:
    // Same pipeline, but records per-step timings into perf_.
    void PerformLabelingWithSteps()
    {
        double alloc_timing = Alloc();
        perf_.start();
        LocalScan();
        perf_.stop();
        perf_.store(Step(StepType::FIRST_SCAN), perf_.last());
        perf_.start();
        GlobalScan();
        perf_.stop();
        perf_.store(Step(StepType::SECOND_SCAN), perf_.last());
        double dealloc_timing = Dealloc();
        perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
    }
};
REGISTER_LABELING(CUDA_UF_InlineCompression);
|
2696a6249c14671428e789947a75712727f47c93.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/attributes.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/transform.h>
#include <thrust/transform_scan.h>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Returns a numeric column containing lengths of each string in
* based on the provided unary function.
*
* Any null string will result in a null entry for that row in the output column.
*
* @tparam UnaryFunction Device function that returns an integer given a string_view.
* @param strings Strings instance for this operation.
* @param ufn Function returns an integer for each string.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New INT32 column with lengths for each string.
*/
template <typename UnaryFunction>
std::unique_ptr<column> counts_fn(strings_column_view const& strings,
                                  UnaryFunction& ufn,
                                  rmm::cuda_stream_view stream,
                                  rmm::mr::device_memory_resource* mr)
{
  auto strings_count = strings.size();
  auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
  auto d_strings = *strings_column;
  // create output column; the input's null mask is copied so null rows
  // stay null in the output
  auto results = std::make_unique<cudf::column>(
    cudf::data_type{type_id::INT32},
    strings_count,
    rmm::device_buffer(strings_count * sizeof(int32_t), stream, mr),
    cudf::detail::copy_bitmask(strings.parent(), stream, mr),  // copy the null mask
    strings.null_count());
  auto results_view = results->mutable_view();
  auto d_lengths = results_view.data<int32_t>();
  // fill in the lengths; null rows get 0 in the data buffer but are
  // masked out by the copied null mask above
  thrust::transform(rmm::exec_policy(stream),
                    thrust::make_counting_iterator<cudf::size_type>(0),
                    thrust::make_counting_iterator<cudf::size_type>(strings_count),
                    d_lengths,
                    [d_strings, ufn] __device__(size_type idx) {
                      int32_t length = 0;
                      if (!d_strings.is_null(idx))
                        length = static_cast<int32_t>(ufn(d_strings.element<string_view>(idx)));
                      return length;
                    });
  results->set_null_count(strings.null_count());  // reset null count
  return results;
}
} // namespace
std::unique_ptr<column> count_characters(
  strings_column_view const& strings,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
  // Per-row character (code-point) count; nulls propagate to the output.
  auto char_counter = [] __device__(const string_view& d_str) { return d_str.length(); };
  return counts_fn(strings, char_counter, stream, mr);
}
std::unique_ptr<column> count_bytes(
  strings_column_view const& strings,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
  // Per-row byte count (UTF-8 encoded size); nulls propagate to the output.
  auto byte_counter = [] __device__(const string_view& d_str) { return d_str.size_bytes(); };
  return counts_fn(strings, byte_counter, stream, mr);
}
} // namespace detail
namespace {
/**
* @brief Sets the code-point values for each character in the output
* integer memory for each string in the strings column.
*
* For each string, there is a sub-array in d_results with length equal
* to the number of characters in that string. The function here will
* write code-point values to that section as pointed to by the
* corresponding d_offsets value calculated for that string.
*/
struct code_points_fn {
  column_device_view d_strings;  // input strings column (device view)
  size_type* d_offsets;  // offset within d_results to fill with each string's code-point values
  int32_t* d_results;    // base integer array output
  // Copies the code points of string `idx` into its slice of d_results.
  __device__ void operator()(size_type idx)
  {
    if (d_strings.is_null(idx)) return;  // null rows contribute no code points
    auto d_str  = d_strings.element<string_view>(idx);
    auto result = d_results + d_offsets[idx];
    // string_view iterators yield one code point per character
    thrust::copy(thrust::seq, d_str.begin(), d_str.end(), result);
  }
};
} // namespace
namespace detail {
//
// Builds a flat INT32 column containing the code points of every character
// of every (non-null) string, in row order.
std::unique_ptr<column> code_points(
  strings_column_view const& strings,
  rmm::cuda_stream_view stream,
  rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
  auto strings_column = column_device_view::create(strings.parent(), stream);
  auto d_column = *strings_column;
  // create offsets vector to account for each string's character length
  rmm::device_vector<size_type> offsets(strings.size() + 1);
  size_type* d_offsets = offsets.data().get();
  // inclusive scan into d_offsets+1 plus the zeroed first slot below
  // together form the exclusive prefix-sum of per-row character counts
  thrust::transform_inclusive_scan(
    rmm::exec_policy(stream),
    thrust::make_counting_iterator<size_type>(0),
    thrust::make_counting_iterator<size_type>(strings.size()),
    d_offsets + 1,
    [d_column] __device__(size_type idx) {
      size_type length = 0;
      if (!d_column.is_null(idx)) length = d_column.element<string_view>(idx).length();
      return length;
    },
    thrust::plus<size_type>());
  CUDA_TRY(hipMemsetAsync(d_offsets, 0, sizeof(size_type), stream.value()));
  // the total size is the number of characters in the entire column
  // (back() reads the last scan value back from the device)
  size_type num_characters = offsets.back();
  // create output column with no nulls
  auto results = make_numeric_column(
    data_type{type_id::INT32}, num_characters, mask_state::UNALLOCATED, stream, mr);
  auto results_view = results->mutable_view();
  // fill column with character code-point values
  auto d_results = results_view.data<int32_t>();
  // now set the ranges from each strings' character values
  thrust::for_each_n(rmm::exec_policy(stream),
                     thrust::make_counting_iterator<size_type>(0),
                     strings.size(),
                     code_points_fn{d_column, d_offsets, d_results});
  results->set_null_count(0);
  return results;
}
} // namespace detail
// external APIS
// Public API: per-row character counts; runs on the default stream.
std::unique_ptr<column> count_characters(strings_column_view const& strings,
                                         rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  return detail::count_characters(strings, rmm::cuda_stream_default, mr);
}
// Public API: per-row byte counts; runs on the default stream.
std::unique_ptr<column> count_bytes(strings_column_view const& strings,
                                    rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  return detail::count_bytes(strings, rmm::cuda_stream_default, mr);
}
// Public API: flattened code points of all strings; runs on the default stream.
std::unique_ptr<column> code_points(strings_column_view const& strings,
                                    rmm::mr::device_memory_resource* mr)
{
  CUDF_FUNC_RANGE();
  return detail::code_points(strings, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
| 2696a6249c14671428e789947a75712727f47c93.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/null_mask.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/attributes.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/transform.h>
#include <thrust/transform_scan.h>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Returns a numeric column containing lengths of each string in
* based on the provided unary function.
*
* Any null string will result in a null entry for that row in the output column.
*
* @tparam UnaryFunction Device function that returns an integer given a string_view.
* @param strings Strings instance for this operation.
* @param ufn Function returns an integer for each string.
* @param stream CUDA stream used for device memory operations and kernel launches.
* @param mr Device memory resource used to allocate the returned column's device memory
* @return New INT32 column with lengths for each string.
*/
template <typename UnaryFunction>
std::unique_ptr<column> counts_fn(strings_column_view const& strings,
UnaryFunction& ufn,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr)
{
auto strings_count = strings.size();
auto strings_column = cudf::column_device_view::create(strings.parent(), stream);
auto d_strings = *strings_column;
// create output column
auto results = std::make_unique<cudf::column>(
cudf::data_type{type_id::INT32},
strings_count,
rmm::device_buffer(strings_count * sizeof(int32_t), stream, mr),
cudf::detail::copy_bitmask(strings.parent(), stream, mr), // copy the null mask
strings.null_count());
auto results_view = results->mutable_view();
auto d_lengths = results_view.data<int32_t>();
// fill in the lengths
thrust::transform(rmm::exec_policy(stream),
thrust::make_counting_iterator<cudf::size_type>(0),
thrust::make_counting_iterator<cudf::size_type>(strings_count),
d_lengths,
[d_strings, ufn] __device__(size_type idx) {
int32_t length = 0;
if (!d_strings.is_null(idx))
length = static_cast<int32_t>(ufn(d_strings.element<string_view>(idx)));
return length;
});
results->set_null_count(strings.null_count()); // reset null count
return results;
}
} // namespace
std::unique_ptr<column> count_characters(
strings_column_view const& strings,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
auto ufn = [] __device__(const string_view& d_str) { return d_str.length(); };
return counts_fn(strings, ufn, stream, mr);
}
std::unique_ptr<column> count_bytes(
strings_column_view const& strings,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
auto ufn = [] __device__(const string_view& d_str) { return d_str.size_bytes(); };
return counts_fn(strings, ufn, stream, mr);
}
} // namespace detail
namespace {
/**
* @brief Sets the code-point values for each character in the output
* integer memory for each string in the strings column.
*
* For each string, there is a sub-array in d_results with length equal
* to the number of characters in that string. The function here will
* write code-point values to that section as pointed to by the
* corresponding d_offsets value calculated for that string.
*/
struct code_points_fn {
column_device_view d_strings;
size_type* d_offsets; // offset within d_results to fill with each string's code-point values
int32_t* d_results; // base integer array output
__device__ void operator()(size_type idx)
{
if (d_strings.is_null(idx)) return;
auto d_str = d_strings.element<string_view>(idx);
auto result = d_results + d_offsets[idx];
thrust::copy(thrust::seq, d_str.begin(), d_str.end(), result);
}
};
} // namespace
namespace detail {
//
std::unique_ptr<column> code_points(
strings_column_view const& strings,
rmm::cuda_stream_view stream,
rmm::mr::device_memory_resource* mr = rmm::mr::get_current_device_resource())
{
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
// create offsets vector to account for each string's character length
rmm::device_vector<size_type> offsets(strings.size() + 1);
size_type* d_offsets = offsets.data().get();
thrust::transform_inclusive_scan(
rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings.size()),
d_offsets + 1,
[d_column] __device__(size_type idx) {
size_type length = 0;
if (!d_column.is_null(idx)) length = d_column.element<string_view>(idx).length();
return length;
},
thrust::plus<size_type>());
CUDA_TRY(cudaMemsetAsync(d_offsets, 0, sizeof(size_type), stream.value()));
// the total size is the number of characters in the entire column
size_type num_characters = offsets.back();
// create output column with no nulls
auto results = make_numeric_column(
data_type{type_id::INT32}, num_characters, mask_state::UNALLOCATED, stream, mr);
auto results_view = results->mutable_view();
// fill column with character code-point values
auto d_results = results_view.data<int32_t>();
// now set the ranges from each strings' character values
thrust::for_each_n(rmm::exec_policy(stream),
thrust::make_counting_iterator<size_type>(0),
strings.size(),
code_points_fn{d_column, d_offsets, d_results});
results->set_null_count(0);
return results;
}
} // namespace detail
// external APIS
std::unique_ptr<column> count_characters(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::count_characters(strings, rmm::cuda_stream_default, mr);
}
std::unique_ptr<column> count_bytes(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::count_bytes(strings, rmm::cuda_stream_default, mr);
}
std::unique_ptr<column> code_points(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::code_points(strings, rmm::cuda_stream_default, mr);
}
} // namespace strings
} // namespace cudf
|
4405059ad92175cf8637ba8a5344a45979dc7218.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 %s -triple x86_64-linux-unknown -fsyntax-only -o - -verify
// RUN: %clang_cc1 %s -fcuda-is-device -triple nvptx -fsyntax-only -o - -verify
#include "Inputs/cuda.h"
// Check that we get an error if we try to call a __device__ function from a
// module initializer.
struct S {
// expected-note@-1 {{candidate constructor (the implicit copy constructor) not viable: requires 1 argument, but 0 were provided}}
// expected-note@-2 {{candidate constructor (the implicit move constructor) not viable: requires 1 argument, but 0 were provided}}
__device__ S() {}
// expected-note@-1 {{candidate constructor not viable: call to __device__ function from __host__ function}}
};
S s;
// expected-error@-1 {{no matching constructor for initialization of 'S'}}
struct T {
__host__ __device__ T() {}
};
T t; // No error, this is OK.
struct U {
// expected-note@-1 {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'int' to 'const U' for 1st argument}}
// expected-note@-2 {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'int' to 'U' for 1st argument}}
__host__ U() {}
// expected-note@-1 {{candidate constructor not viable: requires 0 arguments, but 1 was provided}}
__device__ U(int) {}
// expected-note@-1 {{candidate constructor not viable: call to __device__ function from __host__ function}}
};
U u(42);
// expected-error@-1 {{no matching constructor for initialization of 'U'}}
__device__ int device_fn() { return 42; }
// expected-note@-1 {{candidate function not viable: call to __device__ function from __host__ function}}
int n = device_fn();
// expected-error@-1 {{no matching function for call to 'device_fn'}}
// Check host/device-based overloding resolution in global variable initializer.
double pow(double, double);
__device__ double pow(double, int);
double X = pow(1.0, 1);
__device__ double Y = pow(2.0, 2); // expected-error{{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
constexpr double cpow(double, double) { return 1.0; }
constexpr __device__ double cpow(double, int) { return 2.0; }
const double CX = cpow(1.0, 1);
const __device__ double CY = cpow(2.0, 2);
struct A {
double pow(double, double);
__device__ double pow(double, int);
constexpr double cpow(double, double) const { return 1.0; }
constexpr __device__ double cpow(double, int) const { return 1.0; }
};
A a;
double AX = a.pow(1.0, 1);
__device__ double AY = a.pow(2.0, 2); // expected-error{{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
const A ca;
const double CAX = ca.cpow(1.0, 1);
const __device__ double CAY = ca.cpow(2.0, 2);
| 4405059ad92175cf8637ba8a5344a45979dc7218.cu | // RUN: %clang_cc1 %s -triple x86_64-linux-unknown -fsyntax-only -o - -verify
// RUN: %clang_cc1 %s -fcuda-is-device -triple nvptx -fsyntax-only -o - -verify
#include "Inputs/cuda.h"
// Check that we get an error if we try to call a __device__ function from a
// module initializer.
struct S {
// expected-note@-1 {{candidate constructor (the implicit copy constructor) not viable: requires 1 argument, but 0 were provided}}
// expected-note@-2 {{candidate constructor (the implicit move constructor) not viable: requires 1 argument, but 0 were provided}}
__device__ S() {}
// expected-note@-1 {{candidate constructor not viable: call to __device__ function from __host__ function}}
};
S s;
// expected-error@-1 {{no matching constructor for initialization of 'S'}}
struct T {
__host__ __device__ T() {}
};
T t; // No error, this is OK.
struct U {
// expected-note@-1 {{candidate constructor (the implicit copy constructor) not viable: no known conversion from 'int' to 'const U' for 1st argument}}
// expected-note@-2 {{candidate constructor (the implicit move constructor) not viable: no known conversion from 'int' to 'U' for 1st argument}}
__host__ U() {}
// expected-note@-1 {{candidate constructor not viable: requires 0 arguments, but 1 was provided}}
__device__ U(int) {}
// expected-note@-1 {{candidate constructor not viable: call to __device__ function from __host__ function}}
};
U u(42);
// expected-error@-1 {{no matching constructor for initialization of 'U'}}
__device__ int device_fn() { return 42; }
// expected-note@-1 {{candidate function not viable: call to __device__ function from __host__ function}}
int n = device_fn();
// expected-error@-1 {{no matching function for call to 'device_fn'}}
// Check host/device-based overloding resolution in global variable initializer.
double pow(double, double);
__device__ double pow(double, int);
double X = pow(1.0, 1);
__device__ double Y = pow(2.0, 2); // expected-error{{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
constexpr double cpow(double, double) { return 1.0; }
constexpr __device__ double cpow(double, int) { return 2.0; }
const double CX = cpow(1.0, 1);
const __device__ double CY = cpow(2.0, 2);
struct A {
double pow(double, double);
__device__ double pow(double, int);
constexpr double cpow(double, double) const { return 1.0; }
constexpr __device__ double cpow(double, int) const { return 1.0; }
};
A a;
double AX = a.pow(1.0, 1);
__device__ double AY = a.pow(2.0, 2); // expected-error{{dynamic initialization is not supported for __device__, __constant__, __shared__, and __managed__ variables}}
const A ca;
const double CAX = ca.cpow(1.0, 1);
const __device__ double CAY = ca.cpow(2.0, 2);
|
492af1c958058a24fa60a06cd35d0437db3cc7de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void MatrixMulKernelTiled(float* Md, float* Nd, float* Pd, int Width)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the Pd element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the Md and Nd tiles required to compute the Pd element
for (int m = 0; m < Width/TILE_WIDTH; ++m) {
// Collaborative loading of Md and Nd tiles into shared memory
Mds[ty][tx] = Md[Row*Width + (m*TILE_WIDTH + tx)];
Nds[ty][tx] = Nd[Col + (m*TILE_WIDTH + ty)*Width];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
Pvalue += Mds[ty][k] * Nds[k][tx];
__syncthreads();
}
Pd[Row*Width+Col] = Pvalue;
} | 492af1c958058a24fa60a06cd35d0437db3cc7de.cu | #include "includes.h"
__global__ void MatrixMulKernelTiled(float* Md, float* Nd, float* Pd, int Width)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the Pd element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the Md and Nd tiles required to compute the Pd element
for (int m = 0; m < Width/TILE_WIDTH; ++m) {
// Collaborative loading of Md and Nd tiles into shared memory
Mds[ty][tx] = Md[Row*Width + (m*TILE_WIDTH + tx)];
Nds[ty][tx] = Nd[Col + (m*TILE_WIDTH + ty)*Width];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
Pvalue += Mds[ty][k] * Nds[k][tx];
__syncthreads();
}
Pd[Row*Width+Col] = Pvalue;
} |
bd49542781fe993ef409d8fd69f4f4b0a22adc9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void random(double *x, hiprandState_t *global_state){
int tid = blockIdx.x;
hiprandState_t local_state;
local_state = global_state[tid];
x[tid] = (double) hiprand(&local_state);
global_state[tid] = local_state;
} | bd49542781fe993ef409d8fd69f4f4b0a22adc9d.cu | #include "includes.h"
__global__ void random(double *x, curandState *global_state){
int tid = blockIdx.x;
curandState local_state;
local_state = global_state[tid];
x[tid] = (double) curand(&local_state);
global_state[tid] = local_state;
} |
b849042f2f7086f5be6cb1ec6406c46138f7d367.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Config.h>
#include <ATen/Dispatch.h>
#include <ATen/Utils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/detail/CUDAHooksInterface.h>
#include <ATen/native/SpectralOpsUtils.h>
#include <ATen/native/hip/CuFFTUtils.h>
#include <ATen/native/hip/CuFFTPlanCache.h>
#include <THH/THHTensorSort.cuh>
#include <THH/THHThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include <vector>
#include <cmath>
namespace at { namespace native {
using namespace at::native::detail;
// In real-to-complex transform, cuFFT only fills half of the values due to
// conjugate symmetry. See native/SpectralUtils.h for more details.
// The following structs are used to fill in the other half with symmetry in
// case of real-to-complex transform with onesided=False flag.
// See NOTE [ Fourier Transform Conjugate Symmetry ] in native/SpectralOpsUtils.h.
// counting_iterator => index to fill
struct cnt_to_dst_idx_functor : public thrust::unary_function<int64_t, int64_t>
{
int64_t last_dim_size;
int64_t last_dim_start_slice;
int64_t last_dim_to_fill_size;
cnt_to_dst_idx_functor(int64_t last_dim_size, int64_t last_dim_start_slice) :
last_dim_size(last_dim_size), last_dim_start_slice(last_dim_start_slice),
last_dim_to_fill_size(last_dim_size - last_dim_start_slice) {}
// HIP wants __host__ __device__ tag, CUDA does not
#ifdef __HIP_PLATFORM_HCC__
__host__ __device__
#endif
cnt_to_dst_idx_functor & operator=(const cnt_to_dst_idx_functor&) = default;
__host__ __device__ __forceinline__
int64_t operator()(const int64_t& i) const
{
int64_t imag = i % 2;
int64_t idx = i / 2;
int64_t num_dim = idx / last_dim_to_fill_size;
int64_t slice_idx = idx % last_dim_to_fill_size;
return (num_dim * last_dim_size + last_dim_start_slice + slice_idx) * 2 + imag;
}
};
// index to fill => index to read from
template <typename scalar_t>
struct dst_idx_to_src_functor : public thrust::unary_function<int64_t, scalar_t>
{
// output can have at most dim 5 (batch + 3 signal dim + real/imag)
int64_t sizes[max_rank + 2], strides[max_rank + 2];
const int64_t signal_ndim;
scalar_t *data; // device ptr
dst_idx_to_src_functor(const Tensor& batched_complex_signal)
: signal_ndim(batched_complex_signal.dim() - 1),
data(batched_complex_signal.data_ptr<scalar_t>()) {
for (int64_t i = 0; i < signal_ndim; i++) {
sizes[i] = batched_complex_signal.size(i);
strides[i] = batched_complex_signal.stride(i);
}
}
__device__ __forceinline__
scalar_t operator()(const int64_t& write_idx_with_imag) const
{
int64_t imag = write_idx_with_imag % 2;
// all but first (batch) and last (real/imag) dims need to be reflected
int64_t read_idx = 0;
int64_t remainder = write_idx_with_imag - imag;
int64_t dim_idx, dim_stride;
for (int64_t i = 0; i < signal_ndim; i++) {
dim_stride = strides[i];
dim_idx = remainder / dim_stride;
if (i == 0) {
read_idx += dim_idx * dim_stride;
} else if (dim_idx != 0) {
read_idx += (sizes[i] - dim_idx) * dim_stride;
}
remainder = remainder % dim_stride;
}
if (imag) {
return -data[read_idx + 1];
} else {
return data[read_idx];
}
}
};
// input should be a contiguous batched tensor of same size as full (twosided)
// signals, but only contains half (onesided) of the values.
// This function modifies inplace.
__forceinline__
static void _fft_fill_with_conjugate_symmetry_(Tensor& input,
int64_t size_last_dim, int64_t last_dim_start_slice) {
if (last_dim_start_slice >= size_last_dim) {
return;
}
// copy
int64_t n = input.numel() / size_last_dim * (size_last_dim - last_dim_start_slice);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::hip::par(allocator).on(stream);
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "_fft_fill_with_conjugate_symmetry_", [&] {
typedef thrust::device_ptr<scalar_t> device_ptr;
typedef thrust::counting_iterator<int64_t> counter;
typedef thrust::transform_iterator<cnt_to_dst_idx_functor, counter> dst_idx_iterator;
typedef thrust::permutation_iterator<device_ptr, dst_idx_iterator> dst_iterator;
typedef thrust::transform_iterator<dst_idx_to_src_functor<scalar_t>, dst_idx_iterator> src_iterator;
dst_idx_iterator dst_idxs(counter(0), cnt_to_dst_idx_functor(size_last_dim, last_dim_start_slice));
auto data = device_ptr(input.data_ptr<scalar_t>());
dst_iterator dsts(data, dst_idxs);
src_iterator srcs(dst_idxs, dst_idx_to_src_functor<scalar_t>(input));
thrust::copy_n(policy, srcs, n, dsts);
});
}
// NOTE [ cuFFT Embedded Strides ]
//
// cuFFT supports a subset of arbitrary strides via their "advanced data layout"
// option (http://docs.nvidia.com/cuda/cufft/index.html#advanced-data-layout).
// Specifically, these are tensors that can be viewed as subtensors resulted
// from slicing a larger contiguous tensors. For such input tensors, let the
// sizes of the enclosing tensor be `inembed`, and we can have in 3d case:
//
// input[x, y, z] = input[((x * inembed[1] + y) * inembed[2] + z)]
//
// Above is the simplified formula ignoring the batch dimension. In fact, the
// last dimension of the enclosing tensor doesn't have to be contiguous, i.e.,
// it can be greater than 1. Then one can set the base stride for the enclosing
// tensor with `istride`. Then we have
//
// input[x, y, z] = input[((x * inembed[1] + y) * inembed[2] + z) * istride]
//
// For example, consider
//
// enclosing = torch.zeros(6, 8, 10) # contiguous
// input = enclosing[:4, 2:6, 6:]
// input.size() # [ 4, 4, 4]
// input.stride() # [80, 10, 1]
// # inembed = [6, 8, 10]
// input[2, 1, 3] = input[((2 * 8) + 1) * 10 + 3] # using above formula
// = input[173]
// = input[2 * 80 + 1 * 10 + 1 * 3] # using strides directly
//
// Generally, the embedded strides can be computed as
//
// embed[i] = stride[i - 1] / stride[i].
//
// Note that the value of embed[0] isn't used to compute indices and doesn't
// matter.
//
// Contrary to advanced data layout, simple layout means that *embeds have
// unit-strides. In particular, unit-stride refers to that the input and output
// tensors being contiguous, and that the strides at the innermost signal
// dimension being unit (1) w.r.t. the corresponding data type.
static inline Tensor _run_cufft(
const CuFFTConfig &config, Tensor& input, int64_t signal_ndim,
bool complex_input, bool complex_output, bool inverse,
IntArrayRef checked_signal_sizes, bool normalized, bool onesided,
IntArrayRef output_sizes, bool input_was_cloned
) {
if (config.should_clone_input() && !input_was_cloned) {
input = input.clone(at::MemoryFormat::Contiguous);
}
auto& plan = config.plan();
auto& ctx = at::globalContext();
// set output
auto output = at::empty(output_sizes, input.options());
// set to current stream
CUFFT_CHECK(hipfftSetStream(plan, at::hip::getCurrentHIPStreamMasqueradingAsCUDA()));
auto ws = at::empty({ config.workspace_size() }, at::device(at::kCUDA).dtype(at::kByte));
CUFFT_CHECK(hipfftSetWorkArea(plan, ws.data_ptr()));
// run
#ifdef __HIP_PLATFORM_HCC__
if (input.scalar_type() == ScalarType::Float) {
if (complex_input && complex_output) {
CUFFT_CHECK(hipfftExecC2C(plan, static_cast<hipfftComplex*>(input.data_ptr()),
static_cast<hipfftComplex*>(output.data_ptr()),
inverse ? HIPFFT_BACKWARD : HIPFFT_FORWARD));
} else if (complex_input && !complex_output) {
CUFFT_CHECK(hipfftExecC2R(plan, static_cast<hipfftComplex*>(input.data_ptr()),
static_cast<hipfftReal*>(output.data_ptr())));
} else if (!complex_input && complex_output) {
CUFFT_CHECK(hipfftExecR2C(plan, static_cast<hipfftReal*>(input.data_ptr()),
static_cast<hipfftComplex*>(output.data_ptr())));
} else {
AT_ERROR("hipFFT doesn't support r2r (float)");
}
} else if (input.scalar_type() == ScalarType::Double) {
if (complex_input && complex_output) {
CUFFT_CHECK(hipfftExecZ2Z(plan, static_cast<hipfftDoubleComplex*>(input.data_ptr()),
static_cast<hipfftDoubleComplex*>(output.data_ptr()),
inverse ? HIPFFT_BACKWARD : HIPFFT_FORWARD));
} else if (complex_input && !complex_output) {
CUFFT_CHECK(hipfftExecZ2D(plan, static_cast<hipfftDoubleComplex*>(input.data_ptr()),
static_cast<hipfftDoubleReal*>(output.data_ptr())));
} else if (!complex_input && complex_output) {
CUFFT_CHECK(hipfftExecD2Z(plan, static_cast<hipfftDoubleReal*>(input.data_ptr()),
static_cast<hipfftDoubleComplex*>(output.data_ptr())));
} else {
AT_ERROR("hipFFT doesn't support r2r (double)");
}
} else {
std::ostringstream ss;
ss << "hipFFT doesn't support tensor of type: "
<< toString(input.scalar_type());
AT_ERROR(ss.str());
}
#else
CUFFT_CHECK(cufftXtExec(plan, input.data_ptr(), output.data_ptr(),
inverse ? HIPFFT_BACKWARD : HIPFFT_FORWARD));
#endif
// rescale if needed by normalized flag or inverse transform
auto size_last_signal_dim = checked_signal_sizes[signal_ndim - 1];
if (normalized || inverse) {
auto signal_numel = at::prod_intlist(checked_signal_sizes);
double scale_denom;
if (normalized) {
scale_denom = std::sqrt(static_cast<double>(signal_numel));
} else {
scale_denom = static_cast<double>(signal_numel);
}
if (!complex_input && complex_output && !onesided) {
auto end_data_slice = infer_ft_real_to_complex_onesided_size(size_last_signal_dim);
output.narrow(signal_ndim, 0, end_data_slice).div_(scale_denom);
} else {
output.div_(scale_denom);
}
}
// if needed, fill out the other half using conjugate symmetry
if (!complex_input && complex_output && !onesided) {
auto start_slice = infer_ft_real_to_complex_onesided_size(size_last_signal_dim);
_fft_fill_with_conjugate_symmetry_(output, size_last_signal_dim, start_slice);
}
return output;
}
// The cuFFT plan cache
// unique_ptr for nullability and to avoid reference invalidation on vector resize
static std::vector<std::unique_ptr<CuFFTParamsLRUCache>> plan_caches;
static std::mutex plan_caches_mutex;
static inline
CuFFTParamsLRUCache &cufft_get_plan_cache(int64_t device_index) {
std::lock_guard<std::mutex> guard(plan_caches_mutex);
AT_ASSERT(device_index >= 0);
if (device_index >= plan_caches.size()) {
plan_caches.resize(device_index + 1);
}
if (!plan_caches[device_index]) {
plan_caches[device_index] = std::make_unique<CuFFTParamsLRUCache>();
}
return *plan_caches[device_index];
}
namespace detail {
int64_t cufft_get_plan_cache_max_size_impl(int64_t device_index) {
TORCH_CHECK(0 <= device_index && device_index < at::detail::getCUDAHooks().getNumGPUs(),
"cufft_get_plan_cache_max_size: expected 0 <= device_index < ",
at::detail::getCUDAHooks().getNumGPUs(), "], but got device_index=",
device_index);
return cufft_get_plan_cache(device_index).max_size();
}
void cufft_set_plan_cache_max_size_impl(int64_t device_index, int64_t max_size) {
TORCH_CHECK(0 <= device_index && device_index < at::detail::getCUDAHooks().getNumGPUs(),
"cufft_set_plan_cache_max_size: expected 0 <= device_index < ",
at::detail::getCUDAHooks().getNumGPUs(), "], but got device_index=",
device_index);
return cufft_get_plan_cache(device_index).resize(max_size);
}
int64_t cufft_get_plan_cache_size_impl(int64_t device_index) {
TORCH_CHECK(0 <= device_index && device_index < at::detail::getCUDAHooks().getNumGPUs(),
"cufft_get_plan_cache_size: expected 0 <= device_index < ",
at::detail::getCUDAHooks().getNumGPUs(), "], but got device_index=",
device_index);
return cufft_get_plan_cache(device_index).size();
}
void cufft_clear_plan_cache_impl(int64_t device_index) {
TORCH_CHECK(0 <= device_index && device_index < at::detail::getCUDAHooks().getNumGPUs(),
"cufft_clear_plan_cache: expected 0 <= device_index < ",
at::detail::getCUDAHooks().getNumGPUs(), "], but got device_index=",
device_index);
return cufft_get_plan_cache(device_index).clear();
}
} // namespace at::native::detail
// cuFFT
// Currently not utilizing multi GPUs so this can be potentially sped up.
Tensor _fft_cufft(const Tensor& self, int64_t signal_ndim,
bool complex_input, bool complex_output, bool inverse,
IntArrayRef checked_signal_sizes, bool normalized, bool onesided,
IntArrayRef output_sizes) {
CuFFTParamsLRUCache& plan_cache = cufft_get_plan_cache(self.device().index());
Tensor input = self;
bool input_was_cloned = false;
// Slice when twosided complex-to-real. This is not always needed because we
// calculate the inembed. But it will benefit us in certain cases where we
// clone the input tensor.
//
// See NOTE [ cuFFT Embedded Strides ].
// See NOTE [ Fourier Transform Conjugate Symmetry ] in native/SpectralOpsUtils.h.
if (complex_input && !complex_output && !onesided) {
auto onesided_size = infer_ft_real_to_complex_onesided_size(checked_signal_sizes[signal_ndim - 1]);
input = input.narrow(signal_ndim, 0, onesided_size);
}
// cuFFT requires input and output data pointers to complex type aligned.
// Our newly allocated output tensor is always 512 bytes aligned so it is fine
// (see kRoundSmall and kRoundLarge in THCCachingAllocator.cpp), but we do
// need to check input tensor to make sure that it is not unaligned, e.g.,
// from a slicing.
auto complex_size_bytes = 2 * input.element_size();
if (reinterpret_cast<std::uintptr_t>(input.data_ptr()) % complex_size_bytes != 0) {
input = input.clone(at::MemoryFormat::Contiguous);
input_was_cloned = true;
}
// Now that we have done error check and data_ptr checks, we delegate all
// further cuFFT parameter computation and plan creation to the helper class
// CuFFTConfig in CuFFTUtils.h.
// If plan caching is enabled, we check the cache. Note that this accesses
// plan_cache.max_size() and thus makes this function less functional.
// However, integrating additional arguments into the "public" level c++ APIs,
// e.g., irfft, is difficult as we have a long call sequence looking like
// irfft --> _fft --> _fft_with_size --dispatching-to-> _fft_cufft
// This read is not locked for perf reason. Shouldn't matter too much because
// we check again after acquiring the lock.
if (plan_cache.max_size() > 0) {
CuFFTParams params;
setCuFFTParams(¶ms, input, signal_ndim, complex_input,
complex_output, checked_signal_sizes, onesided);
std::lock_guard<std::mutex> guard(plan_cache.mutex);
if (plan_cache.max_size() > 0) { // check again after acquiring the lock
const CuFFTConfig &config = plan_cache.try_emplace_value(std::move(params),
input, signal_ndim, complex_input,
complex_output, checked_signal_sizes,
onesided, output_sizes);
return _run_cufft(config, input, signal_ndim, complex_input,
complex_output, inverse, checked_signal_sizes, normalized,
onesided, output_sizes, input_was_cloned);
}
}
CuFFTConfig config(input, signal_ndim, complex_input, complex_output,
checked_signal_sizes, onesided, output_sizes);
return _run_cufft(config, input, signal_ndim, complex_input,
complex_output, inverse, checked_signal_sizes, normalized,
onesided, output_sizes, input_was_cloned);
}
}} // at::native
| b849042f2f7086f5be6cb1ec6406c46138f7d367.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Config.h>
#include <ATen/Dispatch.h>
#include <ATen/Utils.h>
#include <ATen/NativeFunctions.h>
#include <ATen/detail/CUDAHooksInterface.h>
#include <ATen/native/SpectralOpsUtils.h>
#include <ATen/native/cuda/CuFFTUtils.h>
#include <ATen/native/cuda/CuFFTPlanCache.h>
#include <THC/THCTensorSort.cuh>
#include <THC/THCThrustAllocator.cuh>
#include <thrust/execution_policy.h>
#include <thrust/unique.h>
#include <cufft.h>
#include <cufftXt.h>
#include <vector>
#include <cmath>
namespace at { namespace native {
using namespace at::native::detail;
// In real-to-complex transform, cuFFT only fills half of the values due to
// conjugate symmetry. See native/SpectralUtils.h for more details.
// The following structs are used to fill in the other half with symmetry in
// case of real-to-complex transform with onesided=False flag.
// See NOTE [ Fourier Transform Conjugate Symmetry ] in native/SpectralOpsUtils.h.
// counting_iterator => index to fill
struct cnt_to_dst_idx_functor : public thrust::unary_function<int64_t, int64_t>
{
int64_t last_dim_size;
int64_t last_dim_start_slice;
int64_t last_dim_to_fill_size;
cnt_to_dst_idx_functor(int64_t last_dim_size, int64_t last_dim_start_slice) :
last_dim_size(last_dim_size), last_dim_start_slice(last_dim_start_slice),
last_dim_to_fill_size(last_dim_size - last_dim_start_slice) {}
// HIP wants __host__ __device__ tag, CUDA does not
#ifdef __HIP_PLATFORM_HCC__
__host__ __device__
#endif
cnt_to_dst_idx_functor & operator=(const cnt_to_dst_idx_functor&) = default;
__host__ __device__ __forceinline__
int64_t operator()(const int64_t& i) const
{
int64_t imag = i % 2;
int64_t idx = i / 2;
int64_t num_dim = idx / last_dim_to_fill_size;
int64_t slice_idx = idx % last_dim_to_fill_size;
return (num_dim * last_dim_size + last_dim_start_slice + slice_idx) * 2 + imag;
}
};
// index to fill => index to read from
template <typename scalar_t>
struct dst_idx_to_src_functor : public thrust::unary_function<int64_t, scalar_t>
{
// output can have at most dim 5 (batch + 3 signal dim + real/imag)
int64_t sizes[max_rank + 2], strides[max_rank + 2];
const int64_t signal_ndim;
scalar_t *data; // device ptr
dst_idx_to_src_functor(const Tensor& batched_complex_signal)
: signal_ndim(batched_complex_signal.dim() - 1),
data(batched_complex_signal.data_ptr<scalar_t>()) {
for (int64_t i = 0; i < signal_ndim; i++) {
sizes[i] = batched_complex_signal.size(i);
strides[i] = batched_complex_signal.stride(i);
}
}
__device__ __forceinline__
scalar_t operator()(const int64_t& write_idx_with_imag) const
{
int64_t imag = write_idx_with_imag % 2;
// all but first (batch) and last (real/imag) dims need to be reflected
int64_t read_idx = 0;
int64_t remainder = write_idx_with_imag - imag;
int64_t dim_idx, dim_stride;
for (int64_t i = 0; i < signal_ndim; i++) {
dim_stride = strides[i];
dim_idx = remainder / dim_stride;
if (i == 0) {
read_idx += dim_idx * dim_stride;
} else if (dim_idx != 0) {
read_idx += (sizes[i] - dim_idx) * dim_stride;
}
remainder = remainder % dim_stride;
}
if (imag) {
return -data[read_idx + 1];
} else {
return data[read_idx];
}
}
};
// input should be a contiguous batched tensor of same size as full (twosided)
// signals, but only contains half (onesided) of the values.
// This function modifies inplace.
//
// Fills the missing slice input[..., last_dim_start_slice:, :] with the
// conjugate-symmetric reflection of the present onesided half, via a single
// thrust::copy_n over a transform/permutation iterator pipeline.
__forceinline__
static void _fft_fill_with_conjugate_symmetry_(Tensor& input,
    int64_t size_last_dim, int64_t last_dim_start_slice) {
  // nothing to fill if the "missing" slice is empty
  if (last_dim_start_slice >= size_last_dim) {
    return;
  }
  // copy
  // n = number of scalar elements (reals and imags) that must be written
  int64_t n = input.numel() / size_last_dim * (size_last_dim - last_dim_start_slice);
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
  auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
  auto policy = thrust::cuda::par(allocator).on(stream);  // run thrust on the current ATen stream
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "_fft_fill_with_conjugate_symmetry_", [&] {
    typedef thrust::device_ptr<scalar_t> device_ptr;
    typedef thrust::counting_iterator<int64_t> counter;
    typedef thrust::transform_iterator<cnt_to_dst_idx_functor, counter> dst_idx_iterator;
    typedef thrust::permutation_iterator<device_ptr, dst_idx_iterator> dst_iterator;
    typedef thrust::transform_iterator<dst_idx_to_src_functor<scalar_t>, dst_idx_iterator> src_iterator;
    // counter -> flat destination (write) indices inside the full signal
    dst_idx_iterator dst_idxs(counter(0), cnt_to_dst_idx_functor(size_last_dim, last_dim_start_slice));
    auto data = device_ptr(input.data_ptr<scalar_t>());
    dst_iterator dsts(data, dst_idxs);
    // destination index -> conjugated source value (see dst_idx_to_src_functor)
    src_iterator srcs(dst_idxs, dst_idx_to_src_functor<scalar_t>(input));
    thrust::copy_n(policy, srcs, n, dsts);
  });
}
// NOTE [ cuFFT Embedded Strides ]
//
// cuFFT supports a subset of arbitrary strides via their "advanced data layout"
// option (http://docs.nvidia.com/cuda/cufft/index.html#advanced-data-layout).
// Specifically, these are tensors that can be viewed as subtensors resulted
// from slicing a larger contiguous tensors. For such input tensors, let the
// sizes of the enclosing tensor be `inembed`, and we can have in 3d case:
//
// input[x, y, z] = input[((x * inembed[1] + y) * inembed[2] + z)]
//
// Above is the simplified formula ignoring the batch dimension. In fact, the
// last dimension of the enclosing tensor doesn't have to be contiguous, i.e.,
// it can be greater than 1. Then one can set the base stride for the enclosing
// tensor with `istride`. Then we have
//
// input[x, y, z] = input[((x * inembed[1] + y) * inembed[2] + z) * istride]
//
// For example, consider
//
// enclosing = torch.zeros(6, 8, 10) # contiguous
// input = enclosing[:4, 2:6, 6:]
// input.size() # [ 4, 4, 4]
// input.stride() # [80, 10, 1]
// # inembed = [6, 8, 10]
// input[2, 1, 3] = input[((2 * 8) + 1) * 10 + 3] # using above formula
// = input[173]
//                  = input[2 * 80 + 1 * 10 + 3 * 1]  # using strides directly
//
// Generally, the embedded strides can be computed as
//
// embed[i] = stride[i - 1] / stride[i].
//
// Note that the value of embed[0] isn't used to compute indices and doesn't
// matter.
//
// Contrary to advanced data layout, simple layout means that *embeds have
// unit-strides. In particular, unit-stride refers to that the input and output
// tensors being contiguous, and that the strides at the innermost signal
// dimension being unit (1) w.r.t. the corresponding data type.
// Execute a prepared cuFFT/hipFFT plan on `input`, writing into a freshly
// allocated tensor of `output_sizes`, which is returned.
//
// Because cuFFT computes unnormalized transforms, this also rescales the
// output as required by the `normalized` / `inverse` flags, and for a
// twosided real-to-complex transform fills in the redundant half via
// conjugate symmetry. `input` may be replaced by a contiguous clone when the
// plan requires it and the caller has not already cloned it.
static inline Tensor _run_cufft(
    const CuFFTConfig &config, Tensor& input, int64_t signal_ndim,
    bool complex_input, bool complex_output, bool inverse,
    IntArrayRef checked_signal_sizes, bool normalized, bool onesided,
    IntArrayRef output_sizes, bool input_was_cloned
) {
  if (config.should_clone_input() && !input_was_cloned) {
    input = input.clone(at::MemoryFormat::Contiguous);
  }
  auto& plan = config.plan();
  // set output
  auto output = at::empty(output_sizes, input.options());
  // bind the plan to the current stream and give it a scratch workspace
  CUFFT_CHECK(cufftSetStream(plan, at::cuda::getCurrentCUDAStream()));
  auto ws = at::empty({ config.workspace_size() }, at::device(at::kCUDA).dtype(at::kByte));
  CUFFT_CHECK(cufftSetWorkArea(plan, ws.data_ptr()));
  // run
#ifdef __HIP_PLATFORM_HCC__
  // hipFFT has no cufftXtExec equivalent, so dispatch explicitly on the dtype
  // and on the C2C / C2R / R2C transform kind.
  if (input.scalar_type() == ScalarType::Float) {
    if (complex_input && complex_output) {
      CUFFT_CHECK(hipfftExecC2C(plan, static_cast<hipfftComplex*>(input.data_ptr()),
        static_cast<hipfftComplex*>(output.data_ptr()),
        inverse ? HIPFFT_BACKWARD : HIPFFT_FORWARD));
    } else if (complex_input && !complex_output) {
      CUFFT_CHECK(hipfftExecC2R(plan, static_cast<hipfftComplex*>(input.data_ptr()),
        static_cast<hipfftReal*>(output.data_ptr())));
    } else if (!complex_input && complex_output) {
      CUFFT_CHECK(hipfftExecR2C(plan, static_cast<hipfftReal*>(input.data_ptr()),
        static_cast<hipfftComplex*>(output.data_ptr())));
    } else {
      AT_ERROR("hipFFT doesn't support r2r (float)");
    }
  } else if (input.scalar_type() == ScalarType::Double) {
    if (complex_input && complex_output) {
      CUFFT_CHECK(hipfftExecZ2Z(plan, static_cast<hipfftDoubleComplex*>(input.data_ptr()),
        static_cast<hipfftDoubleComplex*>(output.data_ptr()),
        inverse ? HIPFFT_BACKWARD : HIPFFT_FORWARD));
    } else if (complex_input && !complex_output) {
      CUFFT_CHECK(hipfftExecZ2D(plan, static_cast<hipfftDoubleComplex*>(input.data_ptr()),
        static_cast<hipfftDoubleReal*>(output.data_ptr())));
    } else if (!complex_input && complex_output) {
      CUFFT_CHECK(hipfftExecD2Z(plan, static_cast<hipfftDoubleReal*>(input.data_ptr()),
        static_cast<hipfftDoubleComplex*>(output.data_ptr())));
    } else {
      AT_ERROR("hipFFT doesn't support r2r (double)");
    }
  } else {
    std::ostringstream ss;
    ss << "hipFFT doesn't support tensor of type: "
       << toString(input.scalar_type());
    AT_ERROR(ss.str());
  }
#else
  CUFFT_CHECK(cufftXtExec(plan, input.data_ptr(), output.data_ptr(),
    inverse ? CUFFT_INVERSE : CUFFT_FORWARD));
#endif
  // rescale if needed by normalized flag or inverse transform:
  //   normalized -> divide by sqrt(N), (unnormalized) inverse -> divide by N
  auto size_last_signal_dim = checked_signal_sizes[signal_ndim - 1];
  if (normalized || inverse) {
    auto signal_numel = at::prod_intlist(checked_signal_sizes);
    double scale_denom;
    if (normalized) {
      scale_denom = std::sqrt(static_cast<double>(signal_numel));
    } else {
      scale_denom = static_cast<double>(signal_numel);
    }
    if (!complex_input && complex_output && !onesided) {
      // only the onesided half holds data so far; scale just that slice, the
      // conjugate-symmetry fill below reads the already-scaled values
      auto end_data_slice = infer_ft_real_to_complex_onesided_size(size_last_signal_dim);
      output.narrow(signal_ndim, 0, end_data_slice).div_(scale_denom);
    } else {
      output.div_(scale_denom);
    }
  }
  // if needed, fill out the other half using conjugate symmetry
  if (!complex_input && complex_output && !onesided) {
    auto start_slice = infer_ft_real_to_complex_onesided_size(size_last_signal_dim);
    _fft_fill_with_conjugate_symmetry_(output, size_last_signal_dim, start_slice);
  }
  return output;
}
// The cuFFT plan cache
// unique_ptr for nullability and to avoid reference invalidation on vector resize
static std::vector<std::unique_ptr<CuFFTParamsLRUCache>> plan_caches;
static std::mutex plan_caches_mutex;

// Returns the plan cache for `device_index`, growing the per-device vector and
// creating the cache lazily on first use. Thread-safe via plan_caches_mutex.
static inline
CuFFTParamsLRUCache &cufft_get_plan_cache(int64_t device_index) {
  std::lock_guard<std::mutex> guard(plan_caches_mutex);
  AT_ASSERT(device_index >= 0);
  // cast avoids a signed/unsigned comparison between int64_t and size_t
  if (device_index >= static_cast<int64_t>(plan_caches.size())) {
    plan_caches.resize(device_index + 1);
  }
  if (!plan_caches[device_index]) {
    plan_caches[device_index] = std::make_unique<CuFFTParamsLRUCache>();
  }
  return *plan_caches[device_index];
}
namespace detail {

// Shared validation for the plan-cache entry points below: raises unless
// 0 <= device_index < number of visible CUDA devices.
static void check_plan_cache_device_index(const char* fn_name, int64_t device_index) {
  const int64_t num_gpus = at::detail::getCUDAHooks().getNumGPUs();
  TORCH_CHECK(0 <= device_index && device_index < num_gpus,
              fn_name, ": expected 0 <= device_index < ", num_gpus,
              ", but got device_index=", device_index);
}

// Maximum number of plans the cache for `device_index` may hold.
int64_t cufft_get_plan_cache_max_size_impl(int64_t device_index) {
  check_plan_cache_device_index("cufft_get_plan_cache_max_size", device_index);
  return cufft_get_plan_cache(device_index).max_size();
}

// Resize the plan cache for `device_index` to hold at most `max_size` plans.
void cufft_set_plan_cache_max_size_impl(int64_t device_index, int64_t max_size) {
  check_plan_cache_device_index("cufft_set_plan_cache_max_size", device_index);
  cufft_get_plan_cache(device_index).resize(max_size);
}

// Number of plans currently cached for `device_index`.
int64_t cufft_get_plan_cache_size_impl(int64_t device_index) {
  check_plan_cache_device_index("cufft_get_plan_cache_size", device_index);
  return cufft_get_plan_cache(device_index).size();
}

// Drop all cached plans for `device_index`.
void cufft_clear_plan_cache_impl(int64_t device_index) {
  check_plan_cache_device_index("cufft_clear_plan_cache", device_index);
  cufft_get_plan_cache(device_index).clear();
}

} // namespace at::native::detail
// cuFFT
// Currently not utilizing multi GPUs so this can be potentially sped up.
//
// Entry point for cuFFT-backed FFTs: normalizes the input layout (narrowing a
// twosided C2R input to its onesided half, cloning complex-unaligned inputs),
// then obtains a plan -- from the per-device LRU cache when caching is
// enabled -- and delegates execution to _run_cufft.
Tensor _fft_cufft(const Tensor& self, int64_t signal_ndim,
                  bool complex_input, bool complex_output, bool inverse,
                  IntArrayRef checked_signal_sizes, bool normalized, bool onesided,
                  IntArrayRef output_sizes) {
  CuFFTParamsLRUCache& plan_cache = cufft_get_plan_cache(self.device().index());
  Tensor input = self;
  bool input_was_cloned = false;
  // Slice when twosided complex-to-real. This is not always needed because we
  // calculate the inembed. But it will benefit us in certain cases where we
  // clone the input tensor.
  //
  // See NOTE [ cuFFT Embedded Strides ].
  // See NOTE [ Fourier Transform Conjugate Symmetry ] in native/SpectralOpsUtils.h.
  if (complex_input && !complex_output && !onesided) {
    auto onesided_size = infer_ft_real_to_complex_onesided_size(checked_signal_sizes[signal_ndim - 1]);
    input = input.narrow(signal_ndim, 0, onesided_size);
  }
  // cuFFT requires input and output data pointers to complex type aligned.
  // Our newly allocated output tensor is always 512 bytes aligned so it is fine
  // (see kRoundSmall and kRoundLarge in THCCachingAllocator.cpp), but we do
  // need to check input tensor to make sure that it is not unaligned, e.g.,
  // from a slicing.
  auto complex_size_bytes = 2 * input.element_size();
  if (reinterpret_cast<std::uintptr_t>(input.data_ptr()) % complex_size_bytes != 0) {
    input = input.clone(at::MemoryFormat::Contiguous);
    input_was_cloned = true;
  }
  // Now that we have done error check and data_ptr checks, we delegate all
  // further cuFFT parameter computation and plan creation to the helper class
  // CuFFTConfig in CuFFTUtils.h.
  // If plan caching is enabled, we check the cache. Note that this accesses
  // plan_cache.max_size() and thus makes this function less functional.
  // However, integrating additional arguments into the "public" level c++ APIs,
  // e.g., irfft, is difficult as we have a long call sequence looking like
  //   irfft --> _fft --> _fft_with_size --dispatching-to-> _fft_cufft
  // This read is not locked for perf reason. Shouldn't matter too much because
  // we check again after acquiring the lock.
  if (plan_cache.max_size() > 0) {
    CuFFTParams params;
    // NB: `&params` was mangled to `¶ms` by an encoding bug; restored here.
    setCuFFTParams(&params, input, signal_ndim, complex_input,
                   complex_output, checked_signal_sizes, onesided);
    std::lock_guard<std::mutex> guard(plan_cache.mutex);
    if (plan_cache.max_size() > 0) {  // check again after acquiring the lock
      const CuFFTConfig &config = plan_cache.try_emplace_value(std::move(params),
                                                               input, signal_ndim, complex_input,
                                                               complex_output, checked_signal_sizes,
                                                               onesided, output_sizes);
      return _run_cufft(config, input, signal_ndim, complex_input,
                        complex_output, inverse, checked_signal_sizes, normalized,
                        onesided, output_sizes, input_was_cloned);
    }
  }
  // caching disabled: build a one-shot plan on the stack
  CuFFTConfig config(input, signal_ndim, complex_input, complex_output,
                     checked_signal_sizes, onesided, output_sizes);
  return _run_cufft(config, input, signal_ndim, complex_input,
                    complex_output, inverse, checked_signal_sizes, normalized,
                    onesided, output_sizes, input_was_cloned);
}
}} // at::native
|
d64c1608c9dfbe65032c3e4599d6a60c5824491e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Accumulate one atom's force into the 64-bit fixed-point force buffers.
 * The x, y and z components live in three consecutive PADDED_NUM_ATOMS-long
 * slabs of forceBuffers; each component is scaled by 2^32 before the add.
 */
inline __device__ void storeForce(int atom, real3 force, unsigned long long* __restrict__ forceBuffers) {
    const unsigned long long fx = static_cast<unsigned long long>((long long) (force.x*0x100000000));
    const unsigned long long fy = static_cast<unsigned long long>((long long) (force.y*0x100000000));
    const unsigned long long fz = static_cast<unsigned long long>((long long) (force.z*0x100000000));
    atomicAdd(&forceBuffers[atom], fx);
    atomicAdd(&forceBuffers[atom+PADDED_NUM_ATOMS], fy);
    atomicAdd(&forceBuffers[atom+2*PADDED_NUM_ATOMS], fz);
}
/**
 * Drop the w component of a real4, yielding a real3.
 */
inline __device__ real3 trim(real4 p) {
    real3 xyz = make_real3(p.x, p.y, p.z);
    return xyz;
}
/**
 * Compute the difference between two vectors, applying the minimum-image
 * convention when USE_PERIODIC is defined, and store the squared magnitude
 * of the difference in the fourth component.
 */
inline __device__ real4 delta(real3 a, real3 b, real4 periodicBoxSize, real4 invPeriodicBoxSize) {
    real4 d = make_real4(a.x-b.x, a.y-b.y, a.z-b.z, 0.0f);
#ifdef USE_PERIODIC
    d.x -= floor(d.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
    d.y -= floor(d.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
    d.z -= floor(d.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
#endif
    d.w = d.x*d.x + d.y*d.y + d.z*d.z;
    return d;
}
/**
 * Return the angle between two vectors. Each vector's w component must hold
 * its squared magnitude.
 */
__device__ real computeAngle(real4 v1, real4 v2) {
    real dp = v1.x*v2.x + v1.y*v2.y + v1.z*v2.z;
    real cosTheta = dp*RSQRT(v1.w*v2.w);
    if (cosTheta > 0.99f || cosTheta < -0.99f) {
        // Near the acos() singularity, derive the angle from the cross
        // product with asin() instead, mirroring the result past 90 degrees.
        real3 cp = cross(v1, v2);
        real magSq = v1.w*v2.w;
        real theta = ASIN(SQRT(dot(cp, cp)/magSq));
        return (cosTheta < 0.0f) ? M_PI-theta : theta;
    }
    return ACOS(cosTheta);
}
/**
 * Cross product of two vectors; the fourth component of the result holds the
 * squared magnitude of the cross product.
 */
inline __device__ real4 computeCross(real4 v1, real4 v2) {
    real3 c = cross(v1, v2);
    real magSq = c.x*c.x + c.y*c.y + c.z*c.z;
    return make_real4(c.x, c.y, c.z, magSq);
}
/**
 * Determine whether a particular interaction is in the list of exclusions.
 *
 * exclusions/exclusionStartIndex form a CSR-style layout: atom1's exclusions
 * occupy exclusions[exclusionStartIndex[atom1] .. exclusionStartIndex[atom1+1]).
 * The list is scanned from the back with an early exit once entries drop to
 * <= atom1.
 * NOTE(review): the early exit assumes each atom's exclusion entries are
 * sorted ascending and that only atom2 values above atom1 matter to callers --
 * confirm against the host code that builds these tables.
 */
inline __device__ bool isInteractionExcluded(int atom1, int atom2, int* __restrict__ exclusions, int* __restrict__ exclusionStartIndex) {
    int first = exclusionStartIndex[atom1];
    int last = exclusionStartIndex[atom1+1];
    for (int i = last-1; i >= first; i--) {
        int excluded = exclusions[i];
        if (excluded == atom2)
            return true;
        if (excluded <= atom1)  // past the entries that can still match atom2
            return false;
    }
    return false;
}
__constant__ float globals[NUM_GLOBALS];
/**
 * Compute the interaction.
 *
 * Driver skeleton for the custom many-particle force: block b iterates over
 * "first" particles p1 = b, b+gridDim.x, ..., and its threads stride over the
 * candidate combinations of the remaining particles. The combination
 * enumeration, validity/cutoff/exclusion tests and the interaction math are
 * spliced in through the upper-case macros (FIND_ATOMS_FOR_COMBINATION_INDEX,
 * IS_VALID_COMBINATION, COMPUTE_INTERACTION, ...) defined by the host code
 * that compiles this kernel. Each thread adds its accumulated energy to its
 * own energyBuffer slot.
 */
extern "C" __global__ void computeInteraction(
        unsigned long long* __restrict__ forceBuffers, real* __restrict__ energyBuffer, const real4* __restrict__ posq,
        real4 periodicBoxSize, real4 invPeriodicBoxSize
#ifdef USE_CUTOFF
        , const int* __restrict__ neighbors, const int* __restrict__ neighborStartIndex
#endif
#ifdef USE_FILTERS
        , int* __restrict__ particleTypes, int* __restrict__ orderIndex, int* __restrict__ particleOrder
#endif
#ifdef USE_EXCLUSIONS
        , int* __restrict__ exclusions, int* __restrict__ exclusionStartIndex
#endif
        PARAMETER_ARGUMENTS) {
    real energy = 0.0f;
    // Loop over particles to be the first one in the set.
    for (int p1 = blockIdx.x; p1 < NUM_ATOMS; p1 += gridDim.x) {
        // a1, firstNeighbor and numNeighbors are presumably consumed by the
        // generated macros below (not referenced directly in this skeleton).
#ifdef USE_CENTRAL_PARTICLE
        const int a1 = p1;
#else
        const int a1 = 0;
#endif
#ifdef USE_CUTOFF
        int firstNeighbor = neighborStartIndex[p1];
        int numNeighbors = neighborStartIndex[p1+1]-firstNeighbor;
#else
#ifdef USE_CENTRAL_PARTICLE
        int numNeighbors = NUM_ATOMS;
#else
        int numNeighbors = NUM_ATOMS-p1-1;
#endif
#endif
        int numCombinations = NUM_CANDIDATE_COMBINATIONS;
        for (int index = threadIdx.x; index < numCombinations; index += blockDim.x) {
            FIND_ATOMS_FOR_COMBINATION_INDEX;
            bool includeInteraction = IS_VALID_COMBINATION;
#ifdef USE_CUTOFF
            if (includeInteraction) {
                VERIFY_CUTOFF;
            }
#endif
#ifdef USE_FILTERS
            // order == -1 marks a particle-type combination with no interaction
            int order = orderIndex[COMPUTE_TYPE_INDEX];
            if (order == -1)
                includeInteraction = false;
#endif
#ifdef USE_EXCLUSIONS
            if (includeInteraction) {
                VERIFY_EXCLUSIONS;
            }
#endif
            if (includeInteraction) {
                PERMUTE_ATOMS;
                LOAD_PARTICLE_DATA;
                COMPUTE_INTERACTION;
            }
        }
    }
    energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
/**
 * Find a bounding box for the atoms in each block.
 *
 * Each thread handles one tile of TILE_SIZE consecutive atoms (grid-stride
 * over tiles) and writes the tile's AABB as a center (blockCenter) plus a
 * half-extent (blockBoundingBox). Thread (0,0) also zeroes the global
 * neighbor-pair counter so findNeighbors can start fresh.
 */
extern "C" __global__ void findBlockBounds(real4 periodicBoxSize, real4 invPeriodicBoxSize, const real4* __restrict__ posq,
        real4* __restrict__ blockCenter, real4* __restrict__ blockBoundingBox, int* __restrict__ numNeighborPairs) {
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    int base = index*TILE_SIZE;
    while (base < NUM_ATOMS) {
        real4 pos = posq[base];
#ifdef USE_PERIODIC
        // wrap the tile's first atom into the home box
        pos.x -= floor(pos.x*invPeriodicBoxSize.x)*periodicBoxSize.x;
        pos.y -= floor(pos.y*invPeriodicBoxSize.y)*periodicBoxSize.y;
        pos.z -= floor(pos.z*invPeriodicBoxSize.z)*periodicBoxSize.z;
#endif
        real4 minPos = pos;
        real4 maxPos = pos;
        int last = min(base+TILE_SIZE, NUM_ATOMS);
        for (int i = base+1; i < last; i++) {
            pos = posq[i];
#ifdef USE_PERIODIC
            // map each atom to its periodic image nearest the box built so
            // far, so wrapping does not artificially inflate the bounds
            real4 center = 0.5f*(maxPos+minPos);
            pos.x -= floor((pos.x-center.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
            pos.y -= floor((pos.y-center.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
            pos.z -= floor((pos.z-center.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
#endif
            minPos = make_real4(min(minPos.x,pos.x), min(minPos.y,pos.y), min(minPos.z,pos.z), 0);
            maxPos = make_real4(max(maxPos.x,pos.x), max(maxPos.y,pos.y), max(maxPos.z,pos.z), 0);
        }
        real4 blockSize = 0.5f*(maxPos-minPos);  // half-extent of the AABB
        blockBoundingBox[index] = blockSize;
        blockCenter[index] = 0.5f*(maxPos+minPos);
        index += blockDim.x*gridDim.x;
        base = index*TILE_SIZE;
    }
    if (blockIdx.x == 0 && threadIdx.x == 0)
        *numNeighborPairs = 0;
}
/**
 * Find a list of neighbors for each atom.
 *
 * Each lane of a warp owns one atom1 from the same tile. The warp first culls
 * candidate tiles with an AABB-vs-AABB distance test (32 tiles at a time),
 * then scans each surviving tile atom-by-atom, appending accepted pairs to
 * the global neighborPairs buffer with one atomicAdd per (atom1, tile) batch.
 */
extern "C" __global__ void findNeighbors(real4 periodicBoxSize, real4 invPeriodicBoxSize, const real4* __restrict__ posq,
        const real4* __restrict__ blockCenter, const real4* __restrict__ blockBoundingBox, int2* __restrict__ neighborPairs,
        int* __restrict__ numNeighborPairs, int* __restrict__ numNeighborsForAtom, int maxNeighborPairs
#ifdef USE_EXCLUSIONS
        , int* __restrict__ exclusions, int* __restrict__ exclusionStartIndex
#endif
        ) {
    __shared__ real3 positionCache[FIND_NEIGHBORS_WORKGROUP_SIZE];
    int indexInWarp = threadIdx.x%32;  // this kernel assumes 32-lane warps throughout
    for (int atom1 = blockIdx.x*blockDim.x+threadIdx.x; atom1 < PADDED_NUM_ATOMS; atom1 += blockDim.x*gridDim.x) {
        // Load data for this atom. Note that all threads in a warp are processing atoms from the same block.
        real3 pos1 = trim(posq[atom1]);
        int block1 = atom1/TILE_SIZE;
        real4 blockCenter1 = blockCenter[block1];
        real4 blockSize1 = blockBoundingBox[block1];
        int totalNeighborsForAtom1 = 0;
        // Loop over atom blocks to search for neighbors. The threads in a warp compare block1 against 32
        // other blocks in parallel.
#ifdef USE_CENTRAL_PARTICLE
        int startBlock = 0;
#else
        int startBlock = block1;
#endif
        for (int block2Base = startBlock; block2Base < NUM_BLOCKS; block2Base += 32) {
            int block2 = block2Base+indexInWarp;
            bool includeBlock2 = (block2 < NUM_BLOCKS);
            if (includeBlock2) {
                real4 blockCenter2 = blockCenter[block2];
                real4 blockSize2 = blockBoundingBox[block2];
                real4 blockDelta = blockCenter1-blockCenter2;
#ifdef USE_PERIODIC
                blockDelta.x -= floor(blockDelta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
                blockDelta.y -= floor(blockDelta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
                blockDelta.z -= floor(blockDelta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
#endif
                // per-axis gap between the two AABBs (0 where they overlap)
                blockDelta.x = max(0.0f, fabs(blockDelta.x)-blockSize1.x-blockSize2.x);
                blockDelta.y = max(0.0f, fabs(blockDelta.y)-blockSize1.y-blockSize2.y);
                blockDelta.z = max(0.0f, fabs(blockDelta.z)-blockSize1.z-blockSize2.z);
                includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < CUTOFF_SQUARED);
            }
            // Loop over any blocks we identified as potentially containing neighbors.
            // NOTE(review): auto-hipified code. HIP's __ballot returns a 64-bit
            // mask on AMD wave64 hardware; truncating it to int and the %32 warp
            // math above assume 32-lane warps -- verify on the intended targets.
            int includeBlockFlags = __ballot(includeBlock2);
            while (includeBlockFlags != 0) {
                int i = __ffs(includeBlockFlags)-1;
                includeBlockFlags &= includeBlockFlags-1;  // clear the lowest set bit
                int block2 = block2Base+i;
                // Loop over atoms in this block.
                int start = block2*TILE_SIZE;
                int included[TILE_SIZE];
                int numIncluded = 0;
                // Each lane caches one atom of the candidate tile; warp-mates
                // read it below with no barrier, relying on warp-synchronous
                // execution -- NOTE(review): needs __syncwarp on archs with
                // independent thread scheduling; confirm target hardware.
                positionCache[threadIdx.x] = trim(posq[start+indexInWarp]);
                if (atom1 < NUM_ATOMS) {
                    for (int j = 0; j < 32; j++) {
                        int atom2 = start+j;
                        real3 pos2 = positionCache[threadIdx.x-indexInWarp+j];
                        // Decide whether to include this atom pair in the neighbor list.
                        real4 atomDelta = delta(pos1, pos2, periodicBoxSize, invPeriodicBoxSize);
#ifdef USE_CENTRAL_PARTICLE
                        bool includeAtom = (atom2 != atom1 && atom2 < NUM_ATOMS && atomDelta.w < CUTOFF_SQUARED);
#else
                        bool includeAtom = (atom2 > atom1 && atom2 < NUM_ATOMS && atomDelta.w < CUTOFF_SQUARED);
#endif
#ifdef USE_EXCLUSIONS
                        if (includeAtom)
                            includeAtom &= !isInteractionExcluded(atom1, atom2, exclusions, exclusionStartIndex);
#endif
                        if (includeAtom)
                            included[numIncluded++] = atom2;
                    }
                }
                // If we found any neighbors, store them to the neighbor list.
                if (numIncluded > 0) {
                    int baseIndex = atomicAdd(numNeighborPairs, numIncluded);
                    // writes are skipped on overflow, but the counters keep
                    // growing so the overflow can be detected downstream
                    if (baseIndex+numIncluded <= maxNeighborPairs)
                        for (int j = 0; j < numIncluded; j++)
                            neighborPairs[baseIndex+j] = make_int2(atom1, included[j]);
                    totalNeighborsForAtom1 += numIncluded;
                }
            }
        }
        numNeighborsForAtom[atom1] = totalNeighborsForAtom1;
    }
}
/**
 * Sum the neighbor counts to compute the start position of each atom. This kernel
 * is executed as a single work group.
 *
 * Produces an exclusive prefix sum of numNeighborsForAtom in neighborStartIndex:
 * entry 0 is 0 and entry i+1 is the end of atom i's range. Requires
 * blockDim.x * sizeof(unsigned int) bytes of dynamic shared memory.
 */
extern "C" __global__ void computeNeighborStartIndices(int* __restrict__ numNeighborsForAtom, int* __restrict__ neighborStartIndex,
        int* __restrict__ numNeighborPairs, int maxNeighborPairs) {
    extern __shared__ unsigned int posBuffer[];
    if (*numNeighborPairs > maxNeighborPairs) {
        // There wasn't enough memory for the neighbor list, so we'll need to rebuild it. Set the neighbor start
        // indices to indicate no neighbors for any atom.
        for (int i = threadIdx.x; i <= NUM_ATOMS; i += blockDim.x)
            neighborStartIndex[i] = 0;
        return;
    }
    unsigned int globalOffset = 0;
    for (unsigned int startAtom = 0; startAtom < NUM_ATOMS; startAtom += blockDim.x) {
        // Load the neighbor counts into local memory.
        unsigned int globalIndex = startAtom+threadIdx.x;
        posBuffer[threadIdx.x] = (globalIndex < NUM_ATOMS ? numNeighborsForAtom[globalIndex] : 0);
        __syncthreads();
        // Perform a parallel prefix sum. (Hillis-Steele inclusive scan; the two
        // barriers per step separate the reads from the writes of posBuffer.)
        for (unsigned int step = 1; step < blockDim.x; step *= 2) {
            unsigned int add = (threadIdx.x >= step ? posBuffer[threadIdx.x-step] : 0);
            __syncthreads();
            posBuffer[threadIdx.x] += add;
            __syncthreads();
        }
        // Write the results back to global memory.
        if (globalIndex < NUM_ATOMS) {
            neighborStartIndex[globalIndex+1] = posBuffer[threadIdx.x]+globalOffset;
            numNeighborsForAtom[globalIndex] = 0; // Clear this so the next kernel can use it as a counter
        }
        globalOffset += posBuffer[blockDim.x-1];  // carry the chunk total forward
    }
    if (threadIdx.x == 0)
        neighborStartIndex[0] = 0;
}
/**
 * Scatter the (atom1, atom2) pairs produced by findNeighbors into the final
 * per-atom neighbor list, using numNeighborsForAtom (pre-zeroed by the
 * previous kernel) as per-atom write cursors.
 */
extern "C" __global__ void copyPairsToNeighborList(const int2* __restrict__ neighborPairs, int* __restrict__ neighbors, int* __restrict__ numNeighborPairs,
        int maxNeighborPairs, int* __restrict__ numNeighborsForAtom, const int* __restrict__ neighborStartIndex) {
    int totalPairs = *numNeighborPairs;
    if (totalPairs > maxNeighborPairs)
        return; // The neighbor list overflowed and will be rebuilt, so there is nothing to copy.
    for (unsigned int i = blockDim.x*blockIdx.x+threadIdx.x; i < totalPairs; i += blockDim.x*gridDim.x) {
        int2 p = neighborPairs[i];
        int base = neighborStartIndex[p.x];
        int slot = atomicAdd(numNeighborsForAtom+p.x, 1);
        neighbors[base+slot] = p.y;
    }
}
| d64c1608c9dfbe65032c3e4599d6a60c5824491e.cu | /**
* Record the force on an atom to global memory.
*/
inline __device__ void storeForce(int atom, real3 force, unsigned long long* __restrict__ forceBuffers) {
    // Convert each component to 64-bit fixed point (scale 2^32), then
    // accumulate into the x/y/z slabs (each PADDED_NUM_ATOMS long).
    const unsigned long long fx = static_cast<unsigned long long>((long long) (force.x*0x100000000));
    const unsigned long long fy = static_cast<unsigned long long>((long long) (force.y*0x100000000));
    const unsigned long long fz = static_cast<unsigned long long>((long long) (force.z*0x100000000));
    atomicAdd(&forceBuffers[atom], fx);
    atomicAdd(&forceBuffers[atom+PADDED_NUM_ATOMS], fy);
    atomicAdd(&forceBuffers[atom+2*PADDED_NUM_ATOMS], fz);
}
/**
 * Drop the w component of a real4, yielding a real3.
 */
inline __device__ real3 trim(real4 p) {
    real3 xyz = make_real3(p.x, p.y, p.z);
    return xyz;
}
/**
 * Compute the difference between two vectors, applying the minimum-image
 * convention when USE_PERIODIC is defined, and store the squared magnitude
 * of the difference in the fourth component.
 */
inline __device__ real4 delta(real3 a, real3 b, real4 periodicBoxSize, real4 invPeriodicBoxSize) {
    real4 d = make_real4(a.x-b.x, a.y-b.y, a.z-b.z, 0.0f);
#ifdef USE_PERIODIC
    d.x -= floor(d.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
    d.y -= floor(d.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
    d.z -= floor(d.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
#endif
    d.w = d.x*d.x + d.y*d.y + d.z*d.z;
    return d;
}
/**
 * Return the angle between two vectors. Each vector's w component must hold
 * its squared magnitude.
 */
__device__ real computeAngle(real4 v1, real4 v2) {
    real dp = v1.x*v2.x + v1.y*v2.y + v1.z*v2.z;
    real cosTheta = dp*RSQRT(v1.w*v2.w);
    if (cosTheta > 0.99f || cosTheta < -0.99f) {
        // Near the acos() singularity, derive the angle from the cross
        // product with asin() instead, mirroring the result past 90 degrees.
        real3 cp = cross(v1, v2);
        real magSq = v1.w*v2.w;
        real theta = ASIN(SQRT(dot(cp, cp)/magSq));
        return (cosTheta < 0.0f) ? M_PI-theta : theta;
    }
    return ACOS(cosTheta);
}
/**
 * Cross product of two vectors; the fourth component of the result holds the
 * squared magnitude of the cross product.
 */
inline __device__ real4 computeCross(real4 v1, real4 v2) {
    real3 c = cross(v1, v2);
    real magSq = c.x*c.x + c.y*c.y + c.z*c.z;
    return make_real4(c.x, c.y, c.z, magSq);
}
/**
 * Determine whether a particular interaction is in the list of exclusions.
 *
 * exclusions/exclusionStartIndex form a CSR-style layout: atom1's exclusions
 * occupy exclusions[exclusionStartIndex[atom1] .. exclusionStartIndex[atom1+1]).
 * The list is scanned from the back with an early exit once entries drop to
 * <= atom1.
 * NOTE(review): the early exit assumes each atom's exclusion entries are
 * sorted ascending and that only atom2 values above atom1 matter to callers --
 * confirm against the host code that builds these tables.
 */
inline __device__ bool isInteractionExcluded(int atom1, int atom2, int* __restrict__ exclusions, int* __restrict__ exclusionStartIndex) {
    int first = exclusionStartIndex[atom1];
    int last = exclusionStartIndex[atom1+1];
    for (int i = last-1; i >= first; i--) {
        int excluded = exclusions[i];
        if (excluded == atom2)
            return true;
        if (excluded <= atom1)  // past the entries that can still match atom2
            return false;
    }
    return false;
}
__constant__ float globals[NUM_GLOBALS];
/**
 * Compute the interaction.
 *
 * Driver skeleton for the custom many-particle force: block b iterates over
 * "first" particles p1 = b, b+gridDim.x, ..., and its threads stride over the
 * candidate combinations of the remaining particles. The combination
 * enumeration, validity/cutoff/exclusion tests and the interaction math are
 * spliced in through the upper-case macros (FIND_ATOMS_FOR_COMBINATION_INDEX,
 * IS_VALID_COMBINATION, COMPUTE_INTERACTION, ...) defined by the host code
 * that compiles this kernel. Each thread adds its accumulated energy to its
 * own energyBuffer slot.
 */
extern "C" __global__ void computeInteraction(
        unsigned long long* __restrict__ forceBuffers, real* __restrict__ energyBuffer, const real4* __restrict__ posq,
        real4 periodicBoxSize, real4 invPeriodicBoxSize
#ifdef USE_CUTOFF
        , const int* __restrict__ neighbors, const int* __restrict__ neighborStartIndex
#endif
#ifdef USE_FILTERS
        , int* __restrict__ particleTypes, int* __restrict__ orderIndex, int* __restrict__ particleOrder
#endif
#ifdef USE_EXCLUSIONS
        , int* __restrict__ exclusions, int* __restrict__ exclusionStartIndex
#endif
        PARAMETER_ARGUMENTS) {
    real energy = 0.0f;
    // Loop over particles to be the first one in the set.
    for (int p1 = blockIdx.x; p1 < NUM_ATOMS; p1 += gridDim.x) {
        // a1, firstNeighbor and numNeighbors are presumably consumed by the
        // generated macros below (not referenced directly in this skeleton).
#ifdef USE_CENTRAL_PARTICLE
        const int a1 = p1;
#else
        const int a1 = 0;
#endif
#ifdef USE_CUTOFF
        int firstNeighbor = neighborStartIndex[p1];
        int numNeighbors = neighborStartIndex[p1+1]-firstNeighbor;
#else
#ifdef USE_CENTRAL_PARTICLE
        int numNeighbors = NUM_ATOMS;
#else
        int numNeighbors = NUM_ATOMS-p1-1;
#endif
#endif
        int numCombinations = NUM_CANDIDATE_COMBINATIONS;
        for (int index = threadIdx.x; index < numCombinations; index += blockDim.x) {
            FIND_ATOMS_FOR_COMBINATION_INDEX;
            bool includeInteraction = IS_VALID_COMBINATION;
#ifdef USE_CUTOFF
            if (includeInteraction) {
                VERIFY_CUTOFF;
            }
#endif
#ifdef USE_FILTERS
            // order == -1 marks a particle-type combination with no interaction
            int order = orderIndex[COMPUTE_TYPE_INDEX];
            if (order == -1)
                includeInteraction = false;
#endif
#ifdef USE_EXCLUSIONS
            if (includeInteraction) {
                VERIFY_EXCLUSIONS;
            }
#endif
            if (includeInteraction) {
                PERMUTE_ATOMS;
                LOAD_PARTICLE_DATA;
                COMPUTE_INTERACTION;
            }
        }
    }
    energyBuffer[blockIdx.x*blockDim.x+threadIdx.x] += energy;
}
/**
 * Find a bounding box for the atoms in each block.
 *
 * Each thread handles one tile of TILE_SIZE consecutive atoms (grid-stride
 * over tiles) and writes the tile's AABB as a center (blockCenter) plus a
 * half-extent (blockBoundingBox). Thread (0,0) also zeroes the global
 * neighbor-pair counter so findNeighbors can start fresh.
 */
extern "C" __global__ void findBlockBounds(real4 periodicBoxSize, real4 invPeriodicBoxSize, const real4* __restrict__ posq,
        real4* __restrict__ blockCenter, real4* __restrict__ blockBoundingBox, int* __restrict__ numNeighborPairs) {
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    int base = index*TILE_SIZE;
    while (base < NUM_ATOMS) {
        real4 pos = posq[base];
#ifdef USE_PERIODIC
        // wrap the tile's first atom into the home box
        pos.x -= floor(pos.x*invPeriodicBoxSize.x)*periodicBoxSize.x;
        pos.y -= floor(pos.y*invPeriodicBoxSize.y)*periodicBoxSize.y;
        pos.z -= floor(pos.z*invPeriodicBoxSize.z)*periodicBoxSize.z;
#endif
        real4 minPos = pos;
        real4 maxPos = pos;
        int last = min(base+TILE_SIZE, NUM_ATOMS);
        for (int i = base+1; i < last; i++) {
            pos = posq[i];
#ifdef USE_PERIODIC
            // map each atom to its periodic image nearest the box built so
            // far, so wrapping does not artificially inflate the bounds
            real4 center = 0.5f*(maxPos+minPos);
            pos.x -= floor((pos.x-center.x)*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
            pos.y -= floor((pos.y-center.y)*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
            pos.z -= floor((pos.z-center.z)*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
#endif
            minPos = make_real4(min(minPos.x,pos.x), min(minPos.y,pos.y), min(minPos.z,pos.z), 0);
            maxPos = make_real4(max(maxPos.x,pos.x), max(maxPos.y,pos.y), max(maxPos.z,pos.z), 0);
        }
        real4 blockSize = 0.5f*(maxPos-minPos);  // half-extent of the AABB
        blockBoundingBox[index] = blockSize;
        blockCenter[index] = 0.5f*(maxPos+minPos);
        index += blockDim.x*gridDim.x;
        base = index*TILE_SIZE;
    }
    if (blockIdx.x == 0 && threadIdx.x == 0)
        *numNeighborPairs = 0;
}
/**
 * Find a list of neighbors for each atom.
 *
 * Each lane of a warp owns one atom1 from the same tile. The warp first culls
 * candidate tiles with an AABB-vs-AABB distance test (32 tiles at a time),
 * then scans each surviving tile atom-by-atom, appending accepted pairs to
 * the global neighborPairs buffer with one atomicAdd per (atom1, tile) batch.
 */
extern "C" __global__ void findNeighbors(real4 periodicBoxSize, real4 invPeriodicBoxSize, const real4* __restrict__ posq,
        const real4* __restrict__ blockCenter, const real4* __restrict__ blockBoundingBox, int2* __restrict__ neighborPairs,
        int* __restrict__ numNeighborPairs, int* __restrict__ numNeighborsForAtom, int maxNeighborPairs
#ifdef USE_EXCLUSIONS
        , int* __restrict__ exclusions, int* __restrict__ exclusionStartIndex
#endif
        ) {
    __shared__ real3 positionCache[FIND_NEIGHBORS_WORKGROUP_SIZE];
    int indexInWarp = threadIdx.x%32;  // this kernel assumes 32-lane warps throughout
    for (int atom1 = blockIdx.x*blockDim.x+threadIdx.x; atom1 < PADDED_NUM_ATOMS; atom1 += blockDim.x*gridDim.x) {
        // Load data for this atom. Note that all threads in a warp are processing atoms from the same block.
        real3 pos1 = trim(posq[atom1]);
        int block1 = atom1/TILE_SIZE;
        real4 blockCenter1 = blockCenter[block1];
        real4 blockSize1 = blockBoundingBox[block1];
        int totalNeighborsForAtom1 = 0;
        // Loop over atom blocks to search for neighbors. The threads in a warp compare block1 against 32
        // other blocks in parallel.
#ifdef USE_CENTRAL_PARTICLE
        int startBlock = 0;
#else
        int startBlock = block1;
#endif
        for (int block2Base = startBlock; block2Base < NUM_BLOCKS; block2Base += 32) {
            int block2 = block2Base+indexInWarp;
            bool includeBlock2 = (block2 < NUM_BLOCKS);
            if (includeBlock2) {
                real4 blockCenter2 = blockCenter[block2];
                real4 blockSize2 = blockBoundingBox[block2];
                real4 blockDelta = blockCenter1-blockCenter2;
#ifdef USE_PERIODIC
                blockDelta.x -= floor(blockDelta.x*invPeriodicBoxSize.x+0.5f)*periodicBoxSize.x;
                blockDelta.y -= floor(blockDelta.y*invPeriodicBoxSize.y+0.5f)*periodicBoxSize.y;
                blockDelta.z -= floor(blockDelta.z*invPeriodicBoxSize.z+0.5f)*periodicBoxSize.z;
#endif
                // per-axis gap between the two AABBs (0 where they overlap)
                blockDelta.x = max(0.0f, fabs(blockDelta.x)-blockSize1.x-blockSize2.x);
                blockDelta.y = max(0.0f, fabs(blockDelta.y)-blockSize1.y-blockSize2.y);
                blockDelta.z = max(0.0f, fabs(blockDelta.z)-blockSize1.z-blockSize2.z);
                includeBlock2 &= (blockDelta.x*blockDelta.x+blockDelta.y*blockDelta.y+blockDelta.z*blockDelta.z < CUTOFF_SQUARED);
            }
            // Loop over any blocks we identified as potentially containing neighbors.
            // NOTE(review): mask-less __ballot was removed for compute
            // capability 7.0+; newer toolchains require
            // __ballot_sync(0xffffffff, ...) -- confirm the targeted archs.
            int includeBlockFlags = __ballot(includeBlock2);
            while (includeBlockFlags != 0) {
                int i = __ffs(includeBlockFlags)-1;
                includeBlockFlags &= includeBlockFlags-1;  // clear the lowest set bit
                int block2 = block2Base+i;
                // Loop over atoms in this block.
                int start = block2*TILE_SIZE;
                int included[TILE_SIZE];
                int numIncluded = 0;
                // Each lane caches one atom of the candidate tile; warp-mates
                // read it below with no barrier, relying on warp-synchronous
                // execution -- NOTE(review): needs __syncwarp under Volta+
                // independent thread scheduling; confirm target hardware.
                positionCache[threadIdx.x] = trim(posq[start+indexInWarp]);
                if (atom1 < NUM_ATOMS) {
                    for (int j = 0; j < 32; j++) {
                        int atom2 = start+j;
                        real3 pos2 = positionCache[threadIdx.x-indexInWarp+j];
                        // Decide whether to include this atom pair in the neighbor list.
                        real4 atomDelta = delta(pos1, pos2, periodicBoxSize, invPeriodicBoxSize);
#ifdef USE_CENTRAL_PARTICLE
                        bool includeAtom = (atom2 != atom1 && atom2 < NUM_ATOMS && atomDelta.w < CUTOFF_SQUARED);
#else
                        bool includeAtom = (atom2 > atom1 && atom2 < NUM_ATOMS && atomDelta.w < CUTOFF_SQUARED);
#endif
#ifdef USE_EXCLUSIONS
                        if (includeAtom)
                            includeAtom &= !isInteractionExcluded(atom1, atom2, exclusions, exclusionStartIndex);
#endif
                        if (includeAtom)
                            included[numIncluded++] = atom2;
                    }
                }
                // If we found any neighbors, store them to the neighbor list.
                if (numIncluded > 0) {
                    int baseIndex = atomicAdd(numNeighborPairs, numIncluded);
                    // writes are skipped on overflow, but the counters keep
                    // growing so the overflow can be detected downstream
                    if (baseIndex+numIncluded <= maxNeighborPairs)
                        for (int j = 0; j < numIncluded; j++)
                            neighborPairs[baseIndex+j] = make_int2(atom1, included[j]);
                    totalNeighborsForAtom1 += numIncluded;
                }
            }
        }
        numNeighborsForAtom[atom1] = totalNeighborsForAtom1;
    }
}
/**
* Sum the neighbor counts to compute the start position of each atom. This kernel
* is executed as a single work group.
*/
extern "C" __global__ void computeNeighborStartIndices(int* __restrict__ numNeighborsForAtom, int* __restrict__ neighborStartIndex,
            int* __restrict__ numNeighborPairs, int maxNeighborPairs) {
    extern __shared__ unsigned int posBuffer[];
    if (*numNeighborPairs > maxNeighborPairs) {
        // There wasn't enough memory for the neighbor list, so we'll need to rebuild it. Set the neighbor start
        // indices to indicate no neighbors for any atom.  Note the inclusive bound:
        // neighborStartIndex has NUM_ATOMS+1 entries.
        for (int i = threadIdx.x; i <= NUM_ATOMS; i += blockDim.x)
            neighborStartIndex[i] = 0;
        return;
    }
    unsigned int globalOffset = 0;
    for (unsigned int startAtom = 0; startAtom < NUM_ATOMS; startAtom += blockDim.x) {
        // Load the neighbor counts into local memory.
        unsigned int globalIndex = startAtom+threadIdx.x;
        posBuffer[threadIdx.x] = (globalIndex < NUM_ATOMS ? numNeighborsForAtom[globalIndex] : 0);
        __syncthreads();
        // Perform a parallel (inclusive Hillis-Steele) prefix sum.
        for (unsigned int step = 1; step < blockDim.x; step *= 2) {
            unsigned int add = (threadIdx.x >= step ? posBuffer[threadIdx.x-step] : 0);
            __syncthreads();
            posBuffer[threadIdx.x] += add;
            __syncthreads();
        }
        // Write the results back to global memory.
        if (globalIndex < NUM_ATOMS) {
            neighborStartIndex[globalIndex+1] = posBuffer[threadIdx.x]+globalOffset;
            numNeighborsForAtom[globalIndex] = 0; // Clear this so the next kernel can use it as a counter
        }
        globalOffset += posBuffer[blockDim.x-1];
        // BUG FIX: barrier before the next iteration overwrites posBuffer.  Without it,
        // the thread owning posBuffer[blockDim.x-1] can loop around and store its next
        // count before slower threads have read the block total on the line above.
        __syncthreads();
    }
    if (threadIdx.x == 0)
        neighborStartIndex[0] = 0;
}
/**
* Assemble the final neighbor list.
*/
extern "C" __global__ void copyPairsToNeighborList(const int2* __restrict__ neighborPairs, int* __restrict__ neighbors, int* __restrict__ numNeighborPairs,
            int maxNeighborPairs, int* __restrict__ numNeighborsForAtom, const int* __restrict__ neighborStartIndex) {
    int actualPairs = *numNeighborPairs;
    if (actualPairs > maxNeighborPairs)
        return; // There wasn't enough memory for the neighbor list, so we'll need to rebuild it.
    // Grid-stride loop over every recorded (atom, neighbor) pair.
    for (unsigned int index = blockDim.x*blockIdx.x+threadIdx.x; index < actualPairs; index += blockDim.x*gridDim.x) {
        int2 pair = neighborPairs[index];
        int startIndex = neighborStartIndex[pair.x];
        // atomicAdd hands this pair a unique slot within atom pair.x's segment;
        // numNeighborsForAtom was zeroed by computeNeighborStartIndices so it can serve as a counter.
        int offset = atomicAdd(numNeighborsForAtom+pair.x, 1);
        neighbors[startIndex+offset] = pair.y;
    }
}
|
557697444142da35ee3ad2ee4a40f438d0a3888c.hip | // !!! This is a file automatically generated by hipify!!!
///////////////////////////////////////////////////////////////////////////////
//
// The MIT License
//
// Copyright (c) 2006 Scientific Computing and Imaging Institute,
// University of Utah (USA)
//
// License for the specific language governing rights and limitations under
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef ELVIS_EXTENSIONS_JACOBI_EXTENSION_OPTIX_HEXAHEDRON_CU
#define ELVIS_EXTENSIONS_JACOBI_EXTENSION_OPTIX_HEXAHEDRON_CU
#include <optix_cuda.h>
#include <optix_math.h>
#include <ElVis/Core/matrix.cu>
#include <optixu/optixu_aabb.h>
//#include "matrix.hip"
#include <ElVis/Core/CutSurfacePayloads.cu>
#include <ElVis/Core/VolumeRenderingPayload.cu>
#include <ElVis/Core/typedefs.cu>
#include <ElVis/Core/jacobi.cu>
#include <ElVis/Core/util.cu>
#include <ElVis/Core/OptixVariables.cu>
#include <ElVis/Core/Interval.hpp>
#include <ElVis/Core/IntervalPoint.cu>
#include <ElVis/Extensions/JacobiExtension/HexahedronCommon.cu>
// The vertices associated with this hex.
rtBuffer<ElVisFloat4> HexVertexBuffer;
// Hexvertex_face_index[i] gives the index for the four
// vertices associated with face i.
rtBuffer<uint4> Hexvertex_face_index;
// Defines the planes for each hex side.
rtBuffer<ElVisFloat4> HexPlaneBuffer;
rtBuffer<ElVisFloat> HexCoefficients;
rtBuffer<uint> HexCoefficientIndices;
rtBuffer<uint3> HexDegrees;
rtDeclareVariable(int, intersectedHexId, attribute IntersectedHex, );
// Returns true (and sets t) when the ray (origin, direction) hits face 'faceNumber'
// of hex 'hexId'.  'p' holds the hex's corner vertices — presumably already projected
// into the ray's coordinate system (see the disabled FindRayElementIntersection),
// making ContainsOrigin a 2D point-in-quad test; confirm before reuse.  The hit
// distance comes from intersecting the face's stored plane.
__device__ __forceinline__ bool IntersectsFace(int hexId, unsigned int faceNumber,
    ElVisFloat4* p, const ElVisFloat3& origin, const ElVisFloat3& direction,
    ElVisFloat& t)
{
    uint4 index = Hexvertex_face_index[faceNumber];
    bool result = false;
    if( ContainsOrigin(p[index.x], p[index.y], p[index.z], p[index.w]) )
    {
        result = FindPlaneIntersection(origin, direction, GetPlane(&HexPlaneBuffer[0], hexId, faceNumber), t);
    }
    return result;
}
// Half plane version.
//// Determines if the given ray intersects the given hex. Returns true if it does, false otherwise. If an intersection is
//// found, t is the value of the closest intersection.
//__device__ bool HexahedronIntersection(const ElVisFloat3& origin, const ElVisFloat3& direction, int hexId, const ElVisFloat& closestT, ElVisFloat& t)
//{
// t = closestT;
// for(int faceId = 0; faceId < 6; ++faceId)
// {
// // Check to see if we intersect this face.
// ElVisFloat plane_t;
// bool intersectsFace = FindPlaneIntersection(origin, direction, GetPlane(HexPlaneBuffer, hexId, faceId), plane_t);
// bool testInside = intersectsFace;
// testInside &= (plane_t < t );
// if( testInside )
// {
// WorldPoint intersectionPoint = origin + plane_t*direction;
// bool insideOtherFaces = true;
// for(int insideFaceId = 0; insideFaceId < 6; ++insideFaceId)
// {
// if( insideFaceId != faceId )
// {
// ElVisFloat planeVal = EvaluatePlane(GetPlane(HexPlaneBuffer, hexId, insideFaceId), intersectionPoint);
// insideOtherFaces &= planeVal <= MAKE_FLOAT(0.0);
// if( !insideOtherFaces ) break;
// }
// }
// if( insideOtherFaces )
// {
// t = plane_t;
// }
// }
// }
// return t != ELVIS_FLOAT_MAX;
//}
__device__ __forceinline__ void FindRayElementIntersection(int hexId)
{
    // Intentionally a no-op.  The original implementation — projecting the
    // hex's eight vertices into the ray's coordinate system (via two 4x4
    // matrices built from the ray frame and origin translation), testing each
    // of the six faces with IntersectsFace, and reporting hits through
    // rtPotentialIntersection/rtReportIntersection with the volume payload —
    // was commented out because "This method causes slow compiles".  The dead
    // commented-out body has been removed; recover it from version control if
    // the face-intersection path is ever re-enabled.  As a result, ordinary
    // element-intersection rays never report a hit from this program; only
    // ray_type==1 element-location queries (CheckIfOriginIsInElement) do.
}
// Used by "locate element" rays (ray_type 1): reports an intersection at a fixed
// nominal distance (.1) when the ray origin lies inside hex 'hexId'.  Inside/outside
// is decided against the six face planes with a small tolerance (0.001) so points on
// a shared face still register.
__device__ __forceinline__ void CheckIfOriginIsInElement(int hexId)
{
    ElVisFloat3 origin = MakeFloat3(ray.origin);
    // All planes point out, so each plane needs to return <= 0.
    ElVisFloat p0 = EvaluatePlane(GetPlane(&HexPlaneBuffer[0], hexId, 0), origin);
    ElVisFloat p1 = EvaluatePlane(GetPlane(&HexPlaneBuffer[0], hexId, 1), origin);
    ElVisFloat p2 = EvaluatePlane(GetPlane(&HexPlaneBuffer[0], hexId, 2), origin);
    ElVisFloat p3 = EvaluatePlane(GetPlane(&HexPlaneBuffer[0], hexId, 3), origin);
    ElVisFloat p4 = EvaluatePlane(GetPlane(&HexPlaneBuffer[0], hexId, 4), origin);
    ElVisFloat p5 = EvaluatePlane(GetPlane(&HexPlaneBuffer[0], hexId, 5), origin);
    if( p0 <= MAKE_FLOAT(0.001) && p1 <= MAKE_FLOAT(0.001) && p2 <= MAKE_FLOAT(0.001) && p3 <= MAKE_FLOAT(0.001) && p4 <= MAKE_FLOAT(0.001) && p5 <= MAKE_FLOAT(0.001) )
    {
        // Disabled secondary check in reference coordinates, kept for reference:
        //ElVis::TensorPoint tp = TransformWorldToTensor(ray.origin);
        //if( tp.x <= -1 || tp.x >= 1 ||
        //    tp.y <= -1 || tp.y >= 1 ||
        //    tp.z <= -1 || tp.z >= 1 )
        //{
        //    return;
        //}
        if( rtPotentialIntersection( .1 ) )
        {
            intersectedHexId = hexId;
            intersectionPointPayload.elementId = hexId;
            intersectionPointPayload.elementType = 0;
            rtReportIntersection(0);
        }
    }
}
// Dispatch an OptiX intersection query against hex 'hexId' based on the ray type:
// type 1 is the "which element contains this point" query; everything else is a
// regular element-intersection ray.
RT_PROGRAM void HexahedronIntersection(int hexId)
{
    const bool isFindElementRay = (ray.ray_type == 1);
    if( !isFindElementRay )
    {
        FindRayElementIntersection(hexId);
        return;
    }
    CheckIfOriginIsInElement(hexId);
}
// Compute the axis-aligned bounding box of hex 'id' for OptiX acceleration
// structures by folding min/max over its eight corner vertices.
RT_PROGRAM void hexahedron_bounding (int id, float result[6])
{
    optix::Aabb* aabb = (optix::Aabb*)result;
    ElVisFloat4 corner = GetVertex(&HexVertexBuffer[0], id, 0);
    float minX = corner.x, minY = corner.y, minZ = corner.z;
    float maxX = corner.x, maxY = corner.y, maxZ = corner.z;
    for (int i = 1; i < 8; ++i)
    {
        corner = GetVertex(&HexVertexBuffer[0], id, i);
        minX = fminf(minX, corner.x); minY = fminf(minY, corner.y); minZ = fminf(minZ, corner.z);
        maxX = fmaxf(maxX, corner.x); maxY = fmaxf(maxY, corner.y); maxZ = fmaxf(maxZ, corner.z);
    }
    aabb->m_min.x = minX; aabb->m_min.y = minY; aabb->m_min.z = minZ;
    aabb->m_max.x = maxX; aabb->m_max.y = maxY; aabb->m_max.z = maxZ;
}
#endif
| 557697444142da35ee3ad2ee4a40f438d0a3888c.cu | ///////////////////////////////////////////////////////////////////////////////
//
// The MIT License
//
// Copyright (c) 2006 Scientific Computing and Imaging Institute,
// University of Utah (USA)
//
// License for the specific language governing rights and limitations under
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef ELVIS_EXTENSIONS_JACOBI_EXTENSION_OPTIX_HEXAHEDRON_CU
#define ELVIS_EXTENSIONS_JACOBI_EXTENSION_OPTIX_HEXAHEDRON_CU
#include <optix_cuda.h>
#include <optix_math.h>
#include <ElVis/Core/matrix.cu>
#include <optixu/optixu_aabb.h>
//#include "matrix.cu"
#include <ElVis/Core/CutSurfacePayloads.cu>
#include <ElVis/Core/VolumeRenderingPayload.cu>
#include <ElVis/Core/typedefs.cu>
#include <ElVis/Core/jacobi.cu>
#include <ElVis/Core/util.cu>
#include <ElVis/Core/OptixVariables.cu>
#include <ElVis/Core/Interval.hpp>
#include <ElVis/Core/IntervalPoint.cu>
#include <ElVis/Extensions/JacobiExtension/HexahedronCommon.cu>
// The vertices associated with this hex.
rtBuffer<ElVisFloat4> HexVertexBuffer;
// Hexvertex_face_index[i] gives the index for the four
// vertices associated with face i.
rtBuffer<uint4> Hexvertex_face_index;
// Defines the planes for each hex side.
rtBuffer<ElVisFloat4> HexPlaneBuffer;
rtBuffer<ElVisFloat> HexCoefficients;
rtBuffer<uint> HexCoefficientIndices;
rtBuffer<uint3> HexDegrees;
rtDeclareVariable(int, intersectedHexId, attribute IntersectedHex, );
// Returns true (and sets t) when the ray (origin, direction) hits face 'faceNumber'
// of hex 'hexId'.  'p' holds the hex's corner vertices — presumably already projected
// into the ray's coordinate system (see the disabled FindRayElementIntersection),
// making ContainsOrigin a 2D point-in-quad test; confirm before reuse.  The hit
// distance comes from intersecting the face's stored plane.
__device__ __forceinline__ bool IntersectsFace(int hexId, unsigned int faceNumber,
    ElVisFloat4* p, const ElVisFloat3& origin, const ElVisFloat3& direction,
    ElVisFloat& t)
{
    uint4 index = Hexvertex_face_index[faceNumber];
    bool result = false;
    if( ContainsOrigin(p[index.x], p[index.y], p[index.z], p[index.w]) )
    {
        result = FindPlaneIntersection(origin, direction, GetPlane(&HexPlaneBuffer[0], hexId, faceNumber), t);
    }
    return result;
}
// Half plane version.
//// Determines if the given ray intersects the given hex. Returns true if it does, false otherwise. If an intersection is
//// found, t is the value of the closest intersection.
//__device__ bool HexahedronIntersection(const ElVisFloat3& origin, const ElVisFloat3& direction, int hexId, const ElVisFloat& closestT, ElVisFloat& t)
//{
// t = closestT;
// for(int faceId = 0; faceId < 6; ++faceId)
// {
// // Check to see if we intersect this face.
// ElVisFloat plane_t;
// bool intersectsFace = FindPlaneIntersection(origin, direction, GetPlane(HexPlaneBuffer, hexId, faceId), plane_t);
// bool testInside = intersectsFace;
// testInside &= (plane_t < t );
// if( testInside )
// {
// WorldPoint intersectionPoint = origin + plane_t*direction;
// bool insideOtherFaces = true;
// for(int insideFaceId = 0; insideFaceId < 6; ++insideFaceId)
// {
// if( insideFaceId != faceId )
// {
// ElVisFloat planeVal = EvaluatePlane(GetPlane(HexPlaneBuffer, hexId, insideFaceId), intersectionPoint);
// insideOtherFaces &= planeVal <= MAKE_FLOAT(0.0);
// if( !insideOtherFaces ) break;
// }
// }
// if( insideOtherFaces )
// {
// t = plane_t;
// }
// }
// }
// return t != ELVIS_FLOAT_MAX;
//}
__device__ __forceinline__ void FindRayElementIntersection(int hexId)
{
    // Intentionally a no-op.  The original implementation — projecting the
    // hex's eight vertices into the ray's coordinate system (via two 4x4
    // matrices built from the ray frame and origin translation), testing each
    // of the six faces with IntersectsFace, and reporting hits through
    // rtPotentialIntersection/rtReportIntersection with the volume payload —
    // was commented out because "This method causes slow compiles".  The dead
    // commented-out body has been removed; recover it from version control if
    // the face-intersection path is ever re-enabled.  As a result, ordinary
    // element-intersection rays never report a hit from this program; only
    // ray_type==1 element-location queries (CheckIfOriginIsInElement) do.
}
// Used by "locate element" rays (ray_type 1): reports an intersection at a fixed
// nominal distance (.1) when the ray origin lies inside hex 'hexId'.  Inside/outside
// is decided against the six face planes with a small tolerance (0.001) so points on
// a shared face still register.
__device__ __forceinline__ void CheckIfOriginIsInElement(int hexId)
{
    ElVisFloat3 origin = MakeFloat3(ray.origin);
    // All planes point out, so each plane needs to return <= 0.
    ElVisFloat p0 = EvaluatePlane(GetPlane(&HexPlaneBuffer[0], hexId, 0), origin);
    ElVisFloat p1 = EvaluatePlane(GetPlane(&HexPlaneBuffer[0], hexId, 1), origin);
    ElVisFloat p2 = EvaluatePlane(GetPlane(&HexPlaneBuffer[0], hexId, 2), origin);
    ElVisFloat p3 = EvaluatePlane(GetPlane(&HexPlaneBuffer[0], hexId, 3), origin);
    ElVisFloat p4 = EvaluatePlane(GetPlane(&HexPlaneBuffer[0], hexId, 4), origin);
    ElVisFloat p5 = EvaluatePlane(GetPlane(&HexPlaneBuffer[0], hexId, 5), origin);
    if( p0 <= MAKE_FLOAT(0.001) && p1 <= MAKE_FLOAT(0.001) && p2 <= MAKE_FLOAT(0.001) && p3 <= MAKE_FLOAT(0.001) && p4 <= MAKE_FLOAT(0.001) && p5 <= MAKE_FLOAT(0.001) )
    {
        // Disabled secondary check in reference coordinates, kept for reference:
        //ElVis::TensorPoint tp = TransformWorldToTensor(ray.origin);
        //if( tp.x <= -1 || tp.x >= 1 ||
        //    tp.y <= -1 || tp.y >= 1 ||
        //    tp.z <= -1 || tp.z >= 1 )
        //{
        //    return;
        //}
        if( rtPotentialIntersection( .1 ) )
        {
            intersectedHexId = hexId;
            intersectionPointPayload.elementId = hexId;
            intersectionPointPayload.elementType = 0;
            rtReportIntersection(0);
        }
    }
}
// Dispatch an OptiX intersection query against hex 'hexId' based on the ray type:
// type 1 is the "which element contains this point" query; everything else is a
// regular element-intersection ray.
RT_PROGRAM void HexahedronIntersection(int hexId)
{
    const bool isFindElementRay = (ray.ray_type == 1);
    if( !isFindElementRay )
    {
        FindRayElementIntersection(hexId);
        return;
    }
    CheckIfOriginIsInElement(hexId);
}
// Compute the axis-aligned bounding box of hex 'id' for OptiX acceleration
// structures by folding min/max over its eight corner vertices.
RT_PROGRAM void hexahedron_bounding (int id, float result[6])
{
    optix::Aabb* aabb = (optix::Aabb*)result;
    ElVisFloat4 corner = GetVertex(&HexVertexBuffer[0], id, 0);
    float minX = corner.x, minY = corner.y, minZ = corner.z;
    float maxX = corner.x, maxY = corner.y, maxZ = corner.z;
    for (int i = 1; i < 8; ++i)
    {
        corner = GetVertex(&HexVertexBuffer[0], id, i);
        minX = fminf(minX, corner.x); minY = fminf(minY, corner.y); minZ = fminf(minZ, corner.z);
        maxX = fmaxf(maxX, corner.x); maxY = fmaxf(maxY, corner.y); maxZ = fmaxf(maxZ, corner.z);
    }
    aabb->m_min.x = minX; aabb->m_min.y = minY; aabb->m_min.z = minZ;
    aabb->m_max.x = maxX; aabb->m_max.y = maxY; aabb->m_max.z = maxZ;
}
#endif
|
bdcdb4dd0174f06796f0218d8bfb88d1bcfa02de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Ex. 2
// =====
// Remove the call to hipDeviceReset()
//
// The result is: nothing is printed from the device to the shell.
#include <stdio.h>
// Device-side hello: each launched thread prints one line via device printf.
__global__
void helloFromGPU() {
    printf("Hello World from GPU!\n");
}
int main(int argc, char *argv[]) {
    // Hello from CPU
    printf("Hello World from CPU!\n");
    // Launch 10 threads in one block; the launch is asynchronous.
    hipLaunchKernelGGL(( helloFromGPU), dim3(1), dim3(10), 0, 0, );
    // hipDeviceReset();
    // The reset/synchronize is deliberately removed (the point of this
    // exercise, per the file header): the host exits before the device
    // printf buffer is flushed, so the GPU lines never appear.
    return 0;
}
| bdcdb4dd0174f06796f0218d8bfb88d1bcfa02de.cu | // Ex. 2
// =====
// Remove the call to cudaDeviceReset()
//
// The result is: nothing is printed from the device to the shell.
#include <stdio.h>
// Device-side hello: each launched thread prints one line via device printf.
__global__
void helloFromGPU() {
    printf("Hello World from GPU!\n");
}
int main(int argc, char *argv[]) {
    // Hello from CPU
    printf("Hello World from CPU!\n");
    // Launch 10 threads in one block; the launch is asynchronous.
    helloFromGPU<<<1, 10>>>();
    // cudaDeviceReset();
    // The reset/synchronize is deliberately removed (the point of this
    // exercise, per the file header): the host exits before the device
    // printf buffer is flushed, so the GPU lines never appear.
    return 0;
}
|
38257e909302dda19afc6ae2f26604afd54cb467.hip | // !!! This is a file automatically generated by hipify!!!
//xfail:BOOGIE_ERROR
//--warp-sync=32 --blockDim=32 --gridDim=1 --equality-abstraction --no-inline
//kernel.cu:10
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
#define N 2//32
// Deliberately racy kernel used as an expected-failure (xfail) case for the
// GPU verification tool configured at the top of the file: every thread writes
// A[0] and A[1], then each thread overwrites its own A[threadIdx.x].
__global__ void foo(int * A) {
    A[0] = 1;
    A[1] = 1;
    A[threadIdx.x] = 0;
    //__assert(A[0] == 1 | A[1] == 1 | A[2] == 1);
}
| 38257e909302dda19afc6ae2f26604afd54cb467.cu | //xfail:BOOGIE_ERROR
//--warp-sync=32 --blockDim=32 --gridDim=1 --equality-abstraction --no-inline
//kernel.cu:10
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
#define N 2//32
// Deliberately racy kernel used as an expected-failure (xfail) case for the
// GPU verification tool configured at the top of the file: every thread writes
// A[0] and A[1], then each thread overwrites its own A[threadIdx.x].
__global__ void foo(int * A) {
    A[0] = 1;
    A[1] = 1;
    A[threadIdx.x] = 0;
    //__assert(A[0] == 1 | A[1] == 1 | A[2] == 1);
}
|
8890683f2bec3f8f4fc8899f461311670be38f2d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Simple 3D volume renderer
#include <iostream>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <helper_cuda.h>
#include <helper_math.h>
#include "param.h"
using std::cout;
using std::endl;
#define M_PI 3.14159265358979323846
typedef unsigned int uint;
typedef unsigned char uchar;
//typedef unsigned short VolumeType;
typedef unsigned char VolumeType;
// Procedural quaternion Julia-set density field: density() returns 1 inside
// the fractal (points whose quaternion iteration stays bounded long enough)
// and 0 outside.
class FractalJuliaSet
{
    float radius;   // scale applied to the query position before iterating
    float4 cc;      // Julia constant c in the iteration q <- q^2 + c
    int maxIter;    // iteration cap for the escape test
    // Quaternion square of q = (q.x; q.y, q.z, q.w).
    __device__
    float4 quatSq(float4 q)
    {
        float3 q_yzw = make_float3(q.y, q.z, q.w);
        float r0 = q.x * q.x - dot(q_yzw, q_yzw);
        float3 r_yzw = q_yzw * (q.x * 2);
        return make_float4(
            r0,
            r_yzw.x,
            r_yzw.y,
            r_yzw.z);
    }
    // Escape-time test: iterate q <- q^2 + c from pos*radius until |q|^2 >= 10
    // or the cap is hit; returns 1.0f when the point survives more than 90% of
    // maxIter iterations, else 0.0f.
    __device__
    float eval_fractal(const float3& pos, float radius, const float4& c, int maxIter){
        float4 q = make_float4(pos.x * radius,
            pos.y * radius,
            pos.z * radius, 0);
        int iter = 0;
        do
        {
            q = quatSq(q);
            q += c;
        } while (dot(q, q) < 10.0f && iter++ < maxIter);
        // Alternative (graded/logarithmic) density mappings, kept for reference:
        //        return iter * (iter>5);
        //        return iter / float(maxIter);
        //        return log((float)iter+1) / log((float)maxIter);
        return (iter > maxIter * 0.9);
    }
public:
    // Binary density (0 or 1) of the fractal at pos.
    __device__
    float density(const float3& pos)
    {
        return eval_fractal(pos, radius, cc, maxIter);
    }
    // Hard-coded shape parameters; alternative Julia constants kept for experimentation.
    __device__
    FractalJuliaSet()
    {
        radius = 1.4f;// 3.0f;
        //        setFloat4(cc, -1, 0.2, 0, 0);
        //        setFloat4(cc, -0.291,-0.399,0.339,0.437);
        //        setFloat4(cc, -0.2,0.4,-0.4,-0.4);
        //        setFloat4(cc, -0.213,-0.0410,-0.563,-0.560);
        //        setFloat4(cc, -0.2,0.6,0.2,0.2);
        //        setFloat4(cc, -0.162,0.163,0.560,-0.599);
        cc = make_float4(-0.2f, 0.8f, 0.0f, 0.0f);
        //        setFloat4(cc, -0.445,0.339,-0.0889,-0.562);
        //        setFloat4(cc, 0.185,0.478,0.125,-0.392);
        //        setFloat4(cc, -0.450,-0.447,0.181,0.306);
        //        setFloat4(cc, -0.218,-0.113,-0.181,-0.496);
        //        setFloat4(cc, -0.137,-0.630,-0.475,-0.046);
        //        setFloat4(cc, -0.125,-0.256,0.847,0.0895);
        //        maxIter = 20;
        maxIter = 30;
    }
};
// Minimal per-thread RNG wrapper around hiprand's XORWOW generator; one
// instance per pixel is kept in global memory so sequences persist across
// kernel launches.
class CudaRng
{
    hiprandStateXORWOW_t state;
public:
    // Seed this thread's generator; call once before the first next().
    __device__
    void init(unsigned int seed)
    {
        hiprand_init(seed, 0, 0, &state);
    }
    // Next uniform float in (0, 1].
    __device__
    float next()
    {
        return hiprand_uniform(&state);
    }
};
hipArray *d_volumeArray = 0;
texture<VolumeType, 3, hipReadModeNormalizedFloat> density_tex; // 3D texture
CudaRng *cuda_rng = nullptr;
// Orthonormal coordinate frame (tangent, bitangent, normal) built from a single
// direction; used to transform phase-function samples into world space.
class Frame
{
    float3 n, t, b; // normal, tangent, bitangent
public:
    // Build the frame around 'normal' (normalized internally).
    __device__
    Frame(const float3& normal)
    {
        n = normalize(normal);
        // Choose a helper axis that is not nearly parallel to n, then derive the
        // tangent/bitangent via cross products.  Fix: use 0.1f instead of 0.1 so
        // the comparison stays in float and is not promoted to double on device.
        float3 a = fabs(n.x) > 0.1f ? make_float3(0, 1, 0) : make_float3(1, 0, 0);
        t = normalize(cross(a, n));
        b = cross(n, t);
    }
    // Transform a local-frame vector (c.z along the normal) to world space.
    __device__
    float3 toWorld(const float3& c) const
    {
        return t * c.x + b * c.y + n * c.z;
    }
    __device__
    const float3& normal() const
    {
        return n;
    }
    __device__
    const float3& tangent() const
    {
        return t;
    }
    __device__
    const float3& bitangent() const
    {
        return b;
    }
};
// Henyey-Greenstein phase function with asymmetry parameter g
// (g > 0 forward scattering, g < 0 backward, |g| ~ 0 isotropic).
class HGPhaseFunction
{
    float g;
    // perfect inversion, pdf matches evaluation exactly
    // Sample (cos_theta, phi) in the local frame whose z axis is the incident direction.
    __device__
    float3 sample(float rnd0, float rnd1) const
    {
        float cos_theta;
        if (fabs(g) > 1e-6f)
        {
            float s = 2.0f * rnd0 - 1.0f;
            float f = (1.0f - g * g) / (1.0f + g * s);
            cos_theta = (0.5f / g) * (1.0f + g * g - f * f);
            // Fix: clamp to [-1, 1].  The previous lower bound of 0 rejected every
            // backward-hemisphere sample, biasing all scattering into the forward
            // hemisphere; the HG inversion naturally yields values in [-1, 1] and
            // the clamp should only guard against rounding.
            cos_theta = max(-1.0f, min(1.0f, cos_theta));
        }
        else
        {
            // Isotropic limit: uniform cos_theta avoids dividing by g.
            cos_theta = 2.0f * rnd0 - 1.0f;
        }
        // Use float math functions / constants to avoid double promotion on device.
        float sin_theta = sqrtf(1.0f - cos_theta * cos_theta);
        float phi = 2.0f * (float)M_PI * rnd1;
        float3 ret = make_float3(cosf(phi) * sin_theta, sinf(phi) * sin_theta, cos_theta);
        return ret;
    }
    // HG pdf as a function of cos_theta, normalized over the sphere.
    __device__
    float evaluate(float cos_theta) const
    {
        return (1.0f - g * g) / (4.0f * (float)M_PI * powf(1.0f + g * g - 2 * g * cos_theta, 1.5f));
    }
public:
    __device__
    HGPhaseFunction(float g)
        : g(g)
    {
    }
    // Sample a world-space scattered direction about frame.normal().
    __device__
    float3 sample(const Frame& frame, float rnd0, float rnd1) const
    {
        float3 s = sample(rnd0, rnd1);
        return frame.toWorld(s);
    }
    // Evaluate the pdf for scattering from frame.normal() into dir.
    __device__
    float evaluate(const Frame& frame, const float3& dir) const
    {
        float cos_theta = dot(frame.normal(), dir);
        return evaluate(cos_theta);
    }
};
typedef struct
{
float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
struct Ray
{
float3 o; // origin
float3 d; // direction
};
// intersect ray with a box
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
// Slab test: returns nonzero iff the ray hits [boxmin, boxmax]; *tnear/*tfar
// receive the entry/exit distances (*tnear may be negative when the ray starts
// inside the box).
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
    // compute intersection of ray with all six bbox planes
    float3 invR = make_float3(1.0f) / r.d;
    float3 tbot = invR * (boxmin - r.o);
    float3 ttop = invR * (boxmax - r.o);
    // re-order intersections to find smallest and largest on each axis
    float3 tmin = fminf(ttop, tbot);
    float3 tmax = fmaxf(ttop, tbot);
    // find the largest tmin and the smallest tmax
    // (tmin.x/tmax.x appear twice in the folds; harmless, the result is still
    // the max/min over the three axes)
    float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
    float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
    *tnear = largest_tmin;
    *tfar = smallest_tmax;
    return smallest_tmax > largest_tmin;
}
// transform vector by matrix (no translation)
// Apply the rotational part of the 3x4 matrix M to direction v (translation,
// i.e. the fourth column, is ignored because v.w is implicitly zero).
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
    return make_float3(dot(v, make_float3(M.m[0])),
                       dot(v, make_float3(M.m[1])),
                       dot(v, make_float3(M.m[2])));
}
// transform vector by matrix with translation
// 3x4 affine transform of a homogeneous point; the result's w is forced to 1.
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
    float4 r;
    r.x = dot(v, M.m[0]);
    r.y = dot(v, M.m[1]);
    r.z = dot(v, M.m[2]);
    r.w = 1.0f;
    return r;
}
// Extinction coefficient sigma_t at 'pos': the binary procedural Julia-set
// fractal scaled by 'density'.  Earlier variants (uniform, 3D texture lookup,
// checkerboard) are kept below for reference.
__device__
float vol_sigma_t(const float3& pos, float density)
{
    //    return density;
    //    // remap position to [0, 1] coordinates
    //    float t = tex3D(density_tex, pos.x * 0.5f + 0.5f, pos.y * 0.5f + 0.5f, pos.z * 0.5f + 0.5f);
    //    t = clamp(t, 0.0f, 1.0f) * density;
    //    return t;
    //    float x = pos.x * 0.5f + 0.5f;
    //    float y = pos.y * 0.5f + 0.5f;
    //    float z = pos.z * 0.5f + 0.5f;
    //    int xi = (int)ceil(5.0 * x);
    //    int yi = (int)ceil(5.0 * y);
    //    int zi = (int)ceil(5.0 * z);
    //    return float((xi + yi + zi) & 0x01) * density;
    FractalJuliaSet fract;
    return fract.density(pos) * density;
}
// Binary transmittance sample between start_point and end_point through the
// volume bounded by [boxMin, boxMax], via delta (Woodcock) tracking with
// majorant 1/inv_sigma: returns 1.0f if the sampled free path escapes the
// segment (light visible), 0.0f if a real collision occurs first.
__device__
float Tr(
    const float3 boxMin,
    const float3 boxMax,
    const float3& start_point,
    const float3& end_point,
    float inv_sigma,
    float density,
    CudaRng& rng)
{
    Ray shadow_ray;
    shadow_ray.o = start_point;
    shadow_ray.d = normalize(end_point - start_point);
    float t_near, t_far;
    bool shade_vol = intersectBox(shadow_ray, boxMin, boxMax, &t_near, &t_far);
    if (!shade_vol)
    {
        // Segment never enters the volume: fully transparent.
        return 1.0f;
    }
    if (t_near < 0.0f) t_near = 0.0f; // clamp to near plane
    // Track only up to the closer of the box exit and the segment end.
    float max_t = min(t_far, length(start_point - end_point));
    float dist = t_near;
    for (;;)
    {
        // Tentative free-flight distance sampled against the majorant.
        dist += -log(rng.next()) * inv_sigma;
        if (dist >= max_t)
        {
            break;
        }
        float3 pos = shadow_ray.o + shadow_ray.d * dist;
        // Accept a real collision with probability sigma_t(pos) / majorant.
        if (rng.next() < vol_sigma_t(pos, density) * inv_sigma)
        {
            break;
        }
    }
    return float(dist >= max_t);
}
// Environment radiance for an escaped ray: a bluish constant scaled by a
// vertical gradient (brighter for upward-pointing directions).
__device__ __forceinline__
float4 background(const float3& dir)
{
    return make_float4(0.15f, 0.20f, 0.25f, 1.0f) * 0.5f * (dir.y + 0.5);
}
// One thread per pixel: volumetric path tracing through the unit cube
// [-1,1]^3 with delta tracking for distance sampling, next-event estimation
// toward a point light via Tr(), and HG phase-function scattering.  Results
// accumulate into d_output (xyz = radiance; w = a "heat" value from the
// number of bounces), so the caller averages over frames.
__global__ void
__d_render(float4 *d_output, CudaRng *rngs, const Param P)
{
    const float density = P.density;       // used below as the tracking majorant
    const float brightness = P.brightness;
    const float albedo = P.albedo;         // per-collision throughput scale
    const float3 light_pos = make_float3(100, 100, 100);
    const float3 light_power = make_float3(1.0, 0.9, 0.8);
    const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f);
    const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f);
    uint x = blockIdx.x*blockDim.x + threadIdx.x;
    uint y = blockIdx.y*blockDim.y + threadIdx.y;
    if ((x >= P.width) || (y >= P.height)) return;
    // Per-pixel RNG state persists in global memory across frames.
    CudaRng& rng = rngs[x + y * P.width];
    // Pixel mapped to [-1, 1] screen coordinates.
    float u = (x / (float) P.width) * 2.0f - 1.0f;
    float v = (y / (float) P.height) * 2.0f - 1.0f;
    HGPhaseFunction phase(P.g);
    // calculate eye ray in world space
    Ray cr;
    cr.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
    cr.d = normalize(make_float3(u, v, -2.0f));
    cr.d = mul(c_invViewMatrix, cr.d);
    float4 radiance = make_float4(0.0f);
    float throughput = 1.0f;
    int i;
    // Bounce loop; paths terminate by escaping the box (the cap is a safety net).
    for (i = 0; i < 20000; i++)
    {
        // find intersection with box
        float t_near, t_far;
        int hit = intersectBox(cr, boxMin, boxMax, &t_near, &t_far);
        if (!hit)
        {
            // Missed the volume entirely: pick up the environment and stop.
            radiance += background(cr.d) * throughput;
            break;
        }
        if (t_near < 0.0f)
        {
            t_near = 0.0f; // clamp to near plane
        }
        /// woodcock tracking / delta tracking
        float3 pos = cr.o + cr.d * t_near; // current position
        float dist = t_near;
        float max_sigma_t = density;
        float inv_sigma = 1.0f / max_sigma_t;
        bool through = false;
        // delta tracking scattering event sampling
        for (;;)
        {
            dist += -log(rng.next()) * inv_sigma;
            pos = cr.o + cr.d * dist;
            if (dist >= t_far)
            {
                through = true; // transmitted through the volume, probability is 1-exp(-optical_thickness)
                break;
            }
            if (rng.next() < vol_sigma_t(pos, density) * inv_sigma)
            {
                break;
            }
        }
        // probability is exp(-optical_thickness)
        if (through)
        {
            radiance += background(cr.d) * throughput;
            break;
        }
        // Real collision: attenuate by the albedo, then do next-event estimation.
        throughput *= albedo;
        Frame frame(cr.d);
        // direct lighting
        float a = Tr(boxMin, boxMax, pos, light_pos, inv_sigma, density, rng);
        radiance += make_float4(light_power * (throughput * phase.evaluate(frame, normalize(light_pos - pos)) * a), 0.0f);
        // scattered direction
        float3 new_dir = phase.sample(frame, rng.next(), rng.next());
        cr.o = pos;
        cr.d = new_dir;
    }
    radiance *= brightness;
    // write output color
    float heat = i * 0.001;
    d_output[x + y * P.width] += make_float4(
        max(radiance.x, 0.0f),
        max(radiance.y, 0.0f),
        max(radiance.z, 0.0f),
        heat);
}
extern "C"
// Choose trilinear interpolation or nearest-neighbour sampling for the
// density texture.
void set_texture_filter_mode(bool bLinearFilter)
{
    if (bLinearFilter)
    {
        density_tex.filterMode = hipFilterModeLinear;
    }
    else
    {
        density_tex.filterMode = hipFilterModePoint;
    }
}
extern "C"
// Upload the host volume into a device 3D array and bind it to density_tex
// so the render kernel can sample it with normalized, clamped coordinates.
void init_cuda(void *h_volume, hipExtent volumeSize)
{
    // create 3D array
    hipChannelFormatDesc channelDesc = hipCreateChannelDesc<VolumeType>();
    checkCudaErrors(hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize));
    // copy data to 3D array
    hipMemcpy3DParms copyParams = {0};
    copyParams.srcPtr = make_hipPitchedPtr(h_volume,
        volumeSize.width * sizeof(VolumeType), volumeSize.width, volumeSize.height);
    copyParams.dstArray = d_volumeArray;
    copyParams.extent = volumeSize;
    copyParams.kind = hipMemcpyHostToDevice;
    // fix: the argument had been corrupted to "©Params" (HTML-entity
    // mangling of "&copy..."), which does not compile
    checkCudaErrors(hipMemcpy3D(&copyParams));
    // set texture parameters
    density_tex.normalized = true;                    // access with normalized texture coordinates
    density_tex.filterMode = hipFilterModeLinear;     // linear interpolation
    density_tex.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates
    density_tex.addressMode[1] = hipAddressModeClamp;
    // bind array to 3D texture
    checkCudaErrors(hipBindTextureToArray(density_tex, d_volumeArray, channelDesc));
}
extern "C"
// Upload the 3x4 inverse view matrix into the c_invViewMatrix constant-memory
// symbol used by the render kernel. sizeofMatrix is the byte count to copy.
void copy_inv_view_matrix(float *invViewMatrix, size_t sizeofMatrix)
{
    checkCudaErrors(hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix));
}
extern "C"
// Release the 3D density volume array allocated by init_cuda.
void free_cuda_buffers()
{
    checkCudaErrors(hipFreeArray(d_volumeArray));
}
// Host-side Marsaglia xorshift128 generator, used only to produce seeds for
// the per-pixel device RNGs. State lives in mutable namespace globals, so it
// is not thread-safe and produces the same seed sequence on every run.
namespace XORShift
{
    // XOR shift PRNG
    unsigned int x = 123456789;
    unsigned int y = 362436069;
    unsigned int z = 521288629;
    unsigned int w = 88675123;
    inline unsigned int frand()
    {
        unsigned int t;
        t = x ^ (x << 11);
        x = y; y = z; z = w;
        return (w = (w ^ (w >> 19)) ^ (t ^ (t >> 8)));
    }
}
__global__
// Kernel: initialize one CudaRng per pixel from its pre-generated seed.
void __init_rng(CudaRng *rng, int width, int height, unsigned int *seeds)
{
    uint col = blockIdx.x * blockDim.x + threadIdx.x;
    uint row = blockIdx.y * blockDim.y + threadIdx.y;
    // threads past the image edge do nothing
    if (col < width && row < height)
    {
        int cell = col + row * width;
        rng[cell].init(seeds[cell]);
    }
}
extern "C"
// Allocate one CudaRng per pixel and seed each from the host-side XOR-shift
// PRNG so every pixel gets an independent stream.
void init_rng(dim3 gridSize, dim3 blockSize, int width, int height)
{
    cout << "init cuda rng to " << width << " x " << height << endl;
    unsigned int *seeds;
    checkCudaErrors(hipMallocManaged(&seeds, sizeof(unsigned int) * width * height));
    checkCudaErrors(hipDeviceSynchronize());
    for (int i = 0; i < width * height; ++i)
    {
        seeds[i] = XORShift::frand();
    }
    // make the host-written managed seed buffer visible to the device
    checkCudaErrors(hipDeviceSynchronize());
    checkCudaErrors(hipMalloc(&cuda_rng, sizeof(CudaRng) * width * height));
    __init_rng << <gridSize, blockSize >> >(cuda_rng, width, height, seeds);
    // surface launch-configuration errors and make sure the kernel has
    // finished reading `seeds` before the buffer is released
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipDeviceSynchronize());
    checkCudaErrors(hipFree(seeds));
}
extern "C"
// Release the per-pixel RNG state allocated by init_rng.
void free_rng()
{
    cout << "free cuda rng" << endl;
    checkCudaErrors(hipFree(cuda_rng));
}
extern "C"
// Launch one path-tracing pass; results accumulate into d_output.
// init_rng must have been called first so cuda_rng is valid.
void render_kernel(dim3 gridSize, dim3 blockSize, float4 *d_output, const Param& p)
{
    hipLaunchKernelGGL(( __d_render), dim3(gridSize), dim3(blockSize), 0, 0, d_output, cuda_rng, p);
}
__global__
// Kernel: dst[i] = src[i] * scale, one thread per element.
void __scale(float4 *dst, float4 *src, int size, float scale)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // threads past the end of the buffers do nothing
    if (i < size)
    {
        dst[i] = src[i] * scale;
    }
}
extern "C"
// Host wrapper: scale `size` float4 elements from src into dst on the device.
// Launches 256 threads per block with ceil(size / 256) blocks.
void scale(float4 *dst, float4 *src, int size, float scale)
{
    __scale << <(size + 256 - 1) / 256, 256 >> >(dst, src, size, scale);
    // surface launch errors (e.g. invalid configuration) immediately
    checkCudaErrors(hipGetLastError());
}
| 8890683f2bec3f8f4fc8899f461311670be38f2d.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Simple 3D volume renderer
#include <iostream>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <helper_cuda.h>
#include <helper_math.h>
#include "param.h"
using std::cout;
using std::endl;
#define M_PI 3.14159265358979323846
typedef unsigned int uint;
typedef unsigned char uchar;
//typedef unsigned short VolumeType;
typedef unsigned char VolumeType;
// Procedural density field: a quaternion Julia set evaluated per sample.
// density() returns 1 inside the set (escape iteration count above 90% of
// maxIter) and 0 outside, i.e. a binary occupancy used as the volume density.
class FractalJuliaSet
{
    float radius;   // scale applied to the sample position before iteration
    float4 cc;      // Julia-set constant c (quaternion)
    int maxIter;    // iteration budget for the escape test
    // Quaternion square q*q (Hamilton product of q with itself).
    __device__
    float4 quatSq(float4 q)
    {
        float3 q_yzw = make_float3(q.y, q.z, q.w);
        float r0 = q.x * q.x - dot(q_yzw, q_yzw);
        float3 r_yzw = q_yzw * (q.x * 2);
        return make_float4(
            r0,
            r_yzw.x,
            r_yzw.y,
            r_yzw.z);
    }
    // Iterate q <- q^2 + c from the (scaled) sample position and classify by
    // how long the orbit stays bounded (|q|^2 < 10).
    __device__
    float eval_fractal(const float3& pos, float radius, const float4& c, int maxIter){
        float4 q = make_float4(pos.x * radius,
            pos.y * radius,
            pos.z * radius, 0);
        int iter = 0;
        do
        {
            q = quatSq(q);
            q += c;
        } while (dot(q, q) < 10.0f && iter++ < maxIter);
//         return iter * (iter>5);
//         return iter / float(maxIter);
//         return log((float)iter+1) / log((float)maxIter);
        // binary inside/outside classification
        return (iter > maxIter * 0.9);
    }
public:
    // Density at pos: 1 inside the Julia set, 0 outside.
    __device__
    float density(const float3& pos)
    {
        return eval_fractal(pos, radius, cc, maxIter);
    }
    // The alternative constants below give other interesting set shapes.
    __device__
    FractalJuliaSet()
    {
        radius = 1.4f;// 3.0f;
//         setFloat4(cc, -1, 0.2, 0, 0);
//         setFloat4(cc, -0.291,-0.399,0.339,0.437);
//         setFloat4(cc, -0.2,0.4,-0.4,-0.4);
//         setFloat4(cc, -0.213,-0.0410,-0.563,-0.560);
//         setFloat4(cc, -0.2,0.6,0.2,0.2);
//         setFloat4(cc, -0.162,0.163,0.560,-0.599);
        cc = make_float4(-0.2f, 0.8f, 0.0f, 0.0f);
//         setFloat4(cc, -0.445,0.339,-0.0889,-0.562);
//         setFloat4(cc, 0.185,0.478,0.125,-0.392);
//         setFloat4(cc, -0.450,-0.447,0.181,0.306);
//         setFloat4(cc, -0.218,-0.113,-0.181,-0.496);
//         setFloat4(cc, -0.137,-0.630,-0.475,-0.046);
//         setFloat4(cc, -0.125,-0.256,0.847,0.0895);
//         maxIter = 20;
        maxIter = 30;
    }
};
// Thin per-thread wrapper around curand's XORWOW generator. One instance is
// kept per pixel; init() must be called once before next().
class CudaRng
{
    curandStateXORWOW_t state;
public:
    // Seed this generator (sequence 0, offset 0).
    __device__
    void init(unsigned int seed)
    {
        curand_init(seed, 0, 0, &state);
    }
    // Uniform float in (0, 1].
    __device__
    float next()
    {
        return curand_uniform(&state);
    }
};
cudaArray *d_volumeArray = 0;   // 3D density volume storage; freed in free_cuda_buffers
texture<VolumeType, 3, cudaReadModeNormalizedFloat> density_tex; // 3D texture
CudaRng *cuda_rng = nullptr;    // per-pixel RNG states; allocated in init_rng
// Orthonormal coordinate frame built around a given normal direction.
// Used to transform phase-function samples from local to world space.
class Frame
{
    float3 n, t, b; // normal, tangent, bitangent
public:
    // Build a frame whose z axis is `normal` (normalized here); the tangent
    // is derived from an arbitrary axis chosen to avoid degeneracy when the
    // normal is nearly parallel to it.
    __device__
    Frame(const float3& normal)
    {
        n = normalize(normal);
        float3 a = fabs(n.x) > 0.1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0);
        t = normalize(cross(a, n));
        b = cross(n, t);
    }
    // Local (t, b, n) coordinates -> world space.
    __device__
    float3 toWorld(const float3& c) const
    {
        return t * c.x + b * c.y + n * c.z;
    }
    __device__
    const float3& normal() const
    {
        return n;
    }
    __device__
    const float3& tangent() const
    {
        return t;
    }
    __device__
    const float3& bitangent() const
    {
        return b;
    }
};
// Henyey-Greenstein phase function with asymmetry parameter g.
// sample() draws directions whose pdf equals evaluate(), so the two can be
// used together without a separate pdf term (perfect importance sampling).
class HGPhaseFunction
{
    float g;
    // Sample a direction in local coordinates (z axis = incident direction).
    __device__
    float3 sample(float rnd0, float rnd1) const
    {
        float cos_theta;
        if (fabs(g) > 1e-6f)
        {
            // invert the HG CDF; s is uniform in [-1, 1]
            float s = 2.0f * rnd0 - 1.0f;
            float f = (1.0f - g * g) / (1.0f + g * s);
            cos_theta = (0.5f / g) * (1.0f + g * g - f * f);
            // clamp to the full valid cosine range [-1, 1]; the previous
            // clamp to [0, 1] wrongly discarded all backward scattering
            cos_theta = max(-1.0f, min(1.0f, cos_theta));
        }
        else
        {
            // isotropic limit for g ~ 0
            cos_theta = 2.0f * rnd0 - 1.0f;
        }
        float sin_theta = sqrt(1.0f - cos_theta * cos_theta);
        float phi = 2.0f * M_PI * rnd1;
        float3 ret = make_float3(cos(phi) * sin_theta, sin(phi) * sin_theta, cos_theta);
        return ret;
    }
    // HG pdf as a function of the scattering-angle cosine.
    __device__
    float evaluate(float cos_theta) const
    {
        return (1.0f - g * g) / (4.0f * M_PI * pow(1.0f + g * g - 2 * g * cos_theta, 1.5f));
    }
public:
    __device__
    HGPhaseFunction(float g)
        : g(g)
    {
    }
    // Sample a world-space scattered direction around frame.normal().
    __device__
    float3 sample(const Frame& frame, float rnd0, float rnd1) const
    {
        float3 s = sample(rnd0, rnd1);
        return frame.toWorld(s);
    }
    // Evaluate the phase function for a world-space direction.
    __device__
    float evaluate(const Frame& frame, const float3& dir) const
    {
        float cos_theta = dot(frame.normal(), dir);
        return evaluate(cos_theta);
    }
};
// Row-major 3x4 transform: three rows of float4 (rotation + translation).
typedef struct
{
    float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
// Ray with origin and direction (direction is normalized by the callers).
struct Ray
{
    float3 o; // origin
    float3 d; // direction
};
// Slab-method ray/AABB intersection.
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
// On return *tnear/*tfar hold the entry/exit distances; the result is nonzero
// iff the ray hits the box.
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
    // distances to the two planes of each axis-aligned slab
    float3 invR = make_float3(1.0f) / r.d;
    float3 tA = invR * (boxmin - r.o);
    float3 tB = invR * (boxmax - r.o);
    // per-axis entry and exit distances
    float3 tEnter = fminf(tB, tA);
    float3 tExit = fmaxf(tB, tA);
    // the ray is inside the box between the latest entry and earliest exit
    float tNear = fmaxf(tEnter.x, fmaxf(tEnter.y, tEnter.z));
    float tFar = fminf(tExit.x, fminf(tExit.y, tExit.z));
    *tnear = tNear;
    *tfar = tFar;
    return tFar > tNear;
}
// transform vector by matrix (no translation)
// Rotates direction v by the 3x3 part of M (the float4 rows' w components
// are ignored because v has no homogeneous coordinate).
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
    float3 r;
    r.x = dot(v, make_float3(M.m[0]));
    r.y = dot(v, make_float3(M.m[1]));
    r.z = dot(v, make_float3(M.m[2]));
    return r;
}
// transform vector by matrix with translation
// Full 3x4 transform of a homogeneous point; the result's w is forced to 1.
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
    float4 r;
    r.x = dot(v, M.m[0]);
    r.y = dot(v, M.m[1]);
    r.z = dot(v, M.m[2]);
    r.w = 1.0f;
    return r;
}
// Extinction coefficient sigma_t at a world position: the fractal's binary
// occupancy scaled by the user density. The commented-out variants sample
// the bound 3D texture or a checkerboard instead.
__device__
float vol_sigma_t(const float3& pos, float density)
{
//     return density;
//     // remap position to [0, 1] coordinates
//     float t = tex3D(density_tex, pos.x * 0.5f + 0.5f, pos.y * 0.5f + 0.5f, pos.z * 0.5f + 0.5f);
//     t = clamp(t, 0.0f, 1.0f) * density;
//     return t;
//     float x = pos.x * 0.5f + 0.5f;
//     float y = pos.y * 0.5f + 0.5f;
//     float z = pos.z * 0.5f + 0.5f;
//     int xi = (int)ceil(5.0 * x);
//     int yi = (int)ceil(5.0 * y);
//     int zi = (int)ceil(5.0 * z);
//     return float((xi + yi + zi) & 0x01) * density;
    FractalJuliaSet fract;
    return fract.density(pos) * density;
}
// One-sample transmittance estimator between start_point and end_point via
// delta tracking against the same majorant used by the main tracker
// (inv_sigma = 1 / majorant). Returns 1.0f if the tracking sample crossed
// the whole segment without a real collision, 0.0f otherwise (binary but
// unbiased in expectation).
__device__
float Tr(
    const float3 boxMin,
    const float3 boxMax,
    const float3& start_point,
    const float3& end_point,
    float inv_sigma,
    float density,
    CudaRng& rng)
{
    Ray shadow_ray;
    shadow_ray.o = start_point;
    shadow_ray.d = normalize(end_point - start_point);
    float t_near, t_far;
    bool shade_vol = intersectBox(shadow_ray, boxMin, boxMax, &t_near, &t_far);
    if (!shade_vol)
    {
        // the segment never enters the volume: fully transparent
        return 1.0f;
    }
    if (t_near < 0.0f) t_near = 0.0f; // clamp to near plane
    // stop at whichever comes first: box exit or the end point itself
    float max_t = min(t_far, length(start_point - end_point));
    float dist = t_near;
    for (;;)
    {
        dist += -log(rng.next()) * inv_sigma;   // free-flight distance
        if (dist >= max_t)
        {
            break;  // escaped the segment with no collision
        }
        float3 pos = shadow_ray.o + shadow_ray.d * dist;
        // real-collision test against the local extinction
        if (rng.next() < vol_sigma_t(pos, density) * inv_sigma)
        {
            break;
        }
    }
    return float(dist >= max_t);
}
__device__ __forceinline__
// Environment lookup: a simple vertical sky gradient that brightens toward
// +y. Used when a ray misses or escapes the volume bounding box.
float4 background(const float3& dir)
{
    // 0.5f literal keeps the arithmetic in single precision; the original
    // bare 0.5 promoted the whole expression to double on the device.
    return make_float4(0.15f, 0.20f, 0.25f, 1.0f) * 0.5f * (dir.y + 0.5f);
}
// Progressive volumetric path tracer: one thread per pixel, one path per
// launch. Delta (Woodcock) tracking through the [-1,1]^3 density volume with
// next-event-estimation direct lighting at every scattering event;
// accumulates into d_output (x/y/z = radiance sum, w = bounce "heat").
__global__ void
__d_render(float4 *d_output, CudaRng *rngs, const Param P)
{
    const float density = P.density;       // majorant extinction for tracking
    const float brightness = P.brightness;
    const float albedo = P.albedo;         // scattering albedo per event
    const float3 light_pos = make_float3(100, 100, 100);
    const float3 light_power = make_float3(1.0, 0.9, 0.8);
    const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f);
    const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f);
    uint x = blockIdx.x*blockDim.x + threadIdx.x;
    uint y = blockIdx.y*blockDim.y + threadIdx.y;
    if ((x >= P.width) || (y >= P.height)) return;
    CudaRng& rng = rngs[x + y * P.width];  // per-pixel RNG stream
    // pixel position mapped to [-1, 1] screen coordinates
    float u = (x / (float) P.width) * 2.0f - 1.0f;
    float v = (y / (float) P.height) * 2.0f - 1.0f;
    HGPhaseFunction phase(P.g);
    // calculate eye ray in world space
    Ray cr;
    cr.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
    cr.d = normalize(make_float3(u, v, -2.0f));
    cr.d = mul(c_invViewMatrix, cr.d);
    float4 radiance = make_float4(0.0f);
    float throughput = 1.0f;               // path attenuation so far
    int i;
    for (i = 0; i < 20000; i++)            // hard cap on scattering events
    {
        // find intersection with box
        float t_near, t_far;
        int hit = intersectBox(cr, boxMin, boxMax, &t_near, &t_far);
        if (!hit)
        {
            // ray leaves the scene: add environment and terminate the path
            radiance += background(cr.d) * throughput;
            break;
        }
        if (t_near < 0.0f)
        {
            t_near = 0.0f; // clamp to near plane
        }
        /// woodcock tracking / delta tracking
        float3 pos = cr.o + cr.d * t_near; // current position
        float dist = t_near;
        float max_sigma_t = density;       // majorant (assumed >= sigma_t everywhere)
        float inv_sigma = 1.0f / max_sigma_t;
        bool through = false;
        // delta tracking scattering event sampling
        for (;;)
        {
            dist += -log(rng.next()) * inv_sigma;  // free-flight sampling
            pos = cr.o + cr.d * dist;
            if (dist >= t_far)
            {
                through = true; // transmitted through the volume, probability is 1-exp(-optical_thickness)
                break;
            }
            // accept a real collision with probability sigma_t(pos) / majorant
            if (rng.next() < vol_sigma_t(pos, density) * inv_sigma)
            {
                break;
            }
        }
        // probability is exp(-optical_thickness)
        if (through)
        {
            radiance += background(cr.d) * throughput;
            break;
        }
        throughput *= albedo;              // energy lost to absorption at the event
        Frame frame(cr.d);
        // direct lighting
        float a = Tr(boxMin, boxMax, pos, light_pos, inv_sigma, density, rng);
        radiance += make_float4(light_power * (throughput * phase.evaluate(frame, normalize(light_pos - pos)) * a), 0.0f);
        // scattered direction
        float3 new_dir = phase.sample(frame, rng.next(), rng.next());
        cr.o = pos;
        cr.d = new_dir;
    }
    radiance *= brightness;
    // write output color
    float heat = i * 0.001;                // scaled bounce count for diagnostics
    d_output[x + y * P.width] += make_float4(
        max(radiance.x, 0.0f),
        max(radiance.y, 0.0f),
        max(radiance.z, 0.0f),
        heat);
}
extern "C"
// Choose trilinear interpolation or nearest-neighbour sampling for the
// density texture.
void set_texture_filter_mode(bool bLinearFilter)
{
    if (bLinearFilter)
    {
        density_tex.filterMode = cudaFilterModeLinear;
    }
    else
    {
        density_tex.filterMode = cudaFilterModePoint;
    }
}
extern "C"
// Upload the host volume into a device 3D array and bind it to density_tex
// so the render kernel can sample it with normalized, clamped coordinates.
void init_cuda(void *h_volume, cudaExtent volumeSize)
{
    // create 3D array
    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<VolumeType>();
    checkCudaErrors(cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize));
    // copy data to 3D array
    cudaMemcpy3DParms copyParams = {0};
    copyParams.srcPtr = make_cudaPitchedPtr(h_volume,
        volumeSize.width * sizeof(VolumeType), volumeSize.width, volumeSize.height);
    copyParams.dstArray = d_volumeArray;
    copyParams.extent = volumeSize;
    copyParams.kind = cudaMemcpyHostToDevice;
    // fix: the argument had been corrupted to "©Params" (HTML-entity
    // mangling of "&copy..."), which does not compile
    checkCudaErrors(cudaMemcpy3D(&copyParams));
    // set texture parameters
    density_tex.normalized = true;                     // access with normalized texture coordinates
    density_tex.filterMode = cudaFilterModeLinear;     // linear interpolation
    density_tex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates
    density_tex.addressMode[1] = cudaAddressModeClamp;
    // bind array to 3D texture
    checkCudaErrors(cudaBindTextureToArray(density_tex, d_volumeArray, channelDesc));
}
extern "C"
// Upload the 3x4 inverse view matrix into the c_invViewMatrix constant-memory
// symbol used by the render kernel. sizeofMatrix is the byte count to copy.
void copy_inv_view_matrix(float *invViewMatrix, size_t sizeofMatrix)
{
    checkCudaErrors(cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix));
}
extern "C"
// Release the 3D density volume array allocated by init_cuda.
void free_cuda_buffers()
{
    checkCudaErrors(cudaFreeArray(d_volumeArray));
}
// Host-side Marsaglia xorshift128 generator, used only to produce seeds for
// the per-pixel device RNGs. State lives in mutable namespace globals, so it
// is not thread-safe and produces the same seed sequence on every run.
namespace XORShift
{
    // XOR shift PRNG
    unsigned int x = 123456789;
    unsigned int y = 362436069;
    unsigned int z = 521288629;
    unsigned int w = 88675123;
    inline unsigned int frand()
    {
        unsigned int t;
        t = x ^ (x << 11);
        x = y; y = z; z = w;
        return (w = (w ^ (w >> 19)) ^ (t ^ (t >> 8)));
    }
}
__global__
// Kernel: initialize one CudaRng per pixel from its pre-generated seed.
void __init_rng(CudaRng *rng, int width, int height, unsigned int *seeds)
{
    uint col = blockIdx.x * blockDim.x + threadIdx.x;
    uint row = blockIdx.y * blockDim.y + threadIdx.y;
    // threads past the image edge do nothing
    if (col < width && row < height)
    {
        int cell = col + row * width;
        rng[cell].init(seeds[cell]);
    }
}
extern "C"
// Allocate one CudaRng per pixel and seed each from the host-side XOR-shift
// PRNG so every pixel gets an independent stream.
void init_rng(dim3 gridSize, dim3 blockSize, int width, int height)
{
    cout << "init cuda rng to " << width << " x " << height << endl;
    unsigned int *seeds;
    checkCudaErrors(cudaMallocManaged(&seeds, sizeof(unsigned int) * width * height));
    checkCudaErrors(cudaDeviceSynchronize());
    for (int i = 0; i < width * height; ++i)
    {
        seeds[i] = XORShift::frand();
    }
    // make the host-written managed seed buffer visible to the device
    checkCudaErrors(cudaDeviceSynchronize());
    checkCudaErrors(cudaMalloc(&cuda_rng, sizeof(CudaRng) * width * height));
    __init_rng << <gridSize, blockSize >> >(cuda_rng, width, height, seeds);
    // surface launch-configuration errors and make sure the kernel has
    // finished reading `seeds` before the buffer is released
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());
    checkCudaErrors(cudaFree(seeds));
}
extern "C"
// Release the per-pixel RNG state allocated by init_rng.
void free_rng()
{
    cout << "free cuda rng" << endl;
    checkCudaErrors(cudaFree(cuda_rng));
}
extern "C"
// Launch one path-tracing pass; results accumulate into d_output.
// init_rng must have been called first so cuda_rng is valid.
void render_kernel(dim3 gridSize, dim3 blockSize, float4 *d_output, const Param& p)
{
    __d_render<<<gridSize, blockSize>>>(d_output, cuda_rng, p);
}
__global__
// Kernel: dst[i] = src[i] * scale, one thread per element.
void __scale(float4 *dst, float4 *src, int size, float scale)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // threads past the end of the buffers do nothing
    if (i < size)
    {
        dst[i] = src[i] * scale;
    }
}
extern "C"
// Host wrapper: scale `size` float4 elements from src into dst on the device.
// Launches 256 threads per block with ceil(size / 256) blocks.
void scale(float4 *dst, float4 *src, int size, float scale)
{
    __scale << <(size + 256 - 1) / 256, 256 >> >(dst, src, size, scale);
    // surface launch errors (e.g. invalid configuration) immediately
    checkCudaErrors(cudaGetLastError());
}
|
64cc0dbd60a64dacc240f4108a8cb18e172cbc8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Auto-generated by TVM: one tile of a single-precision matrix product.
// From the indexing, K appears to be 512 (32 outer steps of 16); each block
// computes a 16-wide output strip, and each thread (threadIdx.x in [0, 4))
// accumulates 4 adjacent outputs — TODO confirm against the TVM schedule.
// Generated code: regenerate rather than hand-editing.
extern "C" __global__ void default_function_kernel0(void* __restrict__ A, void* __restrict__ B, void* __restrict__ compute) {
  float compute_local[4];           // per-thread accumulators (4 outputs)
  __shared__ float A_shared[16];    // 16-wide slice of A for this k_outer step
  __shared__ float B_shared[256];   // 16x16 tile of B for this k_outer step
  float A_shared_local[1];
  float B_shared_local[4];
  for (int j_c_init = 0; j_c_init < 4; ++j_c_init) {
    compute_local[(j_c_init)] = 0.000000e+00f;
  }
  for (int k_outer = 0; k_outer < 32; ++k_outer) {
    // barrier before refilling shared tiles still being read below
    __syncthreads();
    A_shared[((((int)threadIdx.x) * 4))] = ((float*)A)[((((((int)blockIdx.y) * 512) + (k_outer * 16)) + (((int)threadIdx.x) * 4)))];
    A_shared[(((((int)threadIdx.x) * 4) + 1))] = ((float*)A)[(((((((int)blockIdx.y) * 512) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 1))];
    A_shared[(((((int)threadIdx.x) * 4) + 2))] = ((float*)A)[(((((((int)blockIdx.y) * 512) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 2))];
    A_shared[(((((int)threadIdx.x) * 4) + 3))] = ((float*)A)[(((((((int)blockIdx.y) * 512) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 3))];
    for (int ax1_inner = 0; ax1_inner < 16; ++ax1_inner) {
      B_shared[(((ax1_inner * 16) + (((int)threadIdx.x) * 4)))] = ((float*)B)[(((((((int)blockIdx.x) * 8192) + (ax1_inner * 512)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)))];
      B_shared[((((ax1_inner * 16) + (((int)threadIdx.x) * 4)) + 1))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (ax1_inner * 512)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 1))];
      B_shared[((((ax1_inner * 16) + (((int)threadIdx.x) * 4)) + 2))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (ax1_inner * 512)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 2))];
      B_shared[((((ax1_inner * 16) + (((int)threadIdx.x) * 4)) + 3))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (ax1_inner * 512)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 3))];
    }
    // barrier so the freshly loaded tiles are visible to all threads
    __syncthreads();
    for (int k_inner = 0; k_inner < 16; ++k_inner) {
      A_shared_local[(0)] = A_shared[(k_inner)];
      B_shared_local[(0)] = B_shared[(((((int)threadIdx.x) * 64) + k_inner))];
      B_shared_local[(1)] = B_shared[((((((int)threadIdx.x) * 64) + k_inner) + 16))];
      B_shared_local[(2)] = B_shared[((((((int)threadIdx.x) * 64) + k_inner) + 32))];
      B_shared_local[(3)] = B_shared[((((((int)threadIdx.x) * 64) + k_inner) + 48))];
      compute_local[(0)] = (compute_local[(0)] + (A_shared_local[(0)] * B_shared_local[(0)]));
      compute_local[(1)] = (compute_local[(1)] + (A_shared_local[(0)] * B_shared_local[(1)]));
      compute_local[(2)] = (compute_local[(2)] + (A_shared_local[(0)] * B_shared_local[(2)]));
      compute_local[(3)] = (compute_local[(3)] + (A_shared_local[(0)] * B_shared_local[(3)]));
    }
  }
  // write back the 4 accumulated outputs for this thread
  ((float*)compute)[((((((int)blockIdx.y) * 2048) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)))] = compute_local[(0)];
  ((float*)compute)[(((((((int)blockIdx.y) * 2048) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = compute_local[(1)];
  ((float*)compute)[(((((((int)blockIdx.y) * 2048) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = compute_local[(2)];
  ((float*)compute)[(((((((int)blockIdx.y) * 2048) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = compute_local[(3)];
}
| 64cc0dbd60a64dacc240f4108a8cb18e172cbc8d.cu | extern "C" __global__ void default_function_kernel0(void* __restrict__ A, void* __restrict__ B, void* __restrict__ compute) {
float compute_local[4];
__shared__ float A_shared[16];
__shared__ float B_shared[256];
float A_shared_local[1];
float B_shared_local[4];
for (int j_c_init = 0; j_c_init < 4; ++j_c_init) {
compute_local[(j_c_init)] = 0.000000e+00f;
}
for (int k_outer = 0; k_outer < 32; ++k_outer) {
__syncthreads();
A_shared[((((int)threadIdx.x) * 4))] = ((float*)A)[((((((int)blockIdx.y) * 512) + (k_outer * 16)) + (((int)threadIdx.x) * 4)))];
A_shared[(((((int)threadIdx.x) * 4) + 1))] = ((float*)A)[(((((((int)blockIdx.y) * 512) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 1))];
A_shared[(((((int)threadIdx.x) * 4) + 2))] = ((float*)A)[(((((((int)blockIdx.y) * 512) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 2))];
A_shared[(((((int)threadIdx.x) * 4) + 3))] = ((float*)A)[(((((((int)blockIdx.y) * 512) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 3))];
for (int ax1_inner = 0; ax1_inner < 16; ++ax1_inner) {
B_shared[(((ax1_inner * 16) + (((int)threadIdx.x) * 4)))] = ((float*)B)[(((((((int)blockIdx.x) * 8192) + (ax1_inner * 512)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)))];
B_shared[((((ax1_inner * 16) + (((int)threadIdx.x) * 4)) + 1))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (ax1_inner * 512)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 1))];
B_shared[((((ax1_inner * 16) + (((int)threadIdx.x) * 4)) + 2))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (ax1_inner * 512)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 2))];
B_shared[((((ax1_inner * 16) + (((int)threadIdx.x) * 4)) + 3))] = ((float*)B)[((((((((int)blockIdx.x) * 8192) + (ax1_inner * 512)) + (k_outer * 16)) + (((int)threadIdx.x) * 4)) + 3))];
}
__syncthreads();
for (int k_inner = 0; k_inner < 16; ++k_inner) {
A_shared_local[(0)] = A_shared[(k_inner)];
B_shared_local[(0)] = B_shared[(((((int)threadIdx.x) * 64) + k_inner))];
B_shared_local[(1)] = B_shared[((((((int)threadIdx.x) * 64) + k_inner) + 16))];
B_shared_local[(2)] = B_shared[((((((int)threadIdx.x) * 64) + k_inner) + 32))];
B_shared_local[(3)] = B_shared[((((((int)threadIdx.x) * 64) + k_inner) + 48))];
compute_local[(0)] = (compute_local[(0)] + (A_shared_local[(0)] * B_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (A_shared_local[(0)] * B_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (A_shared_local[(0)] * B_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (A_shared_local[(0)] * B_shared_local[(3)]));
}
}
((float*)compute)[((((((int)blockIdx.y) * 2048) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)))] = compute_local[(0)];
((float*)compute)[(((((((int)blockIdx.y) * 2048) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = compute_local[(1)];
((float*)compute)[(((((((int)blockIdx.y) * 2048) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = compute_local[(2)];
((float*)compute)[(((((((int)blockIdx.y) * 2048) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = compute_local[(3)];
}
|
cab0149bb1886b2e08d047d2aa517ddb8427db2e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
// Per-argument leading dimensions cached in device constant memory; the *_h
// host shadows track what is currently uploaded so redundant
// hipMemcpyToSymbol calls can be skipped by the host stub.
__constant__ int xdim0_update_halo_kernel3_plus_4_a;
int xdim0_update_halo_kernel3_plus_4_a_h = -1;
__constant__ int ydim0_update_halo_kernel3_plus_4_a;
int ydim0_update_halo_kernel3_plus_4_a_h = -1;
__constant__ int xdim1_update_halo_kernel3_plus_4_a;
int xdim1_update_halo_kernel3_plus_4_a_h = -1;
__constant__ int ydim1_update_halo_kernel3_plus_4_a;
int ydim1_update_halo_kernel3_plus_4_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
// 3D (x, y, z) -> flat index macros using the cached leading dimensions.
#define OPS_ACC0(x, y, z)                                                      \
  (x + xdim0_update_halo_kernel3_plus_4_a * (y) +                              \
   xdim0_update_halo_kernel3_plus_4_a * ydim0_update_halo_kernel3_plus_4_a *   \
       (z))
#define OPS_ACC1(x, y, z)                                                      \
  (x + xdim1_update_halo_kernel3_plus_4_a * (y) +                              \
   xdim1_update_halo_kernel3_plus_4_a * ydim1_update_halo_kernel3_plus_4_a *   \
       (z))
// user function
// Halo update: copy the value 4 cells inward (+y) into the boundary cell for
// whichever of the two flux fields is enabled in `fields`.
__device__
inline void
    update_halo_kernel3_plus_4_a(double *vol_flux_x, double *mass_flux_x,
                                 const int *fields) {
  if (fields[FIELD_VOL_FLUX_X] == 1)
    vol_flux_x[OPS_ACC0(0, 0, 0)] = vol_flux_x[OPS_ACC0(0, 4, 0)];
  if (fields[FIELD_MASS_FLUX_X] == 1)
    mass_flux_x[OPS_ACC1(0, 0, 0)] = mass_flux_x[OPS_ACC1(0, 4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
// Kernel wrapper: one thread per grid point in the size0 x size1 x size2
// iteration range. Each thread offsets the argument base pointers to its own
// element, then applies the user halo-update function.
__global__ void ops_update_halo_kernel3_plus_4_a(double *__restrict arg0,
                                                 double *__restrict arg1,
                                                 const int *__restrict arg2,
                                                 int size0, int size1,
                                                 int size2) {
  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  // advance each base pointer to this thread's (x, y, z) element
  arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_plus_4_a +
          idx_z * 1 * 1 * xdim0_update_halo_kernel3_plus_4_a *
              ydim0_update_halo_kernel3_plus_4_a;
  arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_plus_4_a +
          idx_z * 1 * 1 * xdim1_update_halo_kernel3_plus_4_a *
              ydim1_update_halo_kernel3_plus_4_a;
  // guard against the grid overshooting the iteration range
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    update_halo_kernel3_plus_4_a(arg0, arg1, arg2);
  }
}
// host stub function
// OPS-generated host driver: computes the locally owned iteration range,
// refreshes the constant-memory leading dimensions if the dat sizes changed,
// stages the `fields` constant array, sets up base pointers, exchanges halos,
// launches the device kernel and maintains timing/dirty-bit bookkeeping.
void ops_par_loop_update_halo_kernel3_plus_4_a(char const *name,
                                               ops_block block, int dim,
                                               int *range, ops_arg arg0,
                                               ops_arg arg1, ops_arg arg2) {
  // Timing
  double t1, t2, c1, c2;
  ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args, 3, range, 105))
    return;
#endif
  if (OPS_diags > 1) {
    ops_timing_realloc(105, "update_halo_kernel3_plus_4_a");
    OPS_kernels[105].count++;
    ops_timers_core(&c1, &t1);
  }
  // compute locally allocated range for the sub-block
  int start[3];
  int end[3];
#ifdef OPS_MPI
  // intersect the requested range with this rank's owned sub-block
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  for (int n = 0; n < 3; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 3; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif
  int x_size = MAX(0, end[0] - start[0]);
  int y_size = MAX(0, end[1] - start[1]);
  int z_size = MAX(0, end[2] - start[2]);
  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  // upload leading dimensions only when they differ from the cached copies
  if (xdim0 != xdim0_update_halo_kernel3_plus_4_a_h ||
      ydim0 != ydim0_update_halo_kernel3_plus_4_a_h ||
      xdim1 != xdim1_update_halo_kernel3_plus_4_a_h ||
      ydim1 != ydim1_update_halo_kernel3_plus_4_a_h) {
    hipMemcpyToSymbol(xdim0_update_halo_kernel3_plus_4_a, &xdim0, sizeof(int));
    xdim0_update_halo_kernel3_plus_4_a_h = xdim0;
    hipMemcpyToSymbol(ydim0_update_halo_kernel3_plus_4_a, &ydim0, sizeof(int));
    ydim0_update_halo_kernel3_plus_4_a_h = ydim0;
    hipMemcpyToSymbol(xdim1_update_halo_kernel3_plus_4_a, &xdim1, sizeof(int));
    xdim1_update_halo_kernel3_plus_4_a_h = xdim1;
    hipMemcpyToSymbol(ydim1_update_halo_kernel3_plus_4_a, &ydim1, sizeof(int));
    ydim1_update_halo_kernel3_plus_4_a_h = ydim1;
  }
  int *arg2h = (int *)arg2.data;
  dim3 grid((x_size - 1) / OPS_block_size_x + 1,
            (y_size - 1) / OPS_block_size_y + 1, z_size);
  dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
  // stage the gbl `fields` array into the OPS constant buffer on the device
  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  arg2.data = OPS_consts_h + consts_bytes;
  arg2.data_d = OPS_consts_d + consts_bytes;
  for (int d = 0; d < NUM_FIELDS; d++)
    ((int *)arg2.data)[d] = arg2h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  mvConstArraysToDevice(consts_bytes);
  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;
  char *p_a[3];
  // set up initial pointers
  int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[0].dat->d_m[d];
#endif
  // byte offset of the first iterated element of arg0
  int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
                          args[0].dat->base[0] - d_m[0]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
                                         args[0].dat->base[1] - d_m[1]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
              (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
               d_m[2]);
  p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[1].dat->d_m[d];
#endif
  // byte offset of the first iterated element of arg1
  int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
                          args[1].dat->base[0] - d_m[0]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
                                         args[1].dat->base[1] - d_m[1]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
              (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
               d_m[2]);
  p_a[1] = (char *)args[1].data_d + base1;
  ops_H_D_exchanges_device(args, 3);
  ops_halo_exchanges(args, 3, range);
  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[105].mpi_time += t2 - t1;
  }
  // call kernel wrapper function, passing in pointers to data
  hipLaunchKernelGGL(( ops_update_halo_kernel3_plus_4_a), dim3(grid), dim3(tblock), 0, 0,
      (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
      z_size);
  if (OPS_diags > 1) {
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[105].time += t1 - t2;
  }
  ops_set_dirtybit_device(args, 3);
  ops_set_halo_dirtybit3(&args[0], range);
  ops_set_halo_dirtybit3(&args[1], range);
  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[105].mpi_time += t2 - t1;
    OPS_kernels[105].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[105].transfer += ops_compute_transfer(dim, start, end, &arg1);
  }
}
| cab0149bb1886b2e08d047d2aa517ddb8427db2e.cu | //
// auto-generated by ops.py
//
// Per-argument leading dimensions cached in device constant memory; the *_h
// host shadows track what is currently uploaded so redundant
// cudaMemcpyToSymbol calls can be skipped by the host stub.
__constant__ int xdim0_update_halo_kernel3_plus_4_a;
int xdim0_update_halo_kernel3_plus_4_a_h = -1;
__constant__ int ydim0_update_halo_kernel3_plus_4_a;
int ydim0_update_halo_kernel3_plus_4_a_h = -1;
__constant__ int xdim1_update_halo_kernel3_plus_4_a;
int xdim1_update_halo_kernel3_plus_4_a_h = -1;
__constant__ int ydim1_update_halo_kernel3_plus_4_a;
int ydim1_update_halo_kernel3_plus_4_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
// 3D (x, y, z) -> flat index macros using the cached leading dimensions.
#define OPS_ACC0(x, y, z)                                                      \
  (x + xdim0_update_halo_kernel3_plus_4_a * (y) +                              \
   xdim0_update_halo_kernel3_plus_4_a * ydim0_update_halo_kernel3_plus_4_a *   \
       (z))
#define OPS_ACC1(x, y, z)                                                      \
  (x + xdim1_update_halo_kernel3_plus_4_a * (y) +                              \
   xdim1_update_halo_kernel3_plus_4_a * ydim1_update_halo_kernel3_plus_4_a *   \
       (z))
// user function
// Halo update: copy the value 4 cells inward (+y) into the boundary cell for
// whichever of the two flux fields is enabled in `fields`.
__device__
inline void
    update_halo_kernel3_plus_4_a(double *vol_flux_x, double *mass_flux_x,
                                 const int *fields) {
  if (fields[FIELD_VOL_FLUX_X] == 1)
    vol_flux_x[OPS_ACC0(0, 0, 0)] = vol_flux_x[OPS_ACC0(0, 4, 0)];
  if (fields[FIELD_MASS_FLUX_X] == 1)
    mass_flux_x[OPS_ACC1(0, 0, 0)] = mass_flux_x[OPS_ACC1(0, 4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
// Kernel wrapper: one thread per grid point in the size0 x size1 x size2
// iteration range. Each thread offsets the argument base pointers to its own
// element, then applies the user halo-update function.
__global__ void ops_update_halo_kernel3_plus_4_a(double *__restrict arg0,
                                                 double *__restrict arg1,
                                                 const int *__restrict arg2,
                                                 int size0, int size1,
                                                 int size2) {
  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  // advance each base pointer to this thread's (x, y, z) element
  arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_plus_4_a +
          idx_z * 1 * 1 * xdim0_update_halo_kernel3_plus_4_a *
              ydim0_update_halo_kernel3_plus_4_a;
  arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_plus_4_a +
          idx_z * 1 * 1 * xdim1_update_halo_kernel3_plus_4_a *
              ydim1_update_halo_kernel3_plus_4_a;
  // guard against the grid overshooting the iteration range
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    update_halo_kernel3_plus_4_a(arg0, arg1, arg2);
  }
}
// host stub function
//
// Host-side wrapper for loop 105 (update_halo_kernel3_plus_4_a): computes the
// locally owned iteration range (MPI-aware), uploads changed dat extents to
// __constant__ memory, stages the NUM_FIELDS flag array into the OPS consts
// buffer, offsets the dat base pointers, launches the wrapper kernel, and
// records timing / transfer diagnostics when OPS_diags > 1.
void ops_par_loop_update_halo_kernel3_plus_4_a(char const *name,
                                               ops_block block, int dim,
                                               int *range, ops_arg arg0,
                                               ops_arg arg1, ops_arg arg2) {

  // Timing
  double t1, t2, c1, c2;

  ops_arg args[3] = {arg0, arg1, arg2};

#ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args, 3, range, 105))
    return;
#endif

  if (OPS_diags > 1) {
    ops_timing_realloc(105, "update_halo_kernel3_plus_4_a");
    OPS_kernels[105].count++;
    ops_timers_core(&c1, &t1);
  }

  // compute locally allocated range for the sub-block
  int start[3];
  int end[3];
#ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  for (int n = 0; n < 3; n++) {
    // clip the requested global range against this rank's decomposition
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 3; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif

  int x_size = MAX(0, end[0] - start[0]);
  int y_size = MAX(0, end[1] - start[1]);
  int z_size = MAX(0, end[2] - start[2]);

  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];

  // upload extents to __constant__ memory only when they differ from the
  // cached host-side shadows (device-wide state; not thread-safe)
  if (xdim0 != xdim0_update_halo_kernel3_plus_4_a_h ||
      ydim0 != ydim0_update_halo_kernel3_plus_4_a_h ||
      xdim1 != xdim1_update_halo_kernel3_plus_4_a_h ||
      ydim1 != ydim1_update_halo_kernel3_plus_4_a_h) {
    // BUG FIX: incomplete hipify -- these calls were still cudaMemcpyToSymbol,
    // which does not exist under HIP; use hipMemcpyToSymbol + HIP_SYMBOL.
    hipMemcpyToSymbol(HIP_SYMBOL(xdim0_update_halo_kernel3_plus_4_a), &xdim0,
                      sizeof(int));
    xdim0_update_halo_kernel3_plus_4_a_h = xdim0;
    hipMemcpyToSymbol(HIP_SYMBOL(ydim0_update_halo_kernel3_plus_4_a), &ydim0,
                      sizeof(int));
    ydim0_update_halo_kernel3_plus_4_a_h = ydim0;
    hipMemcpyToSymbol(HIP_SYMBOL(xdim1_update_halo_kernel3_plus_4_a), &xdim1,
                      sizeof(int));
    xdim1_update_halo_kernel3_plus_4_a_h = xdim1;
    hipMemcpyToSymbol(HIP_SYMBOL(ydim1_update_halo_kernel3_plus_4_a), &ydim1,
                      sizeof(int));
    ydim1_update_halo_kernel3_plus_4_a_h = ydim1;
  }

  int *arg2h = (int *)arg2.data;

  dim3 grid((x_size - 1) / OPS_block_size_x + 1,
            (y_size - 1) / OPS_block_size_y + 1, z_size);
  dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);

  // stage the fields[] flag array into the reusable OPS consts buffer
  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  arg2.data = OPS_consts_h + consts_bytes;
  arg2.data_d = OPS_consts_d + consts_bytes;
  for (int d = 0; d < NUM_FIELDS; d++)
    ((int *)arg2.data)[d] = arg2h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  mvConstArraysToDevice(consts_bytes);

  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;

  char *p_a[3];

  // set up initial pointers: byte offset of (start[0],start[1],start[2])
  // inside each dat, accounting for base and halo depths
  int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[0].dat->d_m[d];
#endif
  int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
                          args[0].dat->base[0] - d_m[0]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
                                         args[0].dat->base[1] - d_m[1]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
              (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
               d_m[2]);
  p_a[0] = (char *)args[0].data_d + base0;

#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[1].dat->d_m[d];
#endif
  int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
                          args[1].dat->base[0] - d_m[0]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
                                         args[1].dat->base[1] - d_m[1]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
              (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
               d_m[2]);
  p_a[1] = (char *)args[1].data_d + base1;

  ops_H_D_exchanges_device(args, 3);
  ops_halo_exchanges(args, 3, range);

  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[105].mpi_time += t2 - t1;
  }

  // call kernel wrapper function, passing in pointers to data.
  // BUG FIX: guard the launch -- an empty range (e.g. z_size == 0) would
  // otherwise produce a zero-dimension grid and an invalid configuration.
  if (x_size > 0 && y_size > 0 && z_size > 0)
    hipLaunchKernelGGL(ops_update_halo_kernel3_plus_4_a, grid, tblock, 0, 0,
                       (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,
                       x_size, y_size, z_size);

  if (OPS_diags > 1) {
    // BUG FIX: incomplete hipify -- was cudaDeviceSynchronize()
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[105].time += t1 - t2;
  }

  ops_set_dirtybit_device(args, 3);
  ops_set_halo_dirtybit3(&args[0], range);
  ops_set_halo_dirtybit3(&args[1], range);

  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[105].mpi_time += t2 - t1;
    OPS_kernels[105].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[105].transfer += ops_compute_transfer(dim, start, end, &arg1);
  }
}
|
84fe96796c44a37c9718e43616a076685382ce08.hip | // !!! This is a file automatically generated by hipify!!!
#include <sys/time.h>
#include <stdio.h>
//TODO for writing to file, will be deleted
#include <stdlib.h>
//TODO: could include later
//#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
//#include "../inc/helper_cuda.h"
#define NUM_STREAMS 8
// Wall-clock stamps written by the stream callbacks below:
// B = after the H2D copies, C = after the kernel, D = after the D2H copy
// (the last stream to reach each callback wins).
double timeStampB;
double timeStampC;
double timeStampD;
// time stamp function in seconds (wall clock, microsecond resolution)
double getTimeStamp() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_usec / 1000000 + now.tv_sec;
}
// Stream callbacks: each records the wall-clock time at which the preceding
// work in its stream completed. The userData argument (a loop counter's
// address at the call sites) is accepted but unused.
void myCallBackB(hipStream_t stream,hipError_t status, void* userData ){
    timeStampB=getTimeStamp();
}
void myCallBackC(hipStream_t stream,hipError_t status, void* userData ){
    timeStampC=getTimeStamp();
}
void myCallBackD(hipStream_t stream,hipError_t status, void* userData ){
    timeStampD=getTimeStamp();
}
// Fill an nx-by-ny row-major matrix: element (row,col) = (row+col)/3.
void initDataA(float* data, int nx, int ny){
    for(int row = 0; row < nx; row++){
        for(int col = 0; col < ny; col++){
            data[row*ny + col] = (float) (row+col)/3.0;
        }
    }
}
// Fill an nx-by-ny row-major matrix: element (row,col) = 3.14*(row+col).
void initDataB(float* data, int nx, int ny){
    for(int row = 0; row < nx; row++){
        for(int col = 0; col < ny; col++){
            data[row*ny + col] = (float)3.14*(row+col);
        }
    }
}
// Print an nx-by-ny row-major matrix, one row per line, followed by a
// blank line. Debugging aid only.
void debugPrint(float* data, int nx, int ny){
    for(int row = 0; row < nx; row++){
        for(int col = 0; col < ny; col++)
            printf("%f ", data[row*ny + col]);
        printf("\n");
    }
    printf("\n");
}
// host side matrix addition: C = A + B, elementwise over nx*ny floats
void h_addmat(float *A, float *B, float *C, int nx, int ny){
    const int total = nx*ny;
    for(int k = 0; k < total; k++)
        C[k] = A[k] + B[k];
}
// device-side matrix addition: B[i] += A[i] for i in [0, len).
// Grid-stride loop over a 2D thread block flattened to a 1D index, so any
// grid/block geometry covers the whole range.
__global__ void f_addmat( float *A, float *B, int len/*, int padrow*/){
    const int lane   = threadIdx.x;
    const int base   = threadIdx.y*blockDim.x + blockIdx.x*blockDim.x*blockDim.y;
    const int stride = gridDim.x*blockDim.x*blockDim.y;
    #pragma unroll
    for(int i = base + lane; i < len; i += stride){
        B[i] += A[i];
    }
}
int main( int argc, char *argv[] ) {
    // Adds two nx-by-ny float matrices on the GPU. The work is split across
    // NUM_STREAMS HIP streams so H2D copies, kernels and D2H copies overlap;
    // the result is validated against a host-side reference and the phase
    // timings are appended to time.log.

    // get program arguments
    if( argc != 3) {
        printf("Error: wrong number of args\n") ;
        exit(1) ;
    }
    int nx = atoi( argv[1] ) ; // should check validity
    int ny = atoi( argv[2] ) ; // should check validity

    int noElems = nx*ny ;
    int bytes = noElems * sizeof(float) ;

    // pageable host buffers for the CPU reference computation
    float *h_hA = (float *) malloc( bytes ) ;
    float *h_hB = (float *) malloc( bytes ) ;
    float *h_hC = (float *) malloc( bytes ) ; // host result

    // pinned buffers for the async GPU path. Write-combined pinned memory
    // maximises H2D copy bandwidth; NOTE(review): h_dC is also write-combined
    // but is read back by memcmp below, and host reads of write-combined
    // memory are slow -- confirm this trade-off is intended.
    float *h_A, *h_B, *h_dC;
    float *d_A, *d_B ;
    hipHostMalloc((void**)&h_A,bytes,hipHostMallocWriteCombined|hipHostMallocMapped);
    hipHostMalloc((void**)&h_B,bytes,hipHostMallocWriteCombined|hipHostMallocMapped);
    hipHostMalloc((void**)&h_dC,bytes,hipHostMallocWriteCombined);

    // init matrices with deterministic data (same data in both copies)
    initDataA(h_A, nx, ny);
    initDataB(h_B, nx, ny);
    initDataA(h_hA, nx, ny);
    initDataB(h_hB, nx, ny);

    // alloc memory dev-side
    hipMalloc( (void **) &d_A, bytes ) ;
    hipMalloc( (void **) &d_B, bytes ) ;
    hipDeviceSetCacheConfig(hipFuncCachePreferL1);

    double timeStampA = getTimeStamp() ;

    // launch geometry: each stream handles ~1/NUM_STREAMS of the elements,
    // each thread ~4 of them via the kernel's stride loop
    dim3 block( 32, 32 ) ;
    int grid = ((noElems+3)/4/NUM_STREAMS + block.x*block.y-1)/(block.x*block.y);
    // per-stream chunk size, rounded down to a multiple of 8 floats so every
    // chunk start stays 32-byte aligned
    int align_idx = noElems/NUM_STREAMS-(noElems/NUM_STREAMS)%8;

    // streams are indexed 1..NUM_STREAMS (slot 0 is unused)
    hipStream_t stream[NUM_STREAMS+1];
    for (int s = 1; s < NUM_STREAMS+1; s++){
        hipStreamCreate(&(stream[s]));
    }

    int i;
    for(i = 1; i < NUM_STREAMS; i++){
        // copy-in, add, copy-out for chunk i-1; callbacks time the phases
        hipMemcpyAsync(&d_A[(i-1)*align_idx],&h_A[(i-1)*align_idx],align_idx*sizeof(float),hipMemcpyHostToDevice,stream[i]);
        hipMemcpyAsync(&d_B[(i-1)*align_idx],&h_B[(i-1)*align_idx],align_idx*sizeof(float),hipMemcpyHostToDevice,stream[i]);
        hipStreamAddCallback(stream[i],myCallBackB,(void*)&i,0);
        hipLaunchKernelGGL(( f_addmat), dim3(grid), dim3(block), 0, stream[i],  d_A+(i-1)*align_idx, d_B+(i-1)*align_idx,align_idx) ;
        hipStreamAddCallback(stream[i],myCallBackC,(void*)&i,0);
        hipMemcpyAsync(&h_dC[(i-1)*align_idx],&d_B[(i-1)*align_idx],align_idx*sizeof(float),hipMemcpyDeviceToHost,stream[i]);
        hipStreamAddCallback(stream[i],myCallBackD,(void*)&i,0);
    }

    // the last stream takes the remainder chunk, which may be larger.
    grid =((noElems-(NUM_STREAMS-1)*align_idx+3)/4+ block.x*block.y-1)/(block.x*block.y);
    hipMemcpyAsync(&d_A[(NUM_STREAMS-1)*align_idx],&h_A[(NUM_STREAMS-1)*align_idx],(noElems-(NUM_STREAMS-1)*align_idx)*sizeof(float),hipMemcpyHostToDevice,stream[NUM_STREAMS]);
    hipMemcpyAsync(&d_B[(NUM_STREAMS-1)*align_idx],&h_B[(NUM_STREAMS-1)*align_idx],(noElems-(NUM_STREAMS-1)*align_idx)*sizeof(float),hipMemcpyHostToDevice,stream[NUM_STREAMS]);
    // BUG FIX: these callbacks were registered on stream[i], which equalled
    // stream[NUM_STREAMS] only because the loop above left i == NUM_STREAMS;
    // name the intended stream explicitly.
    hipStreamAddCallback(stream[NUM_STREAMS],myCallBackB,(void*)&i,0);
    hipLaunchKernelGGL(( f_addmat), dim3(grid), dim3(block), 0, stream[NUM_STREAMS],  d_A+(NUM_STREAMS-1)*align_idx, d_B+(NUM_STREAMS-1)*align_idx,noElems-(NUM_STREAMS-1)*align_idx) ;
    hipStreamAddCallback(stream[NUM_STREAMS],myCallBackC,(void*)&i,0);
    hipMemcpyAsync(&h_dC[(NUM_STREAMS-1)*align_idx],&d_B[(NUM_STREAMS-1)*align_idx],(noElems-(NUM_STREAMS-1)*align_idx)*sizeof(float),hipMemcpyDeviceToHost,stream[NUM_STREAMS]);
    hipStreamAddCallback(stream[NUM_STREAMS],myCallBackD,(void*)&i,0);

    // wait for every stream (and therefore every callback) to finish
    for(int s = 1; s < NUM_STREAMS+1; s++){
        hipStreamSynchronize(stream[s]);
    }

    // check result against the CPU reference (bitwise compare is valid here:
    // both sides perform the identical single float addition per element)
    h_addmat( h_hA, h_hB, h_hC, nx, ny ) ;
    if(!memcmp(h_hC,h_dC,nx*ny*sizeof(float))){
        FILE* fptr;
        fptr = fopen("time.log","a");
        fprintf(fptr,"%dX%d %.6f %.6f %.6f %.6f\n", nx, ny, timeStampD-timeStampA, timeStampB-timeStampA, timeStampC-timeStampB, timeStampD-timeStampC);
        fclose(fptr);
        printf("%.6f %.6f %.6f %.6f\n", timeStampD-timeStampA, timeStampB-timeStampA, timeStampC-timeStampB, timeStampD-timeStampC);
    }else{
        printf("Error: Results not matched.\n");
    }

    // free GPU and host resources.
    // BUG FIX: the streams, d_A/d_B and the malloc'd reference buffers were
    // previously leaked.
    for(int s = 1; s < NUM_STREAMS+1; s++){
        hipStreamDestroy(stream[s]);
    }
    hipFree( d_A ) ; hipFree( d_B ) ;
    hipHostFree( h_A ) ; hipHostFree( h_B ) ; hipHostFree( h_dC ) ;
    free( h_hA ) ; free( h_hB ) ; free( h_hC ) ;
    hipDeviceReset() ;
}
| 84fe96796c44a37c9718e43616a076685382ce08.cu | #include <sys/time.h>
#include <stdio.h>
//TODO for writing to file, will be deleted
#include <stdlib.h>
//TODO: could include later
//#include <device_launch_parameters.h>
#include <cuda_runtime.h>
//#include "../inc/helper_cuda.h"
#define NUM_STREAMS 8
// Wall-clock stamps written by the stream callbacks below:
// B = after the H2D copies, C = after the kernel, D = after the D2H copy
// (the last stream to reach each callback wins).
double timeStampB;
double timeStampC;
double timeStampD;
// time stamp function in seconds (wall clock, microsecond resolution)
double getTimeStamp() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_usec / 1000000 + now.tv_sec;
}
// Stream callbacks: each records the wall-clock time at which the preceding
// work in its stream completed. The userData argument (a loop counter's
// address at the call sites) is accepted but unused.
void myCallBackB(cudaStream_t stream,cudaError_t status, void* userData ){
    timeStampB=getTimeStamp();
}
void myCallBackC(cudaStream_t stream,cudaError_t status, void* userData ){
    timeStampC=getTimeStamp();
}
void myCallBackD(cudaStream_t stream,cudaError_t status, void* userData ){
    timeStampD=getTimeStamp();
}
// Fill an nx-by-ny row-major matrix: element (row,col) = (row+col)/3.
void initDataA(float* data, int nx, int ny){
    for(int row = 0; row < nx; row++){
        for(int col = 0; col < ny; col++){
            data[row*ny + col] = (float) (row+col)/3.0;
        }
    }
}
// Fill an nx-by-ny row-major matrix: element (row,col) = 3.14*(row+col).
void initDataB(float* data, int nx, int ny){
    for(int row = 0; row < nx; row++){
        for(int col = 0; col < ny; col++){
            data[row*ny + col] = (float)3.14*(row+col);
        }
    }
}
// Print an nx-by-ny row-major matrix, one row per line, followed by a
// blank line. Debugging aid only.
void debugPrint(float* data, int nx, int ny){
    for(int row = 0; row < nx; row++){
        for(int col = 0; col < ny; col++)
            printf("%f ", data[row*ny + col]);
        printf("\n");
    }
    printf("\n");
}
// host side matrix addition: C = A + B, elementwise over nx*ny floats
void h_addmat(float *A, float *B, float *C, int nx, int ny){
    const int total = nx*ny;
    for(int k = 0; k < total; k++)
        C[k] = A[k] + B[k];
}
// device-side matrix addition: B[i] += A[i] for i in [0, len).
// Grid-stride loop over a 2D thread block flattened to a 1D index, so any
// grid/block geometry covers the whole range.
__global__ void f_addmat( float *A, float *B, int len/*, int padrow*/){
    const int lane   = threadIdx.x;
    const int base   = threadIdx.y*blockDim.x + blockIdx.x*blockDim.x*blockDim.y;
    const int stride = gridDim.x*blockDim.x*blockDim.y;
    #pragma unroll
    for(int i = base + lane; i < len; i += stride){
        B[i] += A[i];
    }
}
int main( int argc, char *argv[] ) {
    // Adds two nx-by-ny float matrices on the GPU. The work is split across
    // NUM_STREAMS CUDA streams so H2D copies, kernels and D2H copies overlap;
    // the result is validated against a host-side reference and the phase
    // timings are appended to time.log.

    // get program arguments
    if( argc != 3) {
        printf("Error: wrong number of args\n") ;
        exit(1) ;
    }
    int nx = atoi( argv[1] ) ; // should check validity
    int ny = atoi( argv[2] ) ; // should check validity

    int noElems = nx*ny ;
    int bytes = noElems * sizeof(float) ;

    // pageable host buffers for the CPU reference computation
    float *h_hA = (float *) malloc( bytes ) ;
    float *h_hB = (float *) malloc( bytes ) ;
    float *h_hC = (float *) malloc( bytes ) ; // host result

    // pinned buffers for the async GPU path. Write-combined pinned memory
    // maximises H2D copy bandwidth; NOTE(review): h_dC is also write-combined
    // but is read back by memcmp below, and host reads of write-combined
    // memory are slow -- confirm this trade-off is intended.
    float *h_A, *h_B, *h_dC;
    float *d_A, *d_B ;
    cudaHostAlloc((void**)&h_A,bytes,cudaHostAllocWriteCombined|cudaHostAllocMapped);
    cudaHostAlloc((void**)&h_B,bytes,cudaHostAllocWriteCombined|cudaHostAllocMapped);
    cudaHostAlloc((void**)&h_dC,bytes,cudaHostAllocWriteCombined);

    // init matrices with deterministic data (same data in both copies)
    initDataA(h_A, nx, ny);
    initDataB(h_B, nx, ny);
    initDataA(h_hA, nx, ny);
    initDataB(h_hB, nx, ny);

    // alloc memory dev-side
    cudaMalloc( (void **) &d_A, bytes ) ;
    cudaMalloc( (void **) &d_B, bytes ) ;
    cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);

    double timeStampA = getTimeStamp() ;

    // launch geometry: each stream handles ~1/NUM_STREAMS of the elements,
    // each thread ~4 of them via the kernel's stride loop
    dim3 block( 32, 32 ) ;
    int grid = ((noElems+3)/4/NUM_STREAMS + block.x*block.y-1)/(block.x*block.y);
    // per-stream chunk size, rounded down to a multiple of 8 floats so every
    // chunk start stays 32-byte aligned
    int align_idx = noElems/NUM_STREAMS-(noElems/NUM_STREAMS)%8;

    // streams are indexed 1..NUM_STREAMS (slot 0 is unused)
    cudaStream_t stream[NUM_STREAMS+1];
    for (int s = 1; s < NUM_STREAMS+1; s++){
        cudaStreamCreate(&(stream[s]));
    }

    int i;
    for(i = 1; i < NUM_STREAMS; i++){
        // copy-in, add, copy-out for chunk i-1; callbacks time the phases
        cudaMemcpyAsync(&d_A[(i-1)*align_idx],&h_A[(i-1)*align_idx],align_idx*sizeof(float),cudaMemcpyHostToDevice,stream[i]);
        cudaMemcpyAsync(&d_B[(i-1)*align_idx],&h_B[(i-1)*align_idx],align_idx*sizeof(float),cudaMemcpyHostToDevice,stream[i]);
        cudaStreamAddCallback(stream[i],myCallBackB,(void*)&i,0);
        f_addmat<<<grid, block, 0, stream[i]>>>( d_A+(i-1)*align_idx, d_B+(i-1)*align_idx,align_idx) ;
        cudaStreamAddCallback(stream[i],myCallBackC,(void*)&i,0);
        cudaMemcpyAsync(&h_dC[(i-1)*align_idx],&d_B[(i-1)*align_idx],align_idx*sizeof(float),cudaMemcpyDeviceToHost,stream[i]);
        cudaStreamAddCallback(stream[i],myCallBackD,(void*)&i,0);
    }

    // the last stream takes the remainder chunk, which may be larger.
    grid =((noElems-(NUM_STREAMS-1)*align_idx+3)/4+ block.x*block.y-1)/(block.x*block.y);
    cudaMemcpyAsync(&d_A[(NUM_STREAMS-1)*align_idx],&h_A[(NUM_STREAMS-1)*align_idx],(noElems-(NUM_STREAMS-1)*align_idx)*sizeof(float),cudaMemcpyHostToDevice,stream[NUM_STREAMS]);
    cudaMemcpyAsync(&d_B[(NUM_STREAMS-1)*align_idx],&h_B[(NUM_STREAMS-1)*align_idx],(noElems-(NUM_STREAMS-1)*align_idx)*sizeof(float),cudaMemcpyHostToDevice,stream[NUM_STREAMS]);
    // BUG FIX: these callbacks were registered on stream[i], which equalled
    // stream[NUM_STREAMS] only because the loop above left i == NUM_STREAMS;
    // name the intended stream explicitly.
    cudaStreamAddCallback(stream[NUM_STREAMS],myCallBackB,(void*)&i,0);
    f_addmat<<<grid, block, 0, stream[NUM_STREAMS]>>>( d_A+(NUM_STREAMS-1)*align_idx, d_B+(NUM_STREAMS-1)*align_idx,noElems-(NUM_STREAMS-1)*align_idx) ;
    cudaStreamAddCallback(stream[NUM_STREAMS],myCallBackC,(void*)&i,0);
    cudaMemcpyAsync(&h_dC[(NUM_STREAMS-1)*align_idx],&d_B[(NUM_STREAMS-1)*align_idx],(noElems-(NUM_STREAMS-1)*align_idx)*sizeof(float),cudaMemcpyDeviceToHost,stream[NUM_STREAMS]);
    cudaStreamAddCallback(stream[NUM_STREAMS],myCallBackD,(void*)&i,0);

    // wait for every stream (and therefore every callback) to finish
    for(int s = 1; s < NUM_STREAMS+1; s++){
        cudaStreamSynchronize(stream[s]);
    }

    // check result against the CPU reference (bitwise compare is valid here:
    // both sides perform the identical single float addition per element)
    h_addmat( h_hA, h_hB, h_hC, nx, ny ) ;
    if(!memcmp(h_hC,h_dC,nx*ny*sizeof(float))){
        FILE* fptr;
        fptr = fopen("time.log","a");
        fprintf(fptr,"%dX%d %.6f %.6f %.6f %.6f\n", nx, ny, timeStampD-timeStampA, timeStampB-timeStampA, timeStampC-timeStampB, timeStampD-timeStampC);
        fclose(fptr);
        printf("%.6f %.6f %.6f %.6f\n", timeStampD-timeStampA, timeStampB-timeStampA, timeStampC-timeStampB, timeStampD-timeStampC);
    }else{
        printf("Error: Results not matched.\n");
    }

    // free GPU and host resources.
    // BUG FIX: the streams, d_A/d_B and the malloc'd reference buffers were
    // previously leaked.
    for(int s = 1; s < NUM_STREAMS+1; s++){
        cudaStreamDestroy(stream[s]);
    }
    cudaFree( d_A ) ; cudaFree( d_B ) ;
    cudaFreeHost( h_A ) ; cudaFreeHost( h_B ) ; cudaFreeHost( h_dC ) ;
    free( h_hA ) ; free( h_hB ) ; free( h_hC ) ;
    cudaDeviceReset() ;
}
|
81996fb50b6d92a3d1408a1b7b0093c7fe089cf3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reinforcement_path_tracing.cuh"
//cuRand
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
// One thread per radiance volume: refresh that volume's sampling
// distribution in place.
__global__
void update_radiance_volume_distributions(RadianceMap* radiance_map){
    const int volume_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (volume_idx >= radiance_map->radiance_volumes_count) return;
    radiance_map->radiance_volumes[volume_idx].update_radiance_distribution();
}
// One thread per pixel: path-trace pixel (x, y) and store the resulting
// colour into the shared GPU/CPU screen buffer (column-major: x*SCREEN_HEIGHT + y).
// NOTE(review): no bounds check on x/y -- assumes the launch grid exactly
// covers the screen; an overshooting grid would write past device_buffer.
// TODO confirm against the launch site.
__global__
void draw_reinforcement_path_tracing(vec3* device_buffer, hiprandState_t* d_rand_state, RadianceMap* radiance_map, Camera* camera, Scene* scene, int* device_path_lengths, int* zero_contribution_light_paths){
    // Populate the shared GPU/CPU screen buffer
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    // Path trace the ray to find the colour to paint the pixel
    device_buffer[x*(int)SCREEN_HEIGHT + y] = path_trace_reinforcement(d_rand_state, radiance_map, camera, x, y, scene, device_path_lengths, zero_contribution_light_paths);
}
// Average SAMPLES_PER_PIXEL independent light paths through pixel
// (pixel_x, pixel_y). Also records the mean path length for the pixel and
// atomically counts samples whose mean channel value falls below
// THROUGHPUT_THRESHOLD (zero-contribution light paths).
__device__
vec3 path_trace_reinforcement(hiprandState_t* d_rand_state, RadianceMap* radiance_map, Camera* camera, int pixel_x, int pixel_y, Scene* scene, int* device_path_lengths, int* zero_contribution_light_paths){
    vec3 accumulated = vec3(0.f);
    int summed_path_lengths = 0;
    for (int sample = 0; sample < SAMPLES_PER_PIXEL; sample++){
        // Trace one full light path; bounce count comes back by reference
        int bounces;
        vec3 sample_radiance = path_trace_reinforcement_iterative(pixel_x, pixel_y, camera, d_rand_state, radiance_map, scene, bounces);
        accumulated += sample_radiance;
        summed_path_lengths += bounces;
        // Flag samples that contributed (effectively) nothing
        float mean_radiance = (sample_radiance.x + sample_radiance.y + sample_radiance.z)/3.f;
        if(mean_radiance < THROUGHPUT_THRESHOLD){
            atomicAdd(zero_contribution_light_paths, 1);
        }
    }
    device_path_lengths[pixel_x*SCREEN_HEIGHT + pixel_y] = int(summed_path_lengths/SAMPLES_PER_PIXEL);
    accumulated /= (float)SAMPLES_PER_PIXEL;
    return accumulated;
}
// Trace a single light path of up to MAX_RAY_BOUNCES bounces starting at the
// given pixel, importance-sampling bounce directions from the radiance map
// and performing temporal-difference updates of the map's sector estimates as
// the path progresses. Returns the path's radiance; writes the bounce count
// into path_length.
__device__
vec3 path_trace_reinforcement_iterative(int pixel_x, int pixel_y, Camera* camera, hiprandState_t* d_rand_state, RadianceMap* radiance_map, Scene* scene, int& path_length){
    Ray ray = Ray::sample_ray_through_pixel(d_rand_state, *camera, pixel_x, pixel_y);
    vec3 throughput = vec3(1.f);

    // BUG FIX: this pointer was declared uninitialised, yet the guard below
    // tests it before use; give it a well-defined null starting value.
    RadianceVolume* current_radiance_volume = NULL;
    int current_sector_x = -1;
    int current_sector_y = -1;
    float current_BRDF = 0.f;
    for (int i = 0; i < MAX_RAY_BOUNCES; i++){

        // Trace the path of the ray to find the closest intersection
        ray.closest_intersection(scene);

        // We cannot update Q on the first bounce as it is the camera position,
        // not a point in the scene. But we still need the closest radiance
        // volume the first ray intersects with.
        if (i > 0){
            // Update Q, where x = ray.start, y = intersection.position.
            // Only update when a radiance volume sector was selected.
            if (current_radiance_volume && current_sector_x != -1 && current_sector_y != -1){
                current_radiance_volume = radiance_map->temporal_difference_update_radiance_volume_sector(current_BRDF, current_radiance_volume, current_sector_x, current_sector_y, ray.intersection, scene);
                current_sector_x = -1;
                current_sector_y = -1;
            }
        }
        // Get the radiance volume for the first iteration
        else{
            if (ray.intersection.intersection_type == SURFACE)
                current_radiance_volume = radiance_map->find_closest_radiance_volume_iterative(MAX_DIST, ray.intersection.position, ray.intersection.normal);
        }

        // Check what the ray intersected with...
        switch(ray.intersection.intersection_type){
            // Intersected with nothing: terminate with the environment light
            case NOTHING:
                path_length = i+1;
                return throughput * vec3(ENVIRONMENT_LIGHT);
                break;
            // Intersected with a light plane: terminate with its diffuse power
            case AREA_LIGHT:
                path_length= i+1;
                return throughput * scene->area_lights[ray.intersection.index].diffuse_p;
                break;
            // Intersected with a (diffuse) surface: importance-sample the next
            // bounce direction from the radiance volume and continue the path
            case SURFACE:
                vec4 sampled_direction = vec4(0.f);
                float pdf = 0.f;
                radiance_map->importance_sample_ray_direction(d_rand_state, ray.intersection, current_sector_x, current_sector_y, pixel_x, pixel_y, sampled_direction, current_radiance_volume, pdf);

                vec3 BRDF = scene->surfaces[ray.intersection.index].material.diffuse_c / (float)M_PI;
                float cos_theta = dot(vec3(scene->surfaces[ray.intersection.index].normal), vec3(sampled_direction));
                // NOTE(review): the Q-update weight uses material.luminance
                // while the throughput uses diffuse_c -- looks intentional
                // (scalar weight vs. colour BRDF) but worth confirming.
                current_BRDF = (scene->surfaces[ray.intersection.index].material.luminance) / (float)M_PI;
                throughput *= (BRDF * cos_theta) / pdf;

                // Offset the new origin slightly along the sampled direction
                // to avoid self-intersection.
                vec4 start = ray.intersection.position + sampled_direction * 0.00001f;
                start.w = 1.f;
                ray = Ray(start, sampled_direction);
                break;
        }
    }
    // Bounce budget exhausted without reaching a light: no contribution
    path_length = MAX_RAY_BOUNCES;
    return vec3(0);
}
| 81996fb50b6d92a3d1408a1b7b0093c7fe089cf3.cu | #include "reinforcement_path_tracing.cuh"
//cuRand
#include <curand.h>
#include <curand_kernel.h>
// One thread per radiance volume: refresh that volume's sampling
// distribution in place.
__global__
void update_radiance_volume_distributions(RadianceMap* radiance_map){
    const int volume_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (volume_idx >= radiance_map->radiance_volumes_count) return;
    radiance_map->radiance_volumes[volume_idx].update_radiance_distribution();
}
// One thread per pixel: path-trace pixel (x, y) and store the resulting
// colour into the shared GPU/CPU screen buffer (column-major: x*SCREEN_HEIGHT + y).
// NOTE(review): no bounds check on x/y -- assumes the launch grid exactly
// covers the screen; an overshooting grid would write past device_buffer.
// TODO confirm against the launch site.
__global__
void draw_reinforcement_path_tracing(vec3* device_buffer, curandState* d_rand_state, RadianceMap* radiance_map, Camera* camera, Scene* scene, int* device_path_lengths, int* zero_contribution_light_paths){
    // Populate the shared GPU/CPU screen buffer
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    // Path trace the ray to find the colour to paint the pixel
    device_buffer[x*(int)SCREEN_HEIGHT + y] = path_trace_reinforcement(d_rand_state, radiance_map, camera, x, y, scene, device_path_lengths, zero_contribution_light_paths);
}
// Average SAMPLES_PER_PIXEL independent light paths through pixel
// (pixel_x, pixel_y). Also records the mean path length for the pixel and
// atomically counts samples whose mean channel value falls below
// THROUGHPUT_THRESHOLD (zero-contribution light paths).
__device__
vec3 path_trace_reinforcement(curandState* d_rand_state, RadianceMap* radiance_map, Camera* camera, int pixel_x, int pixel_y, Scene* scene, int* device_path_lengths, int* zero_contribution_light_paths){
    vec3 accumulated = vec3(0.f);
    int summed_path_lengths = 0;
    for (int sample = 0; sample < SAMPLES_PER_PIXEL; sample++){
        // Trace one full light path; bounce count comes back by reference
        int bounces;
        vec3 sample_radiance = path_trace_reinforcement_iterative(pixel_x, pixel_y, camera, d_rand_state, radiance_map, scene, bounces);
        accumulated += sample_radiance;
        summed_path_lengths += bounces;
        // Flag samples that contributed (effectively) nothing
        float mean_radiance = (sample_radiance.x + sample_radiance.y + sample_radiance.z)/3.f;
        if(mean_radiance < THROUGHPUT_THRESHOLD){
            atomicAdd(zero_contribution_light_paths, 1);
        }
    }
    device_path_lengths[pixel_x*SCREEN_HEIGHT + pixel_y] = int(summed_path_lengths/SAMPLES_PER_PIXEL);
    accumulated /= (float)SAMPLES_PER_PIXEL;
    return accumulated;
}
// Trace a single light path of up to MAX_RAY_BOUNCES bounces starting at the
// given pixel, importance-sampling bounce directions from the radiance map
// and performing temporal-difference updates of the map's sector estimates as
// the path progresses. Returns the path's radiance; writes the bounce count
// into path_length.
__device__
vec3 path_trace_reinforcement_iterative(int pixel_x, int pixel_y, Camera* camera, curandState* d_rand_state, RadianceMap* radiance_map, Scene* scene, int& path_length){
    Ray ray = Ray::sample_ray_through_pixel(d_rand_state, *camera, pixel_x, pixel_y);
    vec3 throughput = vec3(1.f);

    // BUG FIX: this pointer was declared uninitialised, yet the guard below
    // tests it before use; give it a well-defined null starting value.
    RadianceVolume* current_radiance_volume = NULL;
    int current_sector_x = -1;
    int current_sector_y = -1;
    float current_BRDF = 0.f;
    for (int i = 0; i < MAX_RAY_BOUNCES; i++){

        // Trace the path of the ray to find the closest intersection
        ray.closest_intersection(scene);

        // We cannot update Q on the first bounce as it is the camera position,
        // not a point in the scene. But we still need the closest radiance
        // volume the first ray intersects with.
        if (i > 0){
            // Update Q, where x = ray.start, y = intersection.position.
            // Only update when a radiance volume sector was selected.
            if (current_radiance_volume && current_sector_x != -1 && current_sector_y != -1){
                current_radiance_volume = radiance_map->temporal_difference_update_radiance_volume_sector(current_BRDF, current_radiance_volume, current_sector_x, current_sector_y, ray.intersection, scene);
                current_sector_x = -1;
                current_sector_y = -1;
            }
        }
        // Get the radiance volume for the first iteration
        else{
            if (ray.intersection.intersection_type == SURFACE)
                current_radiance_volume = radiance_map->find_closest_radiance_volume_iterative(MAX_DIST, ray.intersection.position, ray.intersection.normal);
        }

        // Check what the ray intersected with...
        switch(ray.intersection.intersection_type){
            // Intersected with nothing: terminate with the environment light
            case NOTHING:
                path_length = i+1;
                return throughput * vec3(ENVIRONMENT_LIGHT);
                break;
            // Intersected with a light plane: terminate with its diffuse power
            case AREA_LIGHT:
                path_length= i+1;
                return throughput * scene->area_lights[ray.intersection.index].diffuse_p;
                break;
            // Intersected with a (diffuse) surface: importance-sample the next
            // bounce direction from the radiance volume and continue the path
            case SURFACE:
                vec4 sampled_direction = vec4(0.f);
                float pdf = 0.f;
                radiance_map->importance_sample_ray_direction(d_rand_state, ray.intersection, current_sector_x, current_sector_y, pixel_x, pixel_y, sampled_direction, current_radiance_volume, pdf);

                vec3 BRDF = scene->surfaces[ray.intersection.index].material.diffuse_c / (float)M_PI;
                float cos_theta = dot(vec3(scene->surfaces[ray.intersection.index].normal), vec3(sampled_direction));
                // NOTE(review): the Q-update weight uses material.luminance
                // while the throughput uses diffuse_c -- looks intentional
                // (scalar weight vs. colour BRDF) but worth confirming.
                current_BRDF = (scene->surfaces[ray.intersection.index].material.luminance) / (float)M_PI;
                throughput *= (BRDF * cos_theta) / pdf;

                // Offset the new origin slightly along the sampled direction
                // to avoid self-intersection.
                vec4 start = ray.intersection.position + sampled_direction * 0.00001f;
                start.w = 1.f;
                ray = Ray(start, sampled_direction);
                break;
        }
    }
    // Bounce budget exhausted without reaching a light: no contribution
    path_length = MAX_RAY_BOUNCES;
    return vec3(0);
}
|
93e24fce7d3c0617169b7e60fd44e14b85b57aa1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/merge.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#ifdef THRUST_TEST_DEVICE_SIDE
// Single-thread device driver for thrust::merge: merges the two sorted input
// ranges [first1,last1) and [first2,last2) into result1 using the supplied
// execution policy, and stores the returned end iterator through result2 so
// the host can recover the merged length.  Intended to be launched <<<1,1>>>.
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4>
__global__
void merge_kernel(ExecutionPolicy exec,
Iterator1 first1, Iterator1 last1,
Iterator2 first2, Iterator2 last2,
Iterator3 result1,
Iterator4 result2)
{
*result2 = thrust::merge(exec, first1, last1, first2, last2, result1);
}
// Exercises thrust::merge launched from device code (via merge_kernel) against
// a host-side reference merge, for several sizes of the second input range.
template<typename ExecutionPolicy>
void TestMergeDevice(ExecutionPolicy exec)
{
size_t n = 10000;
// Second-range sizes under test: empty, single element, and around/beyond n.
size_t sizes[] = {0, 1, n / 2, n, n + 1, 2 * n};
size_t num_sizes = sizeof(sizes) / sizeof(size_t);
// One random pool large enough for the first range (n) plus the largest size.
thrust::host_vector<int> random = unittest::random_integers<unittest::int8_t>(n + *thrust::max_element(sizes, sizes + num_sizes));
thrust::host_vector<int> h_a(random.begin(), random.begin() + n);
thrust::host_vector<int> h_b(random.begin() + n, random.end());
// merge requires sorted inputs.
thrust::stable_sort(h_a.begin(), h_a.end());
thrust::stable_sort(h_b.begin(), h_b.end());
thrust::device_vector<int> d_a = h_a;
thrust::device_vector<int> d_b = h_b;
for(size_t i = 0; i < num_sizes; i++)
{
size_t size = sizes[i];
thrust::host_vector<int> h_result(n + size);
thrust::device_vector<int> d_result(n + size);
typename thrust::host_vector<int>::iterator h_end;
typedef typename thrust::device_vector<int>::iterator iter_type;
// One-element device buffer the kernel writes the end iterator into.
thrust::device_vector<iter_type> d_end(1);
// Host reference merge of a with the first `size` elements of b.
h_end = thrust::merge(h_a.begin(), h_a.end(),
h_b.begin(), h_b.begin() + size,
h_result.begin());
h_result.resize(h_end - h_result.begin());
// Device merge from a single-thread kernel launch.
hipLaunchKernelGGL(( merge_kernel), dim3(1),dim3(1), 0, 0, exec,
d_a.begin(), d_a.end(),
d_b.begin(), d_b.begin() + size,
d_result.begin(),
d_end.begin());
// Synchronize so in-kernel errors surface before comparing results.
hipError_t const err = hipDeviceSynchronize();
ASSERT_EQUAL(hipSuccess, err);
// Trim to the size reported by the kernel and compare with the reference.
d_result.resize((iter_type)d_end[0] - d_result.begin());
ASSERT_EQUAL(h_result, d_result);
}
}
// Runs the device-side merge test with the sequential execution policy.
void TestMergeDeviceSeq()
{
TestMergeDevice(thrust::seq);
}
DECLARE_UNITTEST(TestMergeDeviceSeq);
// Runs the device-side merge test with the parallel device execution policy.
void TestMergeDeviceDevice()
{
TestMergeDevice(thrust::device);
}
DECLARE_UNITTEST(TestMergeDeviceDevice);
#endif
// Verifies thrust::merge on an explicit (non-default) HIP stream against a
// hand-computed reference, and checks the returned end iterator.
void TestMergeCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef Vector::iterator Iterator;
Vector a(3), b(4);
a[0] = 0; a[1] = 2; a[2] = 4;
b[0] = 0; b[1] = 3; b[2] = 3; b[3] = 4;
// Expected result of merging the two sorted sequences above.
Vector ref(7);
ref[0] = 0;
ref[1] = 0;
ref[2] = 2;
ref[3] = 3;
ref[4] = 3;
ref[5] = 4;
ref[6] = 4;
Vector result(7);
hipStream_t s;
// Check stream creation: running on an invalid stream would make the later
// assertions meaningless.  Matches the ASSERT_EQUAL(hipSuccess, ...) pattern
// used elsewhere in this file.
ASSERT_EQUAL(hipSuccess, hipStreamCreate(&s));
Iterator end = thrust::merge(thrust::hip::par.on(s),
a.begin(), a.end(),
b.begin(), b.end(),
result.begin());
ASSERT_EQUAL_QUIET(result.end(), end);
ASSERT_EQUAL(ref, result);
// Destroying the stream can also report earlier asynchronous errors.
ASSERT_EQUAL(hipSuccess, hipStreamDestroy(s));
}
DECLARE_UNITTEST(TestMergeCudaStreams);
| 93e24fce7d3c0617169b7e60fd44e14b85b57aa1.cu | #include <unittest/unittest.h>
#include <thrust/merge.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#ifdef THRUST_TEST_DEVICE_SIDE
// Single-thread device driver for thrust::merge: merges the two sorted input
// ranges [first1,last1) and [first2,last2) into result1 using the supplied
// execution policy, and stores the returned end iterator through result2 so
// the host can recover the merged length.  Intended to be launched <<<1,1>>>.
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4>
__global__
void merge_kernel(ExecutionPolicy exec,
Iterator1 first1, Iterator1 last1,
Iterator2 first2, Iterator2 last2,
Iterator3 result1,
Iterator4 result2)
{
*result2 = thrust::merge(exec, first1, last1, first2, last2, result1);
}
// Exercises thrust::merge launched from device code (via merge_kernel) against
// a host-side reference merge, for several sizes of the second input range.
template<typename ExecutionPolicy>
void TestMergeDevice(ExecutionPolicy exec)
{
size_t n = 10000;
// Second-range sizes under test: empty, single element, and around/beyond n.
size_t sizes[] = {0, 1, n / 2, n, n + 1, 2 * n};
size_t num_sizes = sizeof(sizes) / sizeof(size_t);
// One random pool large enough for the first range (n) plus the largest size.
thrust::host_vector<int> random = unittest::random_integers<unittest::int8_t>(n + *thrust::max_element(sizes, sizes + num_sizes));
thrust::host_vector<int> h_a(random.begin(), random.begin() + n);
thrust::host_vector<int> h_b(random.begin() + n, random.end());
// merge requires sorted inputs.
thrust::stable_sort(h_a.begin(), h_a.end());
thrust::stable_sort(h_b.begin(), h_b.end());
thrust::device_vector<int> d_a = h_a;
thrust::device_vector<int> d_b = h_b;
for(size_t i = 0; i < num_sizes; i++)
{
size_t size = sizes[i];
thrust::host_vector<int> h_result(n + size);
thrust::device_vector<int> d_result(n + size);
typename thrust::host_vector<int>::iterator h_end;
typedef typename thrust::device_vector<int>::iterator iter_type;
// One-element device buffer the kernel writes the end iterator into.
thrust::device_vector<iter_type> d_end(1);
// Host reference merge of a with the first `size` elements of b.
h_end = thrust::merge(h_a.begin(), h_a.end(),
h_b.begin(), h_b.begin() + size,
h_result.begin());
h_result.resize(h_end - h_result.begin());
// Device merge from a single-thread kernel launch.
merge_kernel<<<1,1>>>(exec,
d_a.begin(), d_a.end(),
d_b.begin(), d_b.begin() + size,
d_result.begin(),
d_end.begin());
// Synchronize so in-kernel errors surface before comparing results.
cudaError_t const err = cudaDeviceSynchronize();
ASSERT_EQUAL(cudaSuccess, err);
// Trim to the size reported by the kernel and compare with the reference.
d_result.resize((iter_type)d_end[0] - d_result.begin());
ASSERT_EQUAL(h_result, d_result);
}
}
// Runs the device-side merge test with the sequential execution policy.
void TestMergeDeviceSeq()
{
TestMergeDevice(thrust::seq);
}
DECLARE_UNITTEST(TestMergeDeviceSeq);
// Runs the device-side merge test with the parallel device execution policy.
void TestMergeDeviceDevice()
{
TestMergeDevice(thrust::device);
}
DECLARE_UNITTEST(TestMergeDeviceDevice);
#endif
// Verifies thrust::merge on an explicit (non-default) CUDA stream against a
// hand-computed reference, and checks the returned end iterator.
void TestMergeCudaStreams()
{
typedef thrust::device_vector<int> Vector;
typedef Vector::iterator Iterator;
Vector a(3), b(4);
a[0] = 0; a[1] = 2; a[2] = 4;
b[0] = 0; b[1] = 3; b[2] = 3; b[3] = 4;
// Expected result of merging the two sorted sequences above.
Vector ref(7);
ref[0] = 0;
ref[1] = 0;
ref[2] = 2;
ref[3] = 3;
ref[4] = 3;
ref[5] = 4;
ref[6] = 4;
Vector result(7);
cudaStream_t s;
// Check stream creation: running on an invalid stream would make the later
// assertions meaningless.  Matches the ASSERT_EQUAL(cudaSuccess, ...) pattern
// used elsewhere in this file.
ASSERT_EQUAL(cudaSuccess, cudaStreamCreate(&s));
Iterator end = thrust::merge(thrust::cuda::par.on(s),
a.begin(), a.end(),
b.begin(), b.end(),
result.begin());
ASSERT_EQUAL_QUIET(result.end(), end);
ASSERT_EQUAL(ref, result);
// Destroying the stream can also report earlier asynchronous errors.
ASSERT_EQUAL(cudaSuccess, cudaStreamDestroy(s));
}
DECLARE_UNITTEST(TestMergeCudaStreams);
|
6edb91ad8b254a0d464f0d457e6d855fec5f40f2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
zsymv_upper.cu is nearly identical to zhemv_upper.cu, just change names and drop MAGMA_Z_CONJ.
zhemv_kernel_U (upper) in zhemv_upper.cu is very similar to
zhemv_kernel_L (lower) in zhemv.cu; diff the two files to compare.
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#define PRECISION_z
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
*******************************************************************************/
// Computes one block row of work = A*x for the upper Hermitian matrix A (see
// the block diagram above).  Launched on a 1-D grid with NB_X x NB_Y (64x4)
// threads per block; block blk handles rows [blk*NB_X, blk*NB_X + NB_X).
// Column blk of work (column stride lda) receives this block row's partial
// products, which zhemv_kernel_U_sum later combines into y.
__global__ void
zhemv_kernel_U(
int n,
magmaDoubleComplex const * __restrict__ A, int lda,
magmaDoubleComplex const * __restrict__ x, int incx,
magmaDoubleComplex * __restrict__ work)
{
// The entire body is compiled out for z-precision on devices below CC 2.0.
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaDoubleComplex psum, psum_t;
magmaDoubleComplex total = MAGMA_Z_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaDoubleComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaDoubleComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaDoubleComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
magmaDoubleComplex rA[4];
magmaDoubleComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
// pad out-of-range entries with zero so later multiplies are harmless
sx_blk[tx] = MAGMA_Z_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += MAGMA_Z_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 )
A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X )
A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for (int jj=blk+1; jj < gridDim.x; ++jj) {
partial = (jj == gridDim.x - 1 ? (n % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_Z_ZERO;
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for (int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_Z_ZERO;
}
}
}
else {
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_Z_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// recompute partial for this diagonal block (the jj loop above clobbered it)
partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end zhemv_kernel_U
/***************************************************************************//**
Upper case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
*******************************************************************************/
// Combines the per-block-column partial products produced by zhemv_kernel_U
// and applies the standard BLAS update y = alpha*(A*x) + beta*y, one thread
// per row.  work(row, j) for j = 0..blk holds the partials; consecutive block
// columns are lda elements apart.
__global__ void
zhemv_kernel_U_sum(
int n,
magmaDoubleComplex alpha,
int lda,
magmaDoubleComplex beta,
magmaDoubleComplex * __restrict__ y, int incy,
magmaDoubleComplex const * __restrict__ work )
{
    // Global row owned by this thread; block blockIdx.x covers one NB_X slice.
    const int row = blockIdx.x * NB_X + threadIdx.x;

    // Rows at or past n lie outside the matrix; write nothing for them.
    if ( row >= n )
        return;

    // Walk across the partial-product columns for this row and accumulate.
    magmaDoubleComplex const* wp = work + row;
    magmaDoubleComplex acc = MAGMA_Z_ZERO;
    for (int j = 0; j <= blockIdx.x; ++j, wp += lda) {
        acc += *wp;
    }

    y[row * incy] = beta*y[row * incy] + alpha*acc;
}
| 6edb91ad8b254a0d464f0d457e6d855fec5f40f2.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
zsymv_upper.cu is nearly identical to zhemv_upper.cu, just change names and drop MAGMA_Z_CONJ.
zhemv_kernel_U (upper) in zhemv_upper.cu is very similar to
zhemv_kernel_L (lower) in zhemv.cu; diff the two files to compare.
@precisions normal z -> s d c
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#define PRECISION_z
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
*******************************************************************************/
// Computes one block row of work = A*x for the upper Hermitian matrix A (see
// the block diagram above).  Launched on a 1-D grid with NB_X x NB_Y (64x4)
// threads per block; block blk handles rows [blk*NB_X, blk*NB_X + NB_X).
// Column blk of work (column stride lda) receives this block row's partial
// products, which zhemv_kernel_U_sum later combines into y.
__global__ void
zhemv_kernel_U(
int n,
magmaDoubleComplex const * __restrict__ A, int lda,
magmaDoubleComplex const * __restrict__ x, int incx,
magmaDoubleComplex * __restrict__ work)
{
// The entire body is compiled out for z-precision on devices below CC 2.0.
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
magmaDoubleComplex psum, psum_t;
magmaDoubleComplex total = MAGMA_Z_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ magmaDoubleComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ magmaDoubleComplex sx_blk[NB_X]; // for x[ blk ]
__shared__ magmaDoubleComplex sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
magmaDoubleComplex rA[4];
magmaDoubleComplex psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
// pad out-of-range entries with zero so later multiplies are harmless
sx_blk[tx] = MAGMA_Z_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_Z_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_Z_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += MAGMA_Z_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 )
A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X )
A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for (int jj=blk+1; jj < gridDim.x; ++jj) {
partial = (jj == gridDim.x - 1 ? (n % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_Z_ZERO;
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for (int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_Z_ZERO;
}
}
}
else {
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_Z_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_Z_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
// recompute partial for this diagonal block (the jj loop above clobbered it)
partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end zhemv_kernel_U
/***************************************************************************//**
Upper case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
*******************************************************************************/
// Combines the per-block-column partial products produced by zhemv_kernel_U
// and applies the standard BLAS update y = alpha*(A*x) + beta*y, one thread
// per row.  work(row, j) for j = 0..blk holds the partials; consecutive block
// columns are lda elements apart.
__global__ void
zhemv_kernel_U_sum(
int n,
magmaDoubleComplex alpha,
int lda,
magmaDoubleComplex beta,
magmaDoubleComplex * __restrict__ y, int incy,
magmaDoubleComplex const * __restrict__ work )
{
    // Global row owned by this thread; block blockIdx.x covers one NB_X slice.
    const int row = blockIdx.x * NB_X + threadIdx.x;

    // Rows at or past n lie outside the matrix; write nothing for them.
    if ( row >= n )
        return;

    // Walk across the partial-product columns for this row and accumulate.
    magmaDoubleComplex const* wp = work + row;
    magmaDoubleComplex acc = MAGMA_Z_ZERO;
    for (int j = 0; j <= blockIdx.x; ++j, wp += lda) {
        acc += *wp;
    }

    y[row * incy] = beta*y[row * incy] + alpha*acc;
}
|
1747c16cbe5942121d3b07a61bc8218373cfa495.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// Elementwise sign: writes +1 for strictly positive inputs, -1 otherwise
// (zero maps to -1).
template <typename Dtype>
__global__ void ComputeSign(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    if (in[index] > 0) {
      out[index] = Dtype(1);
    } else {
      out[index] = Dtype(-1);
    }
  }
}
// TODO maybe change the way of detecting NaNs
// Writes 1 where the input is a number and 0 where it is NaN
// (exploits the IEEE property NaN != NaN).
template <typename Dtype>
__global__ void FindNotNaNs(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const bool is_number = (in[index] == in[index]);
    out[index] = is_number ? Dtype(1) : Dtype(0);
  }
}
// Copies the input, replacing NaN entries with zero (NaN != NaN).
template <typename Dtype>
__global__ void KillNaNs(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype v = in[index];
    out[index] = (v == v) ? v : Dtype(0);
  }
}
// Zeroes out[index] wherever the mask value in[index] is not set (<= 0.5);
// entries with an active mask keep their current value.
template <typename Dtype>
__global__ void KillMasked(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    if (!(in[index] > Dtype(0.5))) {
      out[index] = Dtype(0);
    }
  }
}
// Same as KillMasked, but a single mask plane of size width_height gates
// every channel: each output element is gated by the mask entry at its
// spatial position (index modulo width_height).
template <typename Dtype>
__global__ void KillMaskedAcrossChannels(const int n, const int width_height, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int mask_idx = index % width_height;
    if (!(in[mask_idx] > Dtype(0.5))) {
      out[index] = Dtype(0);
    }
  }
}
// Zeroes mask entries whose corresponding input magnitude lies below the
// plateau threshold; all other entries are left untouched.
template <typename Dtype>
__global__ void MaskPlateauValues(const int n, const Dtype* in, Dtype* out, Dtype plateau) {
  CUDA_KERNEL_LOOP(index, n) {
    const bool below_plateau = (fabs(in[index]) < plateau);
    if (below_plateau) {
      out[index] = Dtype(0);
    }
  }
}
// Builds a fresh plateau mask: 0 where |in| < plateau, 1 everywhere else.
template <typename Dtype>
__global__ void MaskPlateauValuesInitial(const int n, const Dtype* in, Dtype* out, Dtype plateau) {
  CUDA_KERNEL_LOOP(index, n) {
    if (fabs(in[index]) < plateau) {
      out[index] = Dtype(0);
    } else {
      out[index] = Dtype(1);
    }
  }
}
// Forward pass of the L1 loss.
// diff = bottom[0] - bottom[1] (via diff_layer_) when two bottoms are given,
// otherwise diff_top_vec_[0] is used as prepared by the caller.
// loss = sum(|diff|) / normalize_coeff_, with NaN entries masked out.
// With l2_per_location(), an L2 norm is taken per location via the
// square -> sum -> sqrt layer chain before summing (presumably reducing
// over channels — TODO confirm against sum_layer_ setup), and values below
// the configured plateau threshold are suppressed in both modes.
template <typename Dtype>
void L1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top)
{
  Blob<Dtype> *diffptr = diff_top_vec_[0];
  Dtype dot, loss;
  if(bottom.size() > 1) {
    diff_layer_->Forward(bottom, diff_top_vec_);
  }
  // if necessary, compute the number of not-NaNs
  int count = bottom[0]->count();
  int num = bottom[0]->num();
  // mask_ <- 1 where diff is a number, 0 where it is NaN.
  hipLaunchKernelGGL(( FindNotNaNs<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, diffptr->gpu_data(), mask_.mutable_gpu_data());
  hipDeviceSynchronize();
  CUDA_POST_KERNEL_CHECK;
  if (this->layer_param_.l1_loss_param().normalize_by_num_entries()) {
    // Normalize by the number of valid (non-NaN) entries per channel.
    caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_);
    normalize_coeff_ /= mask_.channels();
  } else {
    // Normalize by batch size.
    normalize_coeff_ = num;
  }
  if (this->layer_param_.l1_loss_param().l2_per_location()) {
    // set masked (NaNs only) to zero
    hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, mask_.gpu_data(), diffptr->mutable_gpu_data());
    hipDeviceSynchronize();
    CUDA_POST_KERNEL_CHECK;
    square_layer_->Forward(diff_top_vec_, square_top_vec_);
    sum_layer_->Forward(square_top_vec_, sum_top_vec_);
    // Mask plateau in summed blob (only one channel):
    if(this->layer_param_.l1_loss_param().plateau() > 0) {
      // sum_output_ holds squared norms, so compare against plateau^2.
      float plateau_val_squared = this->layer_param_.l1_loss_param().plateau() * this->layer_param_.l1_loss_param().plateau();
      hipLaunchKernelGGL(( MaskPlateauValuesInitial<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
          sum_output_.count(), sum_output_.gpu_data(), plateau_l2_.mutable_gpu_data(), plateau_val_squared);
      hipDeviceSynchronize();
      CUDA_POST_KERNEL_CHECK;
      hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
          sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_data());
      hipDeviceSynchronize();
      CUDA_POST_KERNEL_CHECK;
    }
    sqrt_layer_->Forward(sum_top_vec_, sqrt_top_vec_);
    // Note sign_ is set to all ones in Reshape, so this dot is a plain sum.
    caffe_gpu_dot(sqrt_output_.count(), sqrt_output_.gpu_data(), sign_.gpu_data(), &dot);
  }
  else {
    // Mask plateau:
    if(this->layer_param_.l1_loss_param().plateau() > 0) {
      hipLaunchKernelGGL(( MaskPlateauValues<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
          count, diffptr->gpu_data(), mask_.mutable_gpu_data(), this->layer_param_.l1_loss_param().plateau());
      CUDA_POST_KERNEL_CHECK;
    }
    //mask_.print("MASK2");
    // set masked (NaNs, plateau) to zero
    hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, mask_.gpu_data(), diffptr->mutable_gpu_data());
    CUDA_POST_KERNEL_CHECK;
    // sign_ <- sign(diff); then sum(|diff|) = dot(diff, sign(diff)).
    hipLaunchKernelGGL(( ComputeSign<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, diffptr->gpu_data(), sign_.mutable_gpu_data());
    CUDA_POST_KERNEL_CHECK;
    caffe_gpu_dot(count, diffptr->gpu_data(), sign_.gpu_data(), &dot);
  }
  loss = dot / normalize_coeff_;
  top[0]->mutable_cpu_data()[0] = loss;
}
// Backward pass of the L1 loss.
// Propagates d(loss)/d(diff) = sign(diff) * top_diff / normalize_coeff_,
// optionally back through the per-location L2 chain (sqrt -> sum -> square),
// then through diff_layer_ to the bottom blobs. Masked (NaN / plateau)
// entries receive zero gradient.
template <typename Dtype>
void L1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
  // Gradient is needed if either input requests it (diff touches both).
  bool prop_down = propagate_down[0];
  if(bottom.size() > 1) prop_down |= propagate_down[1];
  Blob<Dtype> *diffptr = diff_top_vec_[0];
  if (prop_down) {
    const Dtype alpha = top[0]->cpu_diff()[0] / normalize_coeff_;
    if (this->layer_param_.l1_loss_param().l2_per_location()) {
      // NOTE(review): this vector intentionally shadows the outer bool
      // prop_down; it is only consumed by the internal Backward calls below.
      vector<bool> prop_down(1,true);
      // d(loss)/d(sqrt_output) = alpha (sign_ is all ones here).
      caffe_gpu_axpby(sqrt_output_.count(), alpha, sign_.gpu_data(),
          Dtype(0), sqrt_output_.mutable_gpu_diff());
      sqrt_layer_->Backward(sqrt_top_vec_, prop_down, sum_top_vec_);
      // Zero gradients at plateau-masked locations.
      if(this->layer_param_.l1_loss_param().plateau() > 0) {
        hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(sum_output_.count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
            sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_diff());
        hipDeviceSynchronize();
        CUDA_POST_KERNEL_CHECK;
      }
      sum_layer_->Backward(sum_top_vec_, prop_down, square_top_vec_);
      square_layer_->Backward(square_top_vec_, prop_down, diff_top_vec_);
    }
    else {
      // Plain L1: gradient w.r.t. diff is alpha * sign(diff).
      caffe_gpu_axpby(diffptr->count(), alpha, sign_.gpu_data(),
          Dtype(0), diffptr->mutable_gpu_diff());
    }
    // Zero gradients at masked (NaN / plateau) entries.
    hipLaunchKernelGGL(( KillMasked<Dtype>), dim3(CAFFE_GET_BLOCKS(diffptr->count())), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        diffptr->count(), mask_.gpu_data(), diffptr->mutable_gpu_diff());
    CUDA_POST_KERNEL_CHECK;
    if(bottom.size() > 1) {
      diff_layer_->Backward(diff_top_vec_, propagate_down, bottom);
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(L1LossLayer);
} // namespace caffe
| 1747c16cbe5942121d3b07a61bc8218373cfa495.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/io.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
// Elementwise sign: writes +1 for strictly positive inputs, -1 otherwise
// (zero maps to -1).
template <typename Dtype>
__global__ void ComputeSign(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    if (in[index] > 0) {
      out[index] = Dtype(1);
    } else {
      out[index] = Dtype(-1);
    }
  }
}
// TODO maybe change the way of detecting NaNs
// Writes 1 where the input is a number and 0 where it is NaN
// (exploits the IEEE property NaN != NaN).
template <typename Dtype>
__global__ void FindNotNaNs(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const bool is_number = (in[index] == in[index]);
    out[index] = is_number ? Dtype(1) : Dtype(0);
  }
}
// Copies the input, replacing NaN entries with zero (NaN != NaN).
template <typename Dtype>
__global__ void KillNaNs(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const Dtype v = in[index];
    out[index] = (v == v) ? v : Dtype(0);
  }
}
// Zeroes out[index] wherever the mask value in[index] is not set (<= 0.5);
// entries with an active mask keep their current value.
template <typename Dtype>
__global__ void KillMasked(const int n, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    if (!(in[index] > Dtype(0.5))) {
      out[index] = Dtype(0);
    }
  }
}
// Same as KillMasked, but a single mask plane of size width_height gates
// every channel: each output element is gated by the mask entry at its
// spatial position (index modulo width_height).
template <typename Dtype>
__global__ void KillMaskedAcrossChannels(const int n, const int width_height, const Dtype* in, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    const int mask_idx = index % width_height;
    if (!(in[mask_idx] > Dtype(0.5))) {
      out[index] = Dtype(0);
    }
  }
}
// Zeroes mask entries whose corresponding input magnitude lies below the
// plateau threshold; all other entries are left untouched.
template <typename Dtype>
__global__ void MaskPlateauValues(const int n, const Dtype* in, Dtype* out, Dtype plateau) {
  CUDA_KERNEL_LOOP(index, n) {
    const bool below_plateau = (fabs(in[index]) < plateau);
    if (below_plateau) {
      out[index] = Dtype(0);
    }
  }
}
// Builds a fresh plateau mask: 0 where |in| < plateau, 1 everywhere else.
template <typename Dtype>
__global__ void MaskPlateauValuesInitial(const int n, const Dtype* in, Dtype* out, Dtype plateau) {
  CUDA_KERNEL_LOOP(index, n) {
    if (fabs(in[index]) < plateau) {
      out[index] = Dtype(0);
    } else {
      out[index] = Dtype(1);
    }
  }
}
// Forward pass of the L1 loss.
// diff = bottom[0] - bottom[1] (via diff_layer_) when two bottoms are given,
// otherwise diff_top_vec_[0] is used as prepared by the caller.
// loss = sum(|diff|) / normalize_coeff_, with NaN entries masked out.
// With l2_per_location(), an L2 norm is taken per location via the
// square -> sum -> sqrt layer chain before summing (presumably reducing
// over channels — TODO confirm against sum_layer_ setup), and values below
// the configured plateau threshold are suppressed in both modes.
template <typename Dtype>
void L1LossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top)
{
  Blob<Dtype> *diffptr = diff_top_vec_[0];
  Dtype dot, loss;
  if(bottom.size() > 1) {
    diff_layer_->Forward(bottom, diff_top_vec_);
  }
  // if necessary, compute the number of not-NaNs
  int count = bottom[0]->count();
  int num = bottom[0]->num();
  // mask_ <- 1 where diff is a number, 0 where it is NaN.
  FindNotNaNs<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, diffptr->gpu_data(), mask_.mutable_gpu_data());
  cudaDeviceSynchronize();
  CUDA_POST_KERNEL_CHECK;
  if (this->layer_param_.l1_loss_param().normalize_by_num_entries()) {
    // Normalize by the number of valid (non-NaN) entries per channel.
    caffe_gpu_dot(count, mask_.gpu_data(), mask_.gpu_data(), &normalize_coeff_);
    normalize_coeff_ /= mask_.channels();
  } else {
    // Normalize by batch size.
    normalize_coeff_ = num;
  }
  if (this->layer_param_.l1_loss_param().l2_per_location()) {
    // set masked (NaNs only) to zero
    KillMasked<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, mask_.gpu_data(), diffptr->mutable_gpu_data());
    cudaDeviceSynchronize();
    CUDA_POST_KERNEL_CHECK;
    square_layer_->Forward(diff_top_vec_, square_top_vec_);
    sum_layer_->Forward(square_top_vec_, sum_top_vec_);
    // Mask plateau in summed blob (only one channel):
    if(this->layer_param_.l1_loss_param().plateau() > 0) {
      // sum_output_ holds squared norms, so compare against plateau^2.
      float plateau_val_squared = this->layer_param_.l1_loss_param().plateau() * this->layer_param_.l1_loss_param().plateau();
      MaskPlateauValuesInitial<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>(
          sum_output_.count(), sum_output_.gpu_data(), plateau_l2_.mutable_gpu_data(), plateau_val_squared);
      cudaDeviceSynchronize();
      CUDA_POST_KERNEL_CHECK;
      KillMasked<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>(
          sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_data());
      cudaDeviceSynchronize();
      CUDA_POST_KERNEL_CHECK;
    }
    sqrt_layer_->Forward(sum_top_vec_, sqrt_top_vec_);
    // Note sign_ is set to all ones in Reshape, so this dot is a plain sum.
    caffe_gpu_dot(sqrt_output_.count(), sqrt_output_.gpu_data(), sign_.gpu_data(), &dot);
  }
  else {
    // Mask plateau:
    if(this->layer_param_.l1_loss_param().plateau() > 0) {
      MaskPlateauValues<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
          count, diffptr->gpu_data(), mask_.mutable_gpu_data(), this->layer_param_.l1_loss_param().plateau());
      CUDA_POST_KERNEL_CHECK;
    }
    //mask_.print("MASK2");
    // set masked (NaNs, plateau) to zero
    KillMasked<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, mask_.gpu_data(), diffptr->mutable_gpu_data());
    CUDA_POST_KERNEL_CHECK;
    // sign_ <- sign(diff); then sum(|diff|) = dot(diff, sign(diff)).
    ComputeSign<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, diffptr->gpu_data(), sign_.mutable_gpu_data());
    CUDA_POST_KERNEL_CHECK;
    caffe_gpu_dot(count, diffptr->gpu_data(), sign_.gpu_data(), &dot);
  }
  loss = dot / normalize_coeff_;
  top[0]->mutable_cpu_data()[0] = loss;
}
// Backward pass of the L1 loss.
// Propagates d(loss)/d(diff) = sign(diff) * top_diff / normalize_coeff_,
// optionally back through the per-location L2 chain (sqrt -> sum -> square),
// then through diff_layer_ to the bottom blobs. Masked (NaN / plateau)
// entries receive zero gradient.
template <typename Dtype>
void L1LossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom)
{
  // Gradient is needed if either input requests it (diff touches both).
  bool prop_down = propagate_down[0];
  if(bottom.size() > 1) prop_down |= propagate_down[1];
  Blob<Dtype> *diffptr = diff_top_vec_[0];
  if (prop_down) {
    const Dtype alpha = top[0]->cpu_diff()[0] / normalize_coeff_;
    if (this->layer_param_.l1_loss_param().l2_per_location()) {
      // NOTE(review): this vector intentionally shadows the outer bool
      // prop_down; it is only consumed by the internal Backward calls below.
      vector<bool> prop_down(1,true);
      // d(loss)/d(sqrt_output) = alpha (sign_ is all ones here).
      caffe_gpu_axpby(sqrt_output_.count(), alpha, sign_.gpu_data(),
          Dtype(0), sqrt_output_.mutable_gpu_diff());
      sqrt_layer_->Backward(sqrt_top_vec_, prop_down, sum_top_vec_);
      // Zero gradients at plateau-masked locations.
      if(this->layer_param_.l1_loss_param().plateau() > 0) {
        KillMasked<Dtype><<<CAFFE_GET_BLOCKS(sum_output_.count()), CAFFE_CUDA_NUM_THREADS>>>(
            sum_output_.count(), plateau_l2_.gpu_data(), sum_output_.mutable_gpu_diff());
        cudaDeviceSynchronize();
        CUDA_POST_KERNEL_CHECK;
      }
      sum_layer_->Backward(sum_top_vec_, prop_down, square_top_vec_);
      square_layer_->Backward(square_top_vec_, prop_down, diff_top_vec_);
    }
    else {
      // Plain L1: gradient w.r.t. diff is alpha * sign(diff).
      caffe_gpu_axpby(diffptr->count(), alpha, sign_.gpu_data(),
          Dtype(0), diffptr->mutable_gpu_diff());
    }
    // Zero gradients at masked (NaN / plateau) entries.
    KillMasked<Dtype><<<CAFFE_GET_BLOCKS(diffptr->count()), CAFFE_CUDA_NUM_THREADS>>>(
        diffptr->count(), mask_.gpu_data(), diffptr->mutable_gpu_diff());
    CUDA_POST_KERNEL_CHECK;
    if(bottom.size() > 1) {
      diff_layer_->Backward(diff_top_vec_, propagate_down, bottom);
    }
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(L1LossLayer);
} // namespace caffe
|
26f2620a1b0ea0cac681799a728be710ee709e9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "kmeans.h"
#include "point.h"
#include "config.h"
/**
    Assigns every point to its nearest centroid (distance computed by
    km_distance). One thread handles one point.
*/
__global__ void km_group_by_cluster(Point* points, Centroid* centroids,
                                    int num_centroids, int num_points)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_points) {
        float best_distance = -1.0;  // -1.0 means "no candidate seen yet"
        for (int c = 0; c < num_centroids; c++) {
            float d = km_distance(&points[idx], &centroids[c]);
            // Take this centroid if it is the first one examined or closer
            // than the best so far.
            if (best_distance == -1.0 || d < best_distance) {
                best_distance = d;
                points[idx].cluster = c;
            }
        }
    }
}
/**
    Accumulates per-centroid sums (x_sum, y_sum, num_points) of the points
    assigned to each cluster. One thread per point; atomics are required
    because many points share a centroid.
    Improvement: the original scanned every centroid per point
    (O(num_centroids) work and 3*num_centroids conditional atomics); the
    point's cluster id already names the target centroid, so one guarded
    update suffices.
*/
__global__ void km_sum_points_cluster(Point* points, Centroid* centroids,
                                      int num_centroids, int num_points)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_points) {
        int cluster = points[idx].cluster;
        // Guard preserves the original behavior of silently ignoring
        // out-of-range cluster ids.
        if (cluster >= 0 && cluster < num_centroids) {
            atomicAdd(&centroids[cluster].x_sum, points[idx].x);
            atomicAdd(&centroids[cluster].y_sum, points[idx].y);
            atomicAdd(&centroids[cluster].num_points, 1);
        }
    }
}
/**
    Resets the accumulators (x_sum, y_sum, num_points) written during the
    previous iteration. One thread per centroid.
*/
__global__ void km_clear_last_iteration(Centroid* centroids, int num_centroids)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_centroids) {
        return;
    }
    centroids[idx].x_sum = 0.0;
    centroids[idx].y_sum = 0.0;
    centroids[idx].num_points = 0.0;
}
/**
    Moves each centroid to the mean of its assigned points
    (x_sum / num_points, y_sum / num_points). Centroids that own no points
    keep their previous position. One thread per centroid.
    The accumulators are intentionally NOT cleared here — they are still
    needed for plotting; km_clear_last_iteration resets them at the start
    of the next iteration (at a measured cost of ~1ms per the original
    author's note).
*/
__global__ void km_update_centroids(Centroid* centroids, int num_centroids)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_centroids) {
        return;
    }
    if (centroids[idx].num_points > 0) {
        centroids[idx].x = centroids[idx].x_sum / centroids[idx].num_points;
        centroids[idx].y = centroids[idx].y_sum / centroids[idx].num_points;
    }
}
/**
    Sets *result to 0 if any point changed its cluster between two
    iterations; *result is left untouched otherwise (caller initializes
    it to 1 beforehand).
    @param p1 - points of current iteration
    @param p2 - points of last iteration
*/
__global__ void km_points_compare(Point* p1, Point* p2, int num_points,
                                  int *result)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_points) {
        return;
    }
    if (p1[idx].cluster != p2[idx].cluster) {
        *result = 0;
    }
}
/**
    Element-wise copy of a Point array, used to snapshot the previous
    iteration's assignments. One thread per point.
*/
__global__ void km_points_copy(Point* p_dest, Point* p_src, int num_points)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_points) {
        return;
    }
    p_dest[idx] = p_src[idx];
}
/**
 * Executes the k-means algorithm on the GPU.
 *
 * Bug fix: launch grids were computed as ceil(num_points/100) with integer
 * operands, so the division truncated BEFORE ceil() ran — the remainder
 * points/centroids (e.g. the last num_points % 100 points, or ALL of them
 * when num_points < 100) never got a thread. Grids are now computed with
 * integer ceiling division. The same ceil-div grid is also used for
 * km_points_compare / km_points_copy, which previously launched only 10
 * threads per block and likewise under-covered the data.
 *
 * To time the __global__ calls, bracket them with hipEventRecord on a
 * start/stop hipEvent_t pair and read hipEventElapsedTime.
 */
void km_execute(Point* h_points, Centroid* h_centroids, int num_points,
                int num_centroids)
{
    int iterations = 0;
    Point* d_points;
    Point* d_points_old;
    Centroid* d_centroids;
    int h_res = 1;
    int *d_res;
    // Integer ceiling division so the final partial block is still launched.
    const int point_threads = 100;
    const int centroid_threads = 10;
    const int point_blocks = (num_points + point_threads - 1) / point_threads;
    const int centroid_blocks =
        (num_centroids + centroid_threads - 1) / centroid_threads;
    hipMalloc((void**) &d_res, sizeof(int));
    hipMalloc((void**) &d_points_old, sizeof(Point) * num_points);
    hipMalloc((void **) &d_points, sizeof(Point) * num_points);
    hipMalloc((void **) &d_centroids, sizeof(Centroid) * num_centroids);
    hipMemcpy(d_points, h_points, sizeof(Point) * num_points, hipMemcpyHostToDevice);
    hipMemcpy(d_centroids, h_centroids, sizeof(Centroid) * num_centroids, hipMemcpyHostToDevice);
    while (true) {
        // Reset accumulators, reassign points, re-accumulate, move centroids.
        hipLaunchKernelGGL(( km_clear_last_iteration), dim3(centroid_blocks), dim3(centroid_threads), 0, 0, d_centroids, num_centroids);
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( km_group_by_cluster), dim3(point_blocks), dim3(point_threads), 0, 0, d_points, d_centroids,
                num_centroids, num_points);
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( km_sum_points_cluster), dim3(point_blocks), dim3(point_threads), 0, 0, d_points, d_centroids,
                num_centroids, num_points);
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( km_update_centroids), dim3(centroid_blocks), dim3(centroid_threads), 0, 0, d_centroids, num_centroids);
        hipDeviceSynchronize();
        if (REPOSITORY_SPECIFICATION == 1) {
            // Fixed-iteration mode: stop after NUMBER_OF_ITERATIONS rounds
            // (iterations starts at 0).
            if (iterations == (NUMBER_OF_ITERATIONS - 1)) {
                break;
            }
        } else {
            // Convergence mode (marked "not working yet" by the original
            // author): stop once no point changes cluster between rounds.
            if (iterations > 0) {
                h_res = 1;
                hipMemcpy(d_res, &h_res , sizeof(int), hipMemcpyHostToDevice);
                hipLaunchKernelGGL(( km_points_compare), dim3(point_blocks), dim3(point_threads), 0, 0, d_points, d_points_old,
                        num_points, d_res);
                hipDeviceSynchronize();
                hipMemcpy(&h_res, d_res, sizeof(int), hipMemcpyDeviceToHost);
                // h_res == 1 -> assignments unchanged: k-means has converged.
                if (h_res == 1) {
                    break;
                }
            }
            hipLaunchKernelGGL(( km_points_copy), dim3(point_blocks), dim3(point_threads), 0, 0, d_points_old, d_points,
                    num_points);
            hipDeviceSynchronize();
        }
        iterations++;
    }
    hipMemcpy(h_centroids, d_centroids , sizeof(Centroid) * num_centroids, hipMemcpyDeviceToHost);
    hipFree(d_points);
    hipFree(d_centroids);
    hipFree(d_points_old);
    hipFree(d_res);
}
| 26f2620a1b0ea0cac681799a728be710ee709e9d.cu | #include <stdio.h>
#include <stdlib.h>
#include "kmeans.h"
#include "point.h"
#include "config.h"
/**
    Assigns every point to its nearest centroid (distance computed by
    km_distance). One thread handles one point.
*/
__global__ void km_group_by_cluster(Point* points, Centroid* centroids,
                                    int num_centroids, int num_points)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_points) {
        float best_distance = -1.0;  // -1.0 means "no candidate seen yet"
        for (int c = 0; c < num_centroids; c++) {
            float d = km_distance(&points[idx], &centroids[c]);
            // Take this centroid if it is the first one examined or closer
            // than the best so far.
            if (best_distance == -1.0 || d < best_distance) {
                best_distance = d;
                points[idx].cluster = c;
            }
        }
    }
}
/**
    Accumulates per-centroid sums (x_sum, y_sum, num_points) of the points
    assigned to each cluster. One thread per point; atomics are required
    because many points share a centroid.
    Improvement: the original scanned every centroid per point
    (O(num_centroids) work and 3*num_centroids conditional atomics); the
    point's cluster id already names the target centroid, so one guarded
    update suffices.
*/
__global__ void km_sum_points_cluster(Point* points, Centroid* centroids,
                                      int num_centroids, int num_points)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < num_points) {
        int cluster = points[idx].cluster;
        // Guard preserves the original behavior of silently ignoring
        // out-of-range cluster ids.
        if (cluster >= 0 && cluster < num_centroids) {
            atomicAdd(&centroids[cluster].x_sum, points[idx].x);
            atomicAdd(&centroids[cluster].y_sum, points[idx].y);
            atomicAdd(&centroids[cluster].num_points, 1);
        }
    }
}
/**
    Resets the accumulators (x_sum, y_sum, num_points) written during the
    previous iteration. One thread per centroid.
*/
__global__ void km_clear_last_iteration(Centroid* centroids, int num_centroids)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_centroids) {
        return;
    }
    centroids[idx].x_sum = 0.0;
    centroids[idx].y_sum = 0.0;
    centroids[idx].num_points = 0.0;
}
/**
    Moves each centroid to the mean of its assigned points
    (x_sum / num_points, y_sum / num_points). Centroids that own no points
    keep their previous position. One thread per centroid.
    The accumulators are intentionally NOT cleared here — they are still
    needed for plotting; km_clear_last_iteration resets them at the start
    of the next iteration (at a measured cost of ~1ms per the original
    author's note).
*/
__global__ void km_update_centroids(Centroid* centroids, int num_centroids)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_centroids) {
        return;
    }
    if (centroids[idx].num_points > 0) {
        centroids[idx].x = centroids[idx].x_sum / centroids[idx].num_points;
        centroids[idx].y = centroids[idx].y_sum / centroids[idx].num_points;
    }
}
/**
    Sets *result to 0 if any point changed its cluster between two
    iterations; *result is left untouched otherwise (caller initializes
    it to 1 beforehand).
    @param p1 - points of current iteration
    @param p2 - points of last iteration
*/
__global__ void km_points_compare(Point* p1, Point* p2, int num_points,
                                  int *result)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_points) {
        return;
    }
    if (p1[idx].cluster != p2[idx].cluster) {
        *result = 0;
    }
}
/**
    Element-wise copy of a Point array, used to snapshot the previous
    iteration's assignments. One thread per point.
*/
__global__ void km_points_copy(Point* p_dest, Point* p_src, int num_points)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= num_points) {
        return;
    }
    p_dest[idx] = p_src[idx];
}
/**
 * Executes the k-means algorithm on the GPU.
 *
 * Bug fix: launch grids were computed as ceil(num_points/100) with integer
 * operands, so the division truncated BEFORE ceil() ran — the remainder
 * points/centroids (e.g. the last num_points % 100 points, or ALL of them
 * when num_points < 100) never got a thread. Grids are now computed with
 * integer ceiling division. The same ceil-div grid is also used for
 * km_points_compare / km_points_copy, which previously launched only 10
 * threads per block and likewise under-covered the data.
 *
 * To time the __global__ calls, bracket them with cudaEventRecord on a
 * start/stop cudaEvent_t pair and read cudaEventElapsedTime.
 */
void km_execute(Point* h_points, Centroid* h_centroids, int num_points,
                int num_centroids)
{
    int iterations = 0;
    Point* d_points;
    Point* d_points_old;
    Centroid* d_centroids;
    int h_res = 1;
    int *d_res;
    // Integer ceiling division so the final partial block is still launched.
    const int point_threads = 100;
    const int centroid_threads = 10;
    const int point_blocks = (num_points + point_threads - 1) / point_threads;
    const int centroid_blocks =
        (num_centroids + centroid_threads - 1) / centroid_threads;
    cudaMalloc((void**) &d_res, sizeof(int));
    cudaMalloc((void**) &d_points_old, sizeof(Point) * num_points);
    cudaMalloc((void **) &d_points, sizeof(Point) * num_points);
    cudaMalloc((void **) &d_centroids, sizeof(Centroid) * num_centroids);
    cudaMemcpy(d_points, h_points, sizeof(Point) * num_points, cudaMemcpyHostToDevice);
    cudaMemcpy(d_centroids, h_centroids, sizeof(Centroid) * num_centroids, cudaMemcpyHostToDevice);
    while (true) {
        // Reset accumulators, reassign points, re-accumulate, move centroids.
        km_clear_last_iteration<<<centroid_blocks, centroid_threads>>>(d_centroids, num_centroids);
        cudaDeviceSynchronize();
        km_group_by_cluster<<<point_blocks, point_threads>>>(d_points, d_centroids,
                num_centroids, num_points);
        cudaDeviceSynchronize();
        km_sum_points_cluster<<<point_blocks, point_threads>>>(d_points, d_centroids,
                num_centroids, num_points);
        cudaDeviceSynchronize();
        km_update_centroids<<<centroid_blocks, centroid_threads>>>(d_centroids, num_centroids);
        cudaDeviceSynchronize();
        if (REPOSITORY_SPECIFICATION == 1) {
            // Fixed-iteration mode: stop after NUMBER_OF_ITERATIONS rounds
            // (iterations starts at 0).
            if (iterations == (NUMBER_OF_ITERATIONS - 1)) {
                break;
            }
        } else {
            // Convergence mode (marked "not working yet" by the original
            // author): stop once no point changes cluster between rounds.
            if (iterations > 0) {
                h_res = 1;
                cudaMemcpy(d_res, &h_res , sizeof(int), cudaMemcpyHostToDevice);
                km_points_compare<<<point_blocks, point_threads>>>(d_points, d_points_old,
                        num_points, d_res);
                cudaDeviceSynchronize();
                cudaMemcpy(&h_res, d_res, sizeof(int), cudaMemcpyDeviceToHost);
                // h_res == 1 -> assignments unchanged: k-means has converged.
                if (h_res == 1) {
                    break;
                }
            }
            km_points_copy<<<point_blocks, point_threads>>>(d_points_old, d_points,
                    num_points);
            cudaDeviceSynchronize();
        }
        iterations++;
    }
    cudaMemcpy(h_centroids, d_centroids , sizeof(Centroid) * num_centroids, cudaMemcpyDeviceToHost);
    cudaFree(d_points);
    cudaFree(d_centroids);
    cudaFree(d_points_old);
    cudaFree(d_res);
}
|
c2cc5c39814d6d6e2b83957ca390e0f66fdacc73.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Context.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/PinnedMemoryAllocator.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/hip/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/hip/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THH/THH.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLu(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* ipiv, magma_int_t* info);
template<class scalar_t>
void magmaLuBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaLuNoPiv(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
magma_int_t* info);
template<class scalar_t>
void magmaLuNoPivBatched(
magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);
template<class scalar_t>
void magmaGetri(
magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
magma_int_t lwork, magma_int_t* info);
template<class scalar_t>
void magmaGetriBatched(
magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholeskySolve(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaCholeskySolveBatched(
magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaCholesky(
magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
magma_int_t ldda, magma_int_t* info);
template<class scalar_t>
void magmaCholeskyBatched(
magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);
template<class scalar_t>
void magmaTriangularSolve(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb);
template<class scalar_t>
void magmaTriangularSolveBatched(
magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
const MAGMAQueue& magma_queue);
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);
template<class scalar_t>
void magmaGeqrf(
magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);
template<class scalar_t>
void magmaOrgqr(
magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);
template<class scalar_t, class value_t=scalar_t>
void magmaSymeig(
magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);
template<class scalar_t>
void magmaEig(
magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
scalar_t *wr, scalar_t *wi, scalar_t *VL, magma_int_t ldvl,
scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork, magma_int_t *info);
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
value_t* rwork,
magma_int_t* iwork, magma_int_t* info);
template<class scalar_t>
void magmaLuSolve(
magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
scalar_t* dB, magma_int_t lddb, magma_int_t* info);
template<class scalar_t>
void magmaLuSolveBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
magma_int_t batchsize, const MAGMAQueue& magma_queue);
// magmaSolve specializations: solve A*X = B on the GPU via MAGMA's
// magma_{s,d,c,z}gesv_gpu drivers. ipiv receives the pivot indices and
// info the MAGMA status code. MagmaStreamSyncGuard synchronizes MAGMA's
// queue with the current stream for the duration of the call; complex
// types are reinterpret_cast to the matching MAGMA complex layout.
template<>
void magmaSolve<double>(
    magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
    magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaSolve<float>(
    magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
    magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaSolve<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
    magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgesv_gpu(n, nrhs,
    reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
    reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaSolve<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
    magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgesv_gpu(n, nrhs,
    reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
    reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaSolveBatched specializations: batched A*X = B solves via MAGMA's
// magma_{s,d,c,z}gesv_batched. Each entry of the pointer arrays is one
// matrix/rhs in the batch; per-problem status lands in dinfo_array. The
// caller-supplied MAGMAQueue carries the execution queue, so no stream
// guard is needed here.
template<>
void magmaSolveBatched<double>(
    magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaSolveBatched<float>(
    magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaSolveBatched<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_zgesv_batched(n, nrhs,
    reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
    reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}

template<>
void magmaSolveBatched<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_cgesv_batched(n, nrhs,
    reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
    reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaLu<T>: in-place LU factorization with partial pivoting
// (MAGMA *getrf_gpu). On exit dA holds the L and U factors and `ipiv` the
// pivot indices; `info` receives the LAPACK-style status.
template<>
void magmaLu<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLu<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaLuBatched<T>: batched pivoted LU (MAGMA *getrf_batched) on the given
// magma_queue; per-matrix statuses are written to info_array (device memory —
// TODO confirm against callers).
template<>
void magmaLuBatched<double>(
    magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<float>(
    magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuBatched<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaLuNoPiv<T>: LU factorization WITHOUT pivoting (MAGMA *getrf_nopiv_gpu).
// Faster but numerically unstable for general matrices; callers are expected
// to use it only where pivoting is known to be unnecessary.
template<>
void magmaLuNoPiv<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPiv<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaLuNoPivBatched<T>: batched non-pivoted LU (MAGMA *getrf_nopiv_batched)
// on the caller's magma_queue; per-matrix statuses written to info_array.
template<>
void magmaLuNoPivBatched<double>(
    magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<float>(
    magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuNoPivBatched<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaGetriOptimalBlocksize<T>: queries MAGMA for the optimal blocking factor
// (nb) for a *getri (matrix inverse) of order n; used to size the dwork buffer
// passed to magmaGetri.
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
  return magma_get_dgetri_nb(n);
}
template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
  return magma_get_sgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
    magma_int_t n) {
  return magma_get_zgetri_nb(n);
}
template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
    magma_int_t n) {
  return magma_get_cgetri_nb(n);
}
// magmaGetri<T>: in-place matrix inverse from an existing LU factorization
// (MAGMA *getri_gpu). dA must already hold the *getrf factors and ipiv the
// matching pivots; dwork is device workspace of lwork elements.
template<>
void magmaGetri<double>(
    magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
    magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetri<float>(
    magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
    magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<double>>(
    magma_int_t n,
    c10::complex<double>* dA,
    magma_int_t ldda,
    magma_int_t* ipiv,
    c10::complex<double>* dwork,
    magma_int_t lwork,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetri_gpu(
      n,
      reinterpret_cast<magmaDoubleComplex*>(dA),
      ldda,
      ipiv,
      reinterpret_cast<magmaDoubleComplex*>(dwork),
      lwork,
      info);
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetri<c10::complex<float>>(
    magma_int_t n,
    c10::complex<float>* dA,
    magma_int_t ldda,
    magma_int_t* ipiv,
    c10::complex<float>* dwork,
    magma_int_t lwork,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetri_gpu(
      n,
      reinterpret_cast<magmaFloatComplex*>(dA),
      ldda,
      ipiv,
      reinterpret_cast<magmaFloatComplex*>(dwork),
      lwork,
      info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaGetriBatched<T>: batched OUT-OF-PLACE inverse from LU factors
// (MAGMA *getri_outofplace_batched): inputs in dA_array stay factored, the
// inverses are written to dinvA_array. Runs on the caller's magma_queue.
template<>
void magmaGetriBatched<double>(
    magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGetriBatched<float>(
    magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<double>>(
    magma_int_t n,
    c10::complex<double>** dA_array,
    magma_int_t ldda,
    magma_int_t** ipiv_array,
    c10::complex<double>** dinvA_array,
    magma_int_t lddia,
    magma_int_t* info_array,
    magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_zgetri_outofplace_batched(
      n,
      reinterpret_cast<magmaDoubleComplex**>(dA_array),
      ldda,
      ipiv_array,
      reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
      lddia,
      info_array,
      batchsize,
      magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGetriBatched<c10::complex<float>>(
    magma_int_t n,
    c10::complex<float>** dA_array,
    magma_int_t ldda,
    magma_int_t** ipiv_array,
    c10::complex<float>** dinvA_array,
    magma_int_t lddia,
    magma_int_t* info_array,
    magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_cgetri_outofplace_batched(
      n,
      reinterpret_cast<magmaFloatComplex**>(dA_array),
      ldda,
      ipiv_array,
      reinterpret_cast<magmaFloatComplex**>(dinvA_array),
      lddia,
      info_array,
      batchsize,
      magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaCholeskySolve<T>: solves A*X = B given an existing Cholesky factor in
// dA (MAGMA *potrs_gpu). `uplo` selects which triangle of dA holds the factor;
// dB is overwritten with the solution.
template<>
void magmaCholeskySolve<double>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
    double* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<float>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
    float* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
    c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zpotrs_gpu(uplo, n, nrhs,
    reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
    reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolve<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
    c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cpotrs_gpu(uplo, n, nrhs,
    reinterpret_cast<magmaFloatComplex*>(dA), ldda,
    reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaCholeskySolveBatched<T>: batched Cholesky solve (MAGMA *potrs_batched).
// Unlike the other batched wrappers, MAGMA returns a single status by value,
// which is stored into the `info` reference (argument-check status, not a
// per-matrix array).
template<>
void magmaCholeskySolveBatched<double>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
    double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<float>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
    float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
    c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_zpotrs_batched(uplo, n, nrhs,
    reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
    reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
    c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_cpotrs_batched(uplo, n, nrhs,
    reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
    reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaCholesky<T>: in-place Cholesky factorization (MAGMA *potrf_gpu) of the
// `uplo` triangle of dA. `info` > 0 means the leading minor of that order is
// not positive definite (LAPACK convention — TODO confirm callers rely on it).
template<>
void magmaCholesky<double>(
    magma_uplo_t uplo, magma_int_t n, double* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dpotrf_gpu(uplo, n, dA, ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<float>(
    magma_uplo_t uplo, magma_int_t n, float* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_spotrf_gpu(uplo, n, dA, ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholesky<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaCholeskyBatched<T>: batched Cholesky factorization
// (MAGMA *potrf_batched) on the caller's magma_queue; per-matrix statuses go
// to info_array.
template<>
void magmaCholeskyBatched<double>(
    magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<float>(
    magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaCholeskyBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaTriangularSolve<T>: triangular solve op(A)*X = B (MAGMA *trsm), fixed
// to MagmaLeft with alpha = 1 (plain solve, no scaling); dB is overwritten
// with X in place.
template<>
void magmaTriangularSolve<double>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
  MagmaStreamSyncGuard guard;
  magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolve<float>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
  MagmaStreamSyncGuard guard;
  magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolve<c10::complex<double>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb) {
  MagmaStreamSyncGuard guard;
  // Complex MAGMA takes alpha as a struct, so build 1 + 0i explicitly.
  magmaDoubleComplex alpha({1, 0});
  magma_ztrsm(MagmaLeft, uplo, trans, diag, m, n, alpha,
    reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
    reinterpret_cast<magmaDoubleComplex*>(dB), lddb);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolve<c10::complex<float>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb) {
  MagmaStreamSyncGuard guard;
  magmaFloatComplex alpha({1, 0});
  magma_ctrsm(MagmaLeft, uplo, trans, diag, m, n, alpha,
    reinterpret_cast<magmaFloatComplex*>(dA), ldda,
    reinterpret_cast<magmaFloatComplex*>(dB), lddb);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaTriangularSolveBatched<T>: batched triangular solve
// (magmablas_*trsm_batched), MagmaLeft with alpha = 1, on the caller's
// magma_queue; each dB_array[i] is overwritten with its solution.
template<>
void magmaTriangularSolveBatched<double>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<float>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmaDoubleComplex alpha({1, 0});
  magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
    reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
    reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmaFloatComplex alpha({1, 0});
  magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
    reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
    reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaGeqrfOptimalBlocksize<T>: queries MAGMA for the optimal blocking factor
// (nb) for an m-by-n QR factorization; used to size the dT workspace for
// magmaGeqrf/magmaOrgqr.
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
  return magma_get_dgeqrf_nb(m, n);
}
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
  return magma_get_sgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n) {
  return magma_get_zgeqrf_nb(m, n);
}
template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n) {
  return magma_get_cgeqrf_nb(m, n);
}
// magmaGeqrf<T>: QR factorization (MAGMA *geqrf_gpu). `is_v2` selects the
// *geqrf2_gpu variant, which does not produce the dT workspace (dT is ignored
// in that path); otherwise dT receives the blocked T factors needed later by
// magmaOrgqr. Householder scalars are written to `tau`.
template<>
void magmaGeqrf<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    double* tau, double* dT, magma_int_t* info, bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
  } else {
    magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
  }
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaGeqrf<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    float* tau, float* dT, magma_int_t* info, bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
  } else {
    magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
  }
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n,
    c10::complex<double>* dA,
    magma_int_t ldda,
    c10::complex<double>* tau,
    c10::complex<double>* dT,
    magma_int_t* info,
    bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_zgeqrf_gpu(
        m,
        n,
        reinterpret_cast<magmaDoubleComplex*>(dA),
        ldda,
        reinterpret_cast<magmaDoubleComplex*>(tau),
        reinterpret_cast<magmaDoubleComplex*>(dT),
        info);
  } else {
    magma_zgeqrf2_gpu(
        m,
        n,
        reinterpret_cast<magmaDoubleComplex*>(dA),
        ldda,
        reinterpret_cast<magmaDoubleComplex*>(tau),
        info);
  }
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaGeqrf<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n,
    c10::complex<float>* dA,
    magma_int_t ldda,
    c10::complex<float>* tau,
    c10::complex<float>* dT,
    magma_int_t* info,
    bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_cgeqrf_gpu(
        m,
        n,
        reinterpret_cast<magmaFloatComplex*>(dA),
        ldda,
        reinterpret_cast<magmaFloatComplex*>(tau),
        reinterpret_cast<magmaFloatComplex*>(dT),
        info);
  } else {
    magma_cgeqrf2_gpu(
        m,
        n,
        reinterpret_cast<magmaFloatComplex*>(dA),
        ldda,
        reinterpret_cast<magmaFloatComplex*>(tau),
        info);
  }
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaOrgqr<T>: forms the explicit orthogonal/unitary Q from a geqrf result
// (MAGMA *orgqr_gpu / *ungqr_gpu for complex). Requires the tau scalars and
// the dT workspace produced by the non-v2 magmaGeqrf path, with the same
// blocking factor nb.
template<>
void magmaOrgqr<double>(
    magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
    double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaOrgqr<float>(
    magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
    float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n,
    magma_int_t k,
    c10::complex<double>* dA,
    magma_int_t ldda,
    c10::complex<double>* tau,
    c10::complex<double>* dT,
    magma_int_t nb,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zungqr_gpu(
      m,
      n,
      k,
      reinterpret_cast<magmaDoubleComplex*>(dA),
      ldda,
      reinterpret_cast<magmaDoubleComplex*>(tau),
      reinterpret_cast<magmaDoubleComplex*>(dT),
      nb,
      info);
  AT_CUDA_CHECK(hipGetLastError());
}
template <>
void magmaOrgqr<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n,
    magma_int_t k,
    c10::complex<float>* dA,
    magma_int_t ldda,
    c10::complex<float>* tau,
    c10::complex<float>* dT,
    magma_int_t nb,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cungqr_gpu(
      m,
      n,
      k,
      reinterpret_cast<magmaFloatComplex*>(dA),
      ldda,
      reinterpret_cast<magmaFloatComplex*>(tau),
      reinterpret_cast<magmaFloatComplex*>(dT),
      nb,
      info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaSymeig<T, value_t>: symmetric/Hermitian eigendecomposition
// (MAGMA *syevd_gpu / *heevd_gpu). Eigenvalues go to `w` (always real); if
// jobz requests vectors, dA is overwritten with them. The real-typed
// specializations take rwork/lrwork only for signature uniformity — *syevd has
// no rwork argument, so they are explicitly discarded.
template<>
void magmaSymeig<double>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
    double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  (void)rwork; // unused
  (void)lrwork; // unused
  MagmaStreamSyncGuard guard;
  magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<float>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
    float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  (void)rwork; // unused
  (void)lrwork; // unused
  MagmaStreamSyncGuard guard;
  magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<c10::complex<double>, double>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zheevd_gpu(
      jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
      ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSymeig<c10::complex<float>, float>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cheevd_gpu(
      jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
      ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaEig<T>: general (non-symmetric) eigendecomposition (MAGMA *geev).
// NOTE: this is the CPU-interface MAGMA routine — A, wr/wi, VL/VR and work are
// HOST pointers, unlike the *_gpu wrappers above. Real/imag parts of the
// eigenvalues are returned separately in wr/wi. Only real specializations are
// provided here.
template<>
void magmaEig<double>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, double *A, magma_int_t lda,
    double *wr, double *wi, double *VL, magma_int_t ldvl,
    double *VR, magma_int_t ldvr, double *work, magma_int_t lwork, magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaEig<float>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, float *A, magma_int_t lda,
    float *wr, float *wi, float *VL, magma_int_t ldvl,
    float *VR, magma_int_t ldvr, float *work, magma_int_t lwork, magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaSvd<T, value_t>: singular value decomposition via divide-and-conquer
// (MAGMA *gesdd). Singular values go to `s` (always real). The real
// specializations discard `rwork` (gesdd needs it only for complex input,
// where it carries the real-valued workspace).
template<>
void magmaSvd<double>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
    magma_int_t lda, double* s, double* U, magma_int_t ldu,
    double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
    double *rwork, magma_int_t* iwork, magma_int_t* info) {
  (void)rwork; // unused
  MagmaStreamSyncGuard guard;
  magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<float>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
    magma_int_t lda, float* s, float* U, magma_int_t ldu,
    float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
    float* rwork, magma_int_t* iwork, magma_int_t* info) {
  (void)rwork; // unused
  MagmaStreamSyncGuard guard;
  magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<float>, float>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
    magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
    c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
    float *rwork, magma_int_t* iwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
    reinterpret_cast<magmaFloatComplex*>(U), ldu,
    reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
    reinterpret_cast<magmaFloatComplex*>(work), lwork,
    rwork, iwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaSvd<c10::complex<double>, double>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
    magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
    c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
    double *rwork, magma_int_t* iwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
    reinterpret_cast<magmaDoubleComplex*>(U), ldu,
    reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
    reinterpret_cast<magmaDoubleComplex*>(work), lwork,
    rwork, iwork, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaLuSolve<T>: solves A*X = B from an existing pivoted LU factorization
// (MAGMA *getrs_gpu). Transpose mode is hard-coded to MagmaNoTrans; dB is
// overwritten with the solution.
template<>
void magmaLuSolve<double>(
    magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
    double* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<float>(
    magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
    float* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
    c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolve<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
    c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(hipGetLastError());
}
// magmaLuSolveBatched<T>: batched LU solve from existing factors
// (MAGMA *getrs_batched), MagmaNoTrans, on the caller's magma_queue. MAGMA
// returns a single status by value, stored into the `info` reference
// (argument-check status, not per-matrix).
template<>
void magmaLuSolveBatched<double>(
    magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    double** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<float>(
    magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    float** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
template<>
void magmaLuSolveBatched<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(hipGetLastError());
}
#endif
// Allocates `size` elements of `type` in pinned (page-locked) host memory and
// points `name` at the buffer. The backing storage object (`storage_<name>`)
// is a local whose lifetime keeps the buffer alive for the enclosing scope.
#define ALLOCATE_ARRAY(name, type, size) \
  auto storage_##name = pin_memory<type>(size); \
  name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A X = B in place with MAGMA's LU-based solvers: on return `b` holds
// the solution and `A` holds its LU factorization. `infos` receives one
// MAGMA info code per batch entry (0 on success). Without MAGMA this is a
// hard error.
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
// LAPACK-style leading dimension: must be at least 1 even when n == 0.
magma_int_t lda = ::max(magma_int_t{1}, n);
if (b.dim() == 2) {
// Unbatched path for a single matrix; pivots live in a CPU tensor.
auto ipiv = at::empty({n}, at::kInt);
magma_int_t info = 0;
magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
b_data, lda, &info);
infos[0] = info;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
// Pinned host buffers for the per-matrix pointer/pivot/info arrays that
// the batched MAGMA API expects.
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(b.get_device());
// The batched calls are chunked into "mini"-batches of at most 65535
// matrices per call (see comments below).
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaSolveBatched<scalar_t>(
n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaSolveBatched<scalar_t>(
n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
&info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
// Propagate the per-matrix info codes back to the caller.
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
// CUDA entry point for torch.solve: clones both inputs into column-major
// (Fortran) layout, runs the MAGMA-based apply_solve in place, checks the
// returned info codes, and returns (solution, LU factorization of A).
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
  auto b_copy = cloneBatchedColumnMajor(self);
  auto A_copy = cloneBatchedColumnMajor(A);
  std::vector<int64_t> infos(batchCount(self), 0);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
    apply_solve<scalar_t>(b_copy, A_copy, infos);
  });
  if (self.dim() <= 2) {
    singleCheckErrors(infos[0], "solve_cuda");
  } else {
    batchCheckErrors(infos, "solve_cuda");
  }
  return std::make_tuple(b_copy, A_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the inverses of a batch of matrices: `self` is overwritten by its
// batched LU factorization (magmaLuBatched) and the inverses are written into
// `self_inv` by magmaGetriBatched. `infos` receives one MAGMA info code per
// matrix (0 on success).
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
auto self_mat_stride = matrixStride(self);
auto self_inv_data = self_inv.data_ptr<scalar_t>();
auto self_inv_mat_stride = matrixStride(self_inv);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** self_array;
scalar_t** self_inv_array;
// Pinned host buffers for the pointer/pivot/info arrays used by the batched
// MAGMA API.
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
MAGMAQueue magma_queue(self.get_device());
// LU-factorize the whole batch first; getri below consumes these factors.
magmaLuBatched<scalar_t>(
n, n, self_array, n, ipiv_array, info_array,
batch_size, magma_queue);
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaGetriBatched<scalar_t>(
n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur,
n, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaGetriBatched<scalar_t>(
n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
// Propagate per-matrix info codes. Note the getri calls reuse info_array, so
// these reflect the inversion step, not the earlier LU step.
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
// Inverts a single matrix in place: LU factorization (magmaLu) followed by
// magmaGetri. `info` receives the MAGMA status (0 on success); if the LU
// factorization fails, getri is skipped and the LU info code is returned.
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
// Workspace for getri sized as n times the tuned block size.
magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
magma_int_t info_tmp = 0;
Tensor ipiv = at::empty({n}, at::kInt);
Tensor dwork = at::empty({lwork}, self.options());
magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp);
if (info_tmp != 0) {
info = info_tmp;
return;
}
magmaGetri<scalar_t>(
n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp);
info = info_tmp;
#endif
}
// MAGMA-backed inverse helper. Single matrices (dim <= 2) go through the
// unbatched LU+getri routines; batched inputs use the batched kernels, which
// need a second working copy for the LU factors.
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
  auto result = cloneBatchedColumnMajor(self);
  if (self.dim() <= 2) {
    int64_t info = 0;
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_single_inverse<scalar_t>(result, info);
    });
    singleCheckErrors(info, "inverse_cuda");
  } else {
    std::vector<int64_t> infos(batchCount(self), 0);
    auto lu_factors = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_batched_inverse<scalar_t>(lu_factors, result, infos);
    });
    batchCheckErrors(infos, "inverse_cuda");
  }
  return result;
}
// Entry point for matrix inversion on CUDA. When built with cuSOLVER, routes
// single matrices, small batches (batchCount <= 2), or the no-MAGMA case to
// the cuSOLVER/cuBLAS implementation; everything else falls back to the
// MAGMA-based legacy path.
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
return _inverse_helper_cuda_lib(self); // cusolver or cublas
} else {
return _inverse_helper_cuda_legacy(self); // magma-cuda
}
#else
return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A X = b in place given the Cholesky factorization of A (stored in
// the `upper` or lower triangle of `A`). The solution overwrites `b`. `info`
// receives the MAGMA status; in the batched path the mini-batch loop stops at
// the first nonzero status.
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
int info_tmp = 0;
if (b.dim() == 2) {
// Single-matrix path.
magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
b_data, n, &info_tmp);
info = info_tmp;
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
scalar_t** A_array;
scalar_t** b_array;
// Pinned host arrays of per-matrix device pointers for the batched API.
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, A_array_cur, n, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
// Abort remaining mini-batches on the first failure.
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaCholeskySolveBatched<scalar_t>(
uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
// CUDA entry point for torch.cholesky_solve: copies the inputs to
// column-major layout, runs the in-place MAGMA solver, and validates the
// returned status before handing back the solution.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
  auto rhs_copy = cloneBatchedColumnMajor(self);
  auto factor_copy = cloneBatchedColumnMajor(A);
  int64_t info = 0;
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
    apply_cholesky_solve<scalar_t>(rhs_copy, factor_copy, upper, info);
  });
  TORCH_CHECK(info == 0, "MAGMA cholesky_solve : invalid argument: ", -info);
  return rhs_copy;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the Cholesky factorization of `self` in place, writing the factor
// into the triangle selected by `upper`. `infos` receives one MAGMA status
// per matrix (0 on success, > 0 if the matrix is not positive definite per
// LAPACK convention — TODO confirm exact semantics against the MAGMA docs).
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
auto self_data = self.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
// Leading dimension must be at least 1 even for empty matrices.
auto lda = std::max<magma_int_t>(1, n);
if (self.dim() == 2) {
// Single-matrix path.
magma_int_t info = 0;
magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info);
infos[0] = info;
} else {
auto self_mat_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
magma_int_t* info_array;
scalar_t** self_array;
// Pinned host arrays for device pointers and per-matrix info codes.
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_mat_stride];
}
MAGMAQueue magma_queue(self.get_device());
int64_t batch_limit = self.is_complex() ? 65535 : 262140;
// Compute as many batches of 262140 possible
// 262140 is the size of the largest batch of matrices that can be run with
// violating maximum kernel configuration
// For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** self_array_cur = &self_array[mini_idx];
magma_int_t* info_array_cur = &info_array[mini_idx];
magmaCholeskyBatched<scalar_t>(
uplo, n, self_array_cur, lda, info_array_cur, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaCholeskyBatched<scalar_t>(
uplo, n, &self_array[mini_idx], lda, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
}
// Propagate the per-matrix info codes back to the caller.
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
}
#endif
}
// CUDA entry point for torch.cholesky. The MAGMA kernel is always invoked on
// the lower triangle (apply_cholesky with upper=false); an upper-triangular
// result is obtained by factoring the transpose and transposing back.
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
  std::vector<int64_t> infos(batchCount(self), 0);
  Tensor working = upper
      ? cloneBatchedColumnMajor(self.transpose(-1, -2))
      : cloneBatchedColumnMajor(self);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
    apply_cholesky<scalar_t>(working, false, infos);
  });
  if (self.dim() <= 2) {
    singleCheckErrors(infos[0], "cholesky_cuda");
  } else {
    batchCheckErrors(infos, "cholesky_cuda");
  }
  return upper ? working.transpose(-1, -2) : working;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the LU factorization of `self` in place. When `get_pivots` is
// true the partial-pivoting routines are used and the pivot indices are
// written into `pivots`; otherwise the no-pivot variants run. `infos`
// receives the MAGMA status code(s).
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto self_data = self.data_ptr<scalar_t>();
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
// Number of pivots produced for an m x n factorization.
magma_int_t k = ::min(m, n);
if (self.dim() == 2) {
// If `pivots` is defined, then we have to compute them.
// magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
// the partially-pivoted LU decomposition with / without pivots.
// The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots.
// The data is later copied back to the appropriate output tensor.
Tensor info_tmp = at::zeros({}, at::kInt);
if (get_pivots) {
Tensor piv_tmp = at::empty({k}, at::kInt);
magmaLu<scalar_t>(
m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
pivots.copy_(piv_tmp);
} else {
magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
}
infos.copy_(info_tmp);
} else {
auto self_matrix_stride = matrixStride(self);
magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
scalar_t** self_array;
// Pinned host array of per-matrix device pointers for the batched API.
ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
self_array[i] = &self_data[i * self_matrix_stride];
}
MAGMAQueue magma_queue(self.get_device());
// Same comment as in the case of single matrix above.
if (get_pivots) {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto pivots_matrix_stride = pivots.size(-1);
magma_int_t** pivots_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
}
magmaLuBatched<scalar_t>(
m, n, self_array, m, pivots_array,
infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
} else {
magmaLuNoPivBatched<scalar_t>(
m, n, self_array, m, infos.data_ptr<magma_int_t>(),
batch_size, magma_queue);
}
}
#endif
}
// CUDA entry point for torch.lu: returns (LU factors, pivots, infos).
// Pivots are pre-filled with the identity permutation (1..k) so the no-pivot
// and empty-input paths still yield a valid pivot tensor; infos is
// zero-initialized for the same reason.
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
TORCH_CHECK(self.dim() >= 2,
"expected tensor with 2 or more dimensions, got size: ", self.sizes(),
" instead");
auto m = self.size(-2);
auto n = self.size(-1);
auto k = ::min(m, n);
// Pivot tensor has the matrix dimensions replaced by a single length-k axis.
auto req_size = self.sizes().vec();
req_size.pop_back();
req_size.back() = k;
Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
// Infos tensor drops the matrix dimensions entirely (one entry per batch).
req_size.pop_back();
auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));
Tensor self_working_copy;
if (self.numel() == 0) {
// Nothing to factorize; return an empty result of matching shape.
self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
} else {
self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
});
}
if (check_errors) {
// allow_singular: LU of a singular matrix is still well-defined output.
if (self.dim() == 2) {
singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
} else {
batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
}
}
return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves the triangular system op(A) X = b in place (solution overwrites
// `b`), where op is controlled by `transpose` and the triangle by `upper`;
// `unitriangular` treats A's diagonal as ones. Note: the MAGMA triangular
// solvers used here expose no info/status output.
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;
auto A_data = A.data_ptr<scalar_t>();
auto b_data = b.data_ptr<scalar_t>();
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
// batch_size == 1 implies that:
// 1. the RHS and LHS tensors have 2 dimensions, or
// 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
if (batch_size == 1) {
magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
} else {
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
scalar_t** A_array;
scalar_t** b_array;
// Pinned host arrays of per-matrix device pointers for the batched API.
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
}
MAGMAQueue magma_queue(b.get_device());
constexpr int64_t batch_limit = 65535;
// Compute as many batches of 65535 possible
// The number of "mini"-batches are floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** A_array_cur = &A_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, A_array_cur,
n, b_array_cur, n, batch_limit, magma_queue);
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0) {
magmaTriangularSolveBatched<scalar_t>(
uplo, trans, diag, n, nrhs, &A_array[mini_idx],
n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue);
}
}
#endif
}
// CUDA entry point for torch.triangular_solve: clones both inputs into
// column-major layout, solves in place, and returns (solution, clone of A).
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
                                                         bool upper, bool transpose, bool unitriangular) {
  auto rhs_copy = cloneBatchedColumnMajor(self);
  auto tri_copy = cloneBatchedColumnMajor(A);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
    apply_triangular_solve<scalar_t>(rhs_copy, tri_copy, upper, transpose, unitriangular);
  });
  return std::make_tuple(rhs_copy, tri_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the QR decomposition in place. Both `Q` and `R` initially contain
// copies of the input: `R` is factored with ?geqrf2_gpu (correct R), while
// `Q` is re-factored with ?geqrf_gpu and then expanded to `n_columns`
// orthonormal columns via ?orgqr_gpu (see the MAGMA bug note below). `infos`
// receives per-matrix status codes; processing stops at the first failure.
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto q_data = Q.data_ptr<scalar_t>();
auto r_data = R.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
auto r_matrix_stride = matrixStride(R);
magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)");
magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)");
// k = number of elementary reflectors produced.
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
// This phase computes Q (the raw version)
// We require to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
// CUDA entry point for torch.qr: sets up column-major working copies with the
// geometry computed by _compute_geometry_for_Q, runs apply_qr, and narrows
// the raw outputs to the reduced (`some`) or full shapes.
std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) {
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
// Fix the number of columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = n_columns_q;
q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options());
// NOTE(review): expand_as on itself is a no-op — presumably this was meant
// to broadcast the identity to the batched q_sizes; confirm against the
// CPU implementation before relying on batched empty-input behavior.
q_working_copy = q_working_copy.expand_as(q_working_copy);
// We repurpose the same q_sizes for r_working_copy
// Fix the number of rows and columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = self.size(-1);
q_sizes[self.dim() - 2] = n_columns_q;
r_working_copy = at::empty(q_sizes, self.options());
return std::make_tuple(q_working_copy, r_working_copy);
}
// Q's working buffer is wider than the input; copy the input into its
// leading columns. R starts as a column-major copy of the input.
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
r_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
// Narrow to the requested shapes; triu() zeroes R's below-diagonal entries.
return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q),
r_working_copy.narrow(-2, 0, n_columns_q).triu());
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Symmetric/Hermitian eigendecomposition in place: eigenvalues go to
// `eigvals` (real-valued, value_t); when `eigenvectors` is true, `self` is
// overwritten with the eigenvectors. `infos` receives per-matrix status
// codes; processing stops at the first failure.
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
// value_t is the real type underlying scalar_t (e.g. double for
// c10::complex<double>); eigenvalues of Hermitian matrices are real.
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto eigvals_data = eigvals.data_ptr<value_t>();
auto self_matrix_stride = matrixStride(self);
auto eigvals_stride = eigvals.size(-1);
int64_t batch_size = batchCount(self);
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;
// Scratch matrix required by the MAGMA driver.
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, n * n);
magma_int_t info;
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
liwork = magma_int_cast(iwkopt, "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
// rwork is only needed for complex input (LAPACK *heevd convention).
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
lrwork = magma_int_cast(rwkopt, "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
// CUDA entry point for torch.symeig: returns (eigenvalues, eigenvectors),
// with eigenvectors an empty tensor when they are not requested.
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
// Eigenvalue tensor shape: batch dims plus one length-n axis.
auto self_sizes = self.sizes().vec();
self_sizes.pop_back();
// Eigenvalues are real even for complex input.
ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype()));
// magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigvalenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
// dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(self_sizes, self.options().dtype(dtype))
: at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos[0], "symeig_cuda");
}
// Eigenvalues were computed on the CPU (hybrid algorithm); move them back to
// the input's device before returning.
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and return CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
// Computes the (possibly complex) eigenvalues and, if requested, the right
// eigenvectors of a real square CPU matrix via magmaEig. Note: *info_ptr is
// written only when n > 0; callers must pre-initialize it for empty input.
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto self_data = self.data_ptr<scalar_t>();
auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
// out_eigvals is an (n, 2) column-ordered buffer: the first column (wr)
// holds the real parts, the second (wi, at offset n) the imaginary parts.
scalar_t *wr = out_eigvals_data;
scalar_t *wi = out_eigvals_data+n;
scalar_t *vr_data = NULL;
magma_int_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = out_eigvecs.data_ptr<scalar_t>();
ldvr = n;
}
if (n > 0) {
// call magmaEig once to get the optimal size of work_data
scalar_t wkopt;
magma_int_t info;
magmaEig<scalar_t>(MagmaNoVec, jobvr, n, self_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
magma_int_t lwork = (magma_int_t) wkopt;
// call it a 2nd time to to the actual work
scalar_t *work_data = nullptr;
ALLOCATE_ARRAY(work_data, scalar_t, lwork);
magmaEig<scalar_t>(MagmaNoVec, jobvr, n, self_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
*info_ptr = info;
}
#endif
}
/*
 * Internal helper; like eig_cuda but:
 * 1. assume that self is a square matrix of side "n"
 * 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
 *    by the caller
 */
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
  int64_t n = self.size(-1);
  // copy self to pinned CPU memory
  auto self_working_copy = at::empty_strided(
      {n, n}, // square matrix
      {1, n}, // column-ordered, as magmaEig expects
      at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
  self_working_copy.copy_(self);
  // tensors holding the results. We use empty_strided to make them column-ordered
  auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  auto out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
  auto out_eigvecs = eigenvectors
                     ? at::empty_strided({n, n}, {1, n}, options)
                     : Tensor();
  // Must be zero-initialized: apply_eig writes *info_ptr only when n > 0, so
  // for an empty matrix an uninitialized value would be read by
  // singleCheckErrors below (undefined behavior / spurious failures).
  int64_t info = 0;
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "eig_cuda", [&]{
    apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
  });
  singleCheckErrors(info, "eig_cuda");
  return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}
REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ syevd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This function computes eigenvalues 'w' and eigenvectors 'v' of the tensor 'self'
// compute_eigenvectors controls whether eigenvectors should be computed
// uplo controls the portion of input matrix to consider in computations, allowed values are "u", "U", "l", "L"
// '_symeig_helper_cuda' prepares correct input for 'apply_symeig' and checks for possible errors using 'infos'
// See also CPU implementation in aten/src/ATen/native/BatchLinearAlgebra.cpp
std::tuple<Tensor, Tensor> _syevd_helper_cuda(const Tensor& self, bool compute_eigenvectors, std::string uplo_str) {
  // NumPy allows lowercase input for the UPLO argument; uplo_str is assumed
  // to be one of "u", "U", "l", "L", so normalizing and comparing against
  // 'U' decides the triangle.
  const bool upper = std::toupper(uplo_str[0]) == 'U';
  return _symeig_helper_cuda(self, compute_eigenvectors, upper);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the SVD of each matrix in `self` via MAGMA's gesdd-style driver:
// singular values into S (real-valued), left/right singular vectors into
// U/VT depending on jobchar ('A' all, 'S' reduced, 'N' none). `infos`
// receives per-matrix status codes; processing stops at the first failure.
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
// value_t is the real type underlying scalar_t; singular values are real.
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto mn = ::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
// Integer workspace sized per the gesdd convention (8 * min(m, n)).
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
// Real workspace is only required for complex input.
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, m,
S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
// Entry point for torch.svd on CUDA tensors. Allocates U/S/VT, copies the
// input into pinned column-major host memory, runs apply_svd through MAGMA,
// checks per-matrix error codes, and moves the results back to the input's
// device. 'some' selects the reduced decomposition; when compute_uv is false,
// U and VT are returned zeroed.
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
  std::vector<int64_t> infos(batchCount(self), 0);
  int64_t m = self.size(-2), n = self.size(-1);
  int64_t k = ::min(m, n);
  char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
  Tensor U_working_copy, S_working_copy, VT_working_copy;
  std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
  if (self.numel() > 0) {
    // The input matrix, U, S and VT have to reside in pinned memory.
    // Additionally, the input and U have to be in column major format.
    // _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
    // For the input matrix, this requirements are being taken care of below.
    // Specify strides
    auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
    self_col_major_strides[self.dim() - 2] = 1;   // column-major: unit stride down a column
    self_col_major_strides[self.dim() - 1] = m;   // consecutive columns are m elements apart
    // Create strided tensor in pinned memory
    auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
                                               at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
    self_working_copy.copy_(self);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
      apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
    });
    // Batched inputs report all error codes; a single matrix reports just one.
    if (self.dim() > 2) {
      batchCheckErrors(infos, "svd_cuda");
    } else {
      singleCheckErrors(infos[0], "svd_cuda");
    }
    // Move results from pinned host memory back onto the input's device.
    U_working_copy = same_stride_to(U_working_copy, self.options());
    S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
    VT_working_copy = same_stride_to(VT_working_copy, self.options());
    if (compute_uv) {
      if (some) {
        // Reduced SVD: keep only the first k = min(m, n) columns of VT.
        VT_working_copy = VT_working_copy.narrow(-1, 0, k);
      }
    } else {
      // jobchar == 'N': U/VT buffers were never written by MAGMA; zero them.
      VT_working_copy.zero_();
      U_working_copy.zero_();
    }
  } else {
    // Empty input: return correctly-shaped zeroed outputs on the right device.
    U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
    S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
    VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
  }
  return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves LU x = b for a (batch of) right-hand sides given an LU factorization
// ('lu') and its pivots, overwriting 'b' with the solution. A single matrix
// goes through the non-batched MAGMA path; batches use the batched path split
// into chunks of at most 65535 (MAGMA's per-call batch limit). The MAGMA exit
// code is reported through 'info'.
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto b_data = b.data_ptr<scalar_t>();
  auto lu_data = lu.data_ptr<scalar_t>();
  auto n = lu.size(-2);
  auto nrhs = b.size(-1);
  int info_tmp = 0;
  if (b.dim() == 2) {
    // Non-batched path: magmaLuSolve expects the pivot array on the host.
    Tensor pivots_tmp = pivots.cpu();
    magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
    info = info_tmp;
  } else {
    auto pivots_data = pivots.data_ptr<magma_int_t>();
    auto b_stride = matrixStride(b);
    auto lu_stride = matrixStride(lu);
    auto pivots_stride = pivots.size(-1);
    magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");

    // Build per-matrix pointer arrays as required by the batched MAGMA API.
    magma_int_t** pivots_array;
    scalar_t** lu_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
    ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    for (int64_t i = 0; i < batch_size; i++) {
      pivots_array[i] = &pivots_data[i * pivots_stride];
      b_array[i] = &b_data[i * b_stride];
      lu_array[i] = &lu_data[i * lu_stride];
    }

    MAGMAQueue magma_queue(b.get_device());

    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    // mini_idx is initialized by the loop and reused below for the tail chunk.
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** lu_array_cur = &lu_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];
      magma_int_t** pivots_array_cur = &pivots_array[mini_idx];

      magmaLuSolveBatched<scalar_t>(
          n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
          info_tmp, batch_limit, magma_queue);

      if (info_tmp != 0) {
        break;  // first failing chunk aborts the whole solve
      }
    }

    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    if (batch_size % batch_limit != 0 && info_tmp == 0) {
      magmaLuSolveBatched<scalar_t>(
          n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
          info_tmp, batch_size % batch_limit, magma_queue);
    }

    info = info_tmp;
  }
#endif
}
// Entry point for torch.lu_solve on CUDA tensors: clones the inputs into
// batched column-major layout (MAGMA requirement), dispatches to
// apply_lu_solve, and validates the MAGMA exit code.
// Returns the solution tensor; for empty inputs returns zeros shaped like self.
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
  // Short-circuit on empty inputs *before* paying for the column-major clones
  // below (the original code cloned first and then discarded the copies).
  if (self.numel() == 0 || LU_data.numel() == 0) {
    return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  }
  int64_t info = 0;
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
  auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
    apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
  });
  // A negative MAGMA info indicates an invalid argument at position -info.
  TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
  return self_working_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
| c2cc5c39814d6d6e2b83957ca390e0f66fdacc73.cu | #include <ATen/Context.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/PinnedMemoryAllocator.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/native/cuda/MiscUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/BatchLinearAlgebra.h>
#include <ATen/native/cuda/BatchLinearAlgebraLib.h>
#include <ATen/native/cpu/zmath.h>
#include <THC/THC.h> // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
const bool use_magma_ = true;
#else
const bool use_magma_ = false;
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
// ---------------------------------------------------------------------------
// Forward declarations of dtype-generic MAGMA wrappers. Each template is
// explicitly specialized per dtype (float/double and, where supported, their
// c10::complex counterparts) later in this file; an unspecialized instance
// would fail to link. Batched variants take per-matrix pointer arrays plus an
// explicit MAGMAQueue; non-batched variants operate on a single matrix.
// ---------------------------------------------------------------------------

// LU-factorize-and-solve A X = B with partial pivoting (gesv family).
template<class scalar_t>
void magmaSolve(
    magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
    magma_int_t* ipiv, scalar_t* dB, magma_int_t lddb, magma_int_t* info);

template<class scalar_t>
void magmaSolveBatched(
    magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue);

// LU factorization with partial pivoting (getrf family).
template<class scalar_t>
void magmaLu(
    magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info);

template<class scalar_t>
void magmaLuBatched(
    magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue);

// LU factorization without pivoting (getrf_nopiv family).
template<class scalar_t>
void magmaLuNoPiv(
    magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
    magma_int_t* info);

template<class scalar_t>
void magmaLuNoPivBatched(
    magma_int_t m, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);

// Optimal block size for getri (matrix inverse from an LU factorization).
template<class scalar_t>
inline magma_int_t magmaGetriOptimalBlocksize(magma_int_t n);

// Matrix inverse from an LU factorization (getri family).
template<class scalar_t>
void magmaGetri(
    magma_int_t n, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv, scalar_t* dwork,
    magma_int_t lwork, magma_int_t* info);

template<class scalar_t>
void magmaGetriBatched(
    magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, scalar_t** dinvA_array, magma_int_t lddia,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);

// Solve with an existing Cholesky factor (potrs family).
template<class scalar_t>
void magmaCholeskySolve(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda,
    scalar_t* dB, magma_int_t lddb, magma_int_t* info);

template<class scalar_t>
void magmaCholeskySolveBatched(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
    scalar_t** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue);

// Cholesky factorization (potrf family).
template<class scalar_t>
void magmaCholesky(
    magma_uplo_t uplo, magma_int_t n, scalar_t* dA,
    magma_int_t ldda, magma_int_t* info);

template<class scalar_t>
void magmaCholeskyBatched(
    magma_uplo_t uplo, magma_int_t n, scalar_t** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue);

// Triangular solve (trsm family); solves in-place into dB.
template<class scalar_t>
void magmaTriangularSolve(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    scalar_t* dA, magma_int_t ldda, scalar_t* dB, magma_int_t lddb);

template<class scalar_t>
void magmaTriangularSolveBatched(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    scalar_t** dA_array, magma_int_t ldda, scalar_t** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue);

// QR factorization (geqrf) plus explicit-Q construction (orgqr).
template<class scalar_t>
inline magma_int_t magmaGeqrfOptimalBlocksize(magma_int_t m, magma_int_t n);

template<class scalar_t>
void magmaGeqrf(
    magma_int_t m, magma_int_t n, scalar_t* dA, magma_int_t ldda,
    scalar_t* tau, scalar_t* dT, magma_int_t* info, bool is_v2);

template<class scalar_t>
void magmaOrgqr(
    magma_int_t m, magma_int_t n, magma_int_t k, scalar_t* dA,
    magma_int_t ldda, scalar_t* tau, scalar_t* dT, magma_int_t nb, magma_int_t* info);

// Symmetric/Hermitian eigendecomposition; w receives real eigenvalues.
// NOTE(review): specializations are not visible in this chunk -- presumably
// syevd/heevd-style drivers; confirm against the definitions elsewhere.
template<class scalar_t, class value_t=scalar_t>
void magmaSymeig(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, scalar_t* dA, magma_int_t ldda,
    value_t* w, scalar_t* wA, magma_int_t ldwa, scalar_t* work, magma_int_t lwork, value_t* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info);

// General (non-symmetric) eigendecomposition; wr/wi receive the real and
// imaginary eigenvalue parts (geev-style signature).
template<class scalar_t>
void magmaEig(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, scalar_t *A, magma_int_t lda,
    scalar_t *wr, scalar_t *wi, scalar_t *VL, magma_int_t ldvl,
    scalar_t *VR, magma_int_t ldvr, scalar_t *work, magma_int_t lwork, magma_int_t *info);

// Singular value decomposition; value_t is the real type for singular values.
// The iwork parameter suggests a gesdd-style (divide and conquer) driver --
// specializations not visible here, verify against the definitions.
template<class scalar_t, class value_t=scalar_t>
void magmaSvd(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, scalar_t* A,
    magma_int_t lda, value_t* s, scalar_t* U, magma_int_t ldu,
    scalar_t* VT, magma_int_t ldvt, scalar_t* work, magma_int_t lwork,
    value_t* rwork,
    magma_int_t* iwork, magma_int_t* info);

// Solve with an existing LU factorization and pivots (getrs family).
template<class scalar_t>
void magmaLuSolve(
    magma_int_t n, magma_int_t nrhs, scalar_t* dA, magma_int_t ldda, magma_int_t* ipiv,
    scalar_t* dB, magma_int_t lddb, magma_int_t* info);

template<class scalar_t>
void magmaLuSolveBatched(
    magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    scalar_t** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue);
// ---------------------------------------------------------------------------
// magmaSolve / magmaSolveBatched specializations: thin shims over MAGMA's
// *gesv routines (LU solve with partial pivoting). Complex dtypes reinterpret
// c10::complex pointers as MAGMA's complex types (layout-compatible).
// MagmaStreamSyncGuard presumably synchronizes ATen's current CUDA stream
// around the non-batched calls -- see MiscUtils.h to confirm.
// ---------------------------------------------------------------------------
template<>
void magmaSolve<double>(
    magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
    magma_int_t* ipiv, double* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSolve<float>(
    magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
    magma_int_t* ipiv, float* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgesv_gpu(n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSolve<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
    magma_int_t* ipiv, c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgesv_gpu(n, nrhs,
    reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv,
    reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSolve<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
    magma_int_t* ipiv, c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgesv_gpu(n, nrhs,
    reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv,
    reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

// Batched variants run on the caller-supplied MAGMA queue; per-matrix error
// codes land in dinfo_array (device memory).
template<>
void magmaSolveBatched<double>(
    magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_dgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSolveBatched<float>(
    magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_sgesv_batched(n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSolveBatched<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, c10::complex<double>** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_zgesv_batched(n, nrhs,
    reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array,
    reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSolveBatched<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t** dipiv_array, c10::complex<float>** dB_array, magma_int_t lddb,
    magma_int_t* dinfo_array, magma_int_t batch_count, const MAGMAQueue& magma_queue) {
  magma_cgesv_batched(n, nrhs,
    reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array,
    reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, dinfo_array, batch_count, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// ---------------------------------------------------------------------------
// magmaLu / magmaLuBatched specializations: LU factorization with partial
// pivoting (*getrf). The input matrix is overwritten with the L and U factors;
// pivot indices land in ipiv.
// ---------------------------------------------------------------------------
template<>
void magmaLu<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetrf_gpu(m, n, dA, ldda, ipiv, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLu<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetrf_gpu(m, n, dA, ldda, ipiv, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLu<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetrf_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLu<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    magma_int_t* ipiv, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetrf_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuBatched<double>(
    magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_dgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue())
;
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuBatched<float>(
    magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_sgetrf_batched(m, n, dA_array, ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuBatched<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_zgetrf_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuBatched<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_cgetrf_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, ipiv_array, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// ---------------------------------------------------------------------------
// magmaLuNoPiv / magmaLuNoPivBatched specializations: LU factorization
// *without* pivoting (*getrf_nopiv). Faster but numerically unsafe for
// matrices that need row interchanges; callers choose deliberately.
// ---------------------------------------------------------------------------
template<>
void magmaLuNoPiv<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetrf_nopiv_gpu(m, n, dA, ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuNoPiv<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetrf_nopiv_gpu(m, n, dA, ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuNoPiv<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuNoPiv<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetrf_nopiv_gpu(m, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuNoPivBatched<double>(
    magma_int_t m, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuNoPivBatched<float>(
    magma_int_t m, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_sgetrf_nopiv_batched(m, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuNoPivBatched<c10::complex<double>>(
    magma_int_t m, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_zgetrf_nopiv_batched(m, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuNoPivBatched<c10::complex<float>>(
    magma_int_t m, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_cgetrf_nopiv_batched(m, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// ---------------------------------------------------------------------------
// magmaGetriOptimalBlocksize / magmaGetri specializations: optimal workspace
// block size query (magma_get_*getri_nb) and matrix inverse from an existing
// LU factorization (*getri).
// ---------------------------------------------------------------------------
template<>
inline magma_int_t magmaGetriOptimalBlocksize<double>(magma_int_t n) {
  return magma_get_dgetri_nb(n);
}

template<>
inline magma_int_t magmaGetriOptimalBlocksize<float>(magma_int_t n) {
  return magma_get_sgetri_nb(n);
}

template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<double>>(
    magma_int_t n) {
  return magma_get_zgetri_nb(n);
}

template <>
inline magma_int_t magmaGetriOptimalBlocksize<c10::complex<float>>(
    magma_int_t n) {
  return magma_get_cgetri_nb(n);
}

// dwork must be at least lwork elements; lwork is derived from the block size
// returned by the matching magmaGetriOptimalBlocksize specialization.
template<>
void magmaGetri<double>(
    magma_int_t n, double* dA, magma_int_t ldda, magma_int_t* ipiv, double* dwork,
    magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaGetri<float>(
    magma_int_t n, float* dA, magma_int_t ldda, magma_int_t* ipiv, float* dwork,
    magma_int_t lwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetri_gpu(n, dA, ldda, ipiv, dwork, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaGetri<c10::complex<double>>(
    magma_int_t n,
    c10::complex<double>* dA,
    magma_int_t ldda,
    magma_int_t* ipiv,
    c10::complex<double>* dwork,
    magma_int_t lwork,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetri_gpu(
      n,
      reinterpret_cast<magmaDoubleComplex*>(dA),
      ldda,
      ipiv,
      reinterpret_cast<magmaDoubleComplex*>(dwork),
      lwork,
      info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaGetri<c10::complex<float>>(
    magma_int_t n,
    c10::complex<float>* dA,
    magma_int_t ldda,
    magma_int_t* ipiv,
    c10::complex<float>* dwork,
    magma_int_t lwork,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetri_gpu(
      n,
      reinterpret_cast<magmaFloatComplex*>(dA),
      ldda,
      ipiv,
      reinterpret_cast<magmaFloatComplex*>(dwork),
      lwork,
      info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// ---------------------------------------------------------------------------
// magmaGetriBatched specializations: batched out-of-place matrix inverse from
// LU factorizations (*getri_outofplace_batched); inverses are written to
// dinvA_array, leaving the factorized inputs intact.
// ---------------------------------------------------------------------------
template<>
void magmaGetriBatched<double>(
    magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, double** dinvA_array, magma_int_t lddia,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaGetriBatched<float>(
    magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, float** dinvA_array, magma_int_t lddia,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_sgetri_outofplace_batched(n, dA_array, ldda, ipiv_array, dinvA_array, lddia, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaGetriBatched<c10::complex<double>>(
    magma_int_t n,
    c10::complex<double>** dA_array,
    magma_int_t ldda,
    magma_int_t** ipiv_array,
    c10::complex<double>** dinvA_array,
    magma_int_t lddia,
    magma_int_t* info_array,
    magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_zgetri_outofplace_batched(
      n,
      reinterpret_cast<magmaDoubleComplex**>(dA_array),
      ldda,
      ipiv_array,
      reinterpret_cast<magmaDoubleComplex**>(dinvA_array),
      lddia,
      info_array,
      batchsize,
      magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaGetriBatched<c10::complex<float>>(
    magma_int_t n,
    c10::complex<float>** dA_array,
    magma_int_t ldda,
    magma_int_t** ipiv_array,
    c10::complex<float>** dinvA_array,
    magma_int_t lddia,
    magma_int_t* info_array,
    magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magma_cgetri_outofplace_batched(
      n,
      reinterpret_cast<magmaFloatComplex**>(dA_array),
      ldda,
      ipiv_array,
      reinterpret_cast<magmaFloatComplex**>(dinvA_array),
      lddia,
      info_array,
      batchsize,
      magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// ---------------------------------------------------------------------------
// magmaCholeskySolve / magmaCholeskySolveBatched specializations: solve with
// an existing Cholesky factor (*potrs). Note the batched MAGMA routines
// return info as the function result rather than via an out-parameter, so the
// wrappers assign it to the info reference.
// ---------------------------------------------------------------------------
template<>
void magmaCholeskySolve<double>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda,
    double* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dpotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskySolve<float>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda,
    float* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_spotrs_gpu(uplo, n, nrhs, dA, ldda, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskySolve<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda,
    c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zpotrs_gpu(uplo, n, nrhs,
    reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
    reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskySolve<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda,
    c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cpotrs_gpu(uplo, n, nrhs,
    reinterpret_cast<magmaFloatComplex*>(dA), ldda,
    reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskySolveBatched<double>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
    double** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_dpotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskySolveBatched<float>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
    float** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_spotrs_batched(uplo, n, nrhs, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskySolveBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda,
    c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_zpotrs_batched(uplo, n, nrhs,
    reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
    reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskySolveBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda,
    c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_cpotrs_batched(uplo, n, nrhs,
    reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
    reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// ---------------------------------------------------------------------------
// magmaCholesky / magmaCholeskyBatched specializations: Cholesky factorization
// (*potrf). The selected triangle (uplo) of dA is overwritten with the factor.
// ---------------------------------------------------------------------------
template<>
void magmaCholesky<double>(
    magma_uplo_t uplo, magma_int_t n, double* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dpotrf_gpu(uplo, n, dA, ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholesky<float>(
    magma_uplo_t uplo, magma_int_t n, float* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_spotrf_gpu(uplo, n, dA, ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholesky<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zpotrf_gpu(uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholesky<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA,
    magma_int_t ldda, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cpotrf_gpu(uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskyBatched<double>(
    magma_uplo_t uplo, magma_int_t n, double** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_dpotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskyBatched<float>(
    magma_uplo_t uplo, magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_spotrf_batched(uplo, n, dA_array, ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskyBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<double>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_zpotrf_batched(uplo, n, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaCholeskyBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_int_t n, c10::complex<float>** dA_array, magma_int_t ldda,
    magma_int_t* info_array, magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  magma_cpotrf_batched(uplo, n, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, info_array, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// ---------------------------------------------------------------------------
// magmaTriangularSolve specializations: triangular solve op(A) X = B via
// *trsm with MagmaLeft and unit scaling factor alpha = 1; dB is overwritten
// with the solution. No info parameter -- trsm does not report errors.
// ---------------------------------------------------------------------------
template<>
void magmaTriangularSolve<double>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    double* dA, magma_int_t ldda, double* dB, magma_int_t lddb) {
  MagmaStreamSyncGuard guard;
  magma_dtrsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaTriangularSolve<float>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    float* dA, magma_int_t ldda, float* dB, magma_int_t lddb) {
  MagmaStreamSyncGuard guard;
  magma_strsm(MagmaLeft, uplo, trans, diag, m, n, 1, dA, ldda, dB, lddb);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaTriangularSolve<c10::complex<double>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    c10::complex<double>* dA, magma_int_t ldda, c10::complex<double>* dB, magma_int_t lddb) {
  MagmaStreamSyncGuard guard;
  // Complex alpha = 1 + 0i (MAGMA complex types have no implicit conversion
  // from int, so it is spelled out).
  magmaDoubleComplex alpha({1, 0});
  magma_ztrsm(MagmaLeft, uplo, trans, diag, m, n, alpha,
    reinterpret_cast<magmaDoubleComplex*>(dA), ldda,
    reinterpret_cast<magmaDoubleComplex*>(dB), lddb);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaTriangularSolve<c10::complex<float>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    c10::complex<float>* dA, magma_int_t ldda, c10::complex<float>* dB, magma_int_t lddb) {
  MagmaStreamSyncGuard guard;
  magmaFloatComplex alpha({1, 0});
  magma_ctrsm(MagmaLeft, uplo, trans, diag, m, n, alpha,
    reinterpret_cast<magmaFloatComplex*>(dA), ldda,
    reinterpret_cast<magmaFloatComplex*>(dB), lddb);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaTriangularSolveBatched specializations: batched in-place triangular
// solve via magmablas_*trsm_batched (side = MagmaLeft, alpha = 1). dA_array /
// dB_array are device-pointer arrays with one matrix per batch entry; the call
// is enqueued on the supplied MAGMA queue, and a pending CUDA error is
// surfaced afterwards.
template<>
void magmaTriangularSolveBatched<double>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    double** dA_array, magma_int_t ldda, double** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmablas_dtrsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaTriangularSolveBatched<float>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    float** dA_array, magma_int_t ldda, float** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  magmablas_strsm_batched(MagmaLeft, uplo, trans, diag, m, n, 1, dA_array, ldda, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaTriangularSolveBatched<c10::complex<double>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    c10::complex<double>** dA_array, magma_int_t ldda, c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  // Complex unit scalar (1 + 0i); casts assume c10::complex matches the MAGMA
  // complex layout.
  magmaDoubleComplex alpha({1, 0});
  magmablas_ztrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
                          reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda,
                          reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaTriangularSolveBatched<c10::complex<float>>(
    magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t m, magma_int_t n,
    c10::complex<float>** dA_array, magma_int_t ldda, c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t batchsize,
    const MAGMAQueue& magma_queue) {
  // Complex unit scalar (1 + 0i); see the double-complex specialization above.
  magmaFloatComplex alpha({1, 0});
  magmablas_ctrsm_batched(MagmaLeft, uplo, trans, diag, m, n, alpha,
                          reinterpret_cast<magmaFloatComplex**>(dA_array), ldda,
                          reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaGeqrfOptimalBlocksize specializations: query MAGMA for the tuned
// blocking factor (nb) used by its QR factorization for an m x n matrix.
template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<double>(magma_int_t m, magma_int_t n) {
  return magma_get_dgeqrf_nb(m, n);
}

template<>
inline magma_int_t magmaGeqrfOptimalBlocksize<float>(magma_int_t m, magma_int_t n) {
  return magma_get_sgeqrf_nb(m, n);
}

template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n) {
  return magma_get_zgeqrf_nb(m, n);
}

template <>
inline magma_int_t magmaGeqrfOptimalBlocksize<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n) {
  return magma_get_cgeqrf_nb(m, n);
}
// magmaGeqrf specializations: QR factorization (geqrf) of an m x n matrix on
// the GPU. When is_v2 is false the *geqrf_gpu driver is used and also fills
// the auxiliary dT workspace; when is_v2 is true the *geqrf2_gpu variant is
// called instead, which takes no dT argument (dT is ignored). The factorization
// result is written in-place into dA with Householder scalars in tau, and a
// LAPACK-style status code in *info.
template<>
void magmaGeqrf<double>(
    magma_int_t m, magma_int_t n, double* dA, magma_int_t ldda,
    double* tau, double* dT, magma_int_t* info, bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_dgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
  } else {
    magma_dgeqrf2_gpu(m, n, dA, ldda, tau, info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaGeqrf<float>(
    magma_int_t m, magma_int_t n, float* dA, magma_int_t ldda,
    float* tau, float* dT, magma_int_t* info, bool is_v2) {
  MagmaStreamSyncGuard guard;
  if (!is_v2) {
    magma_sgeqrf_gpu(m, n, dA, ldda, tau, dT, info);
  } else {
    magma_sgeqrf2_gpu(m, n, dA, ldda, tau, info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaGeqrf<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n,
    c10::complex<double>* dA,
    magma_int_t ldda,
    c10::complex<double>* tau,
    c10::complex<double>* dT,
    magma_int_t* info,
    bool is_v2) {
  MagmaStreamSyncGuard guard;
  // Casts assume c10::complex shares its layout with the MAGMA complex type.
  if (!is_v2) {
    magma_zgeqrf_gpu(
        m,
        n,
        reinterpret_cast<magmaDoubleComplex*>(dA),
        ldda,
        reinterpret_cast<magmaDoubleComplex*>(tau),
        reinterpret_cast<magmaDoubleComplex*>(dT),
        info);
  } else {
    magma_zgeqrf2_gpu(
        m,
        n,
        reinterpret_cast<magmaDoubleComplex*>(dA),
        ldda,
        reinterpret_cast<magmaDoubleComplex*>(tau),
        info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaGeqrf<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n,
    c10::complex<float>* dA,
    magma_int_t ldda,
    c10::complex<float>* tau,
    c10::complex<float>* dT,
    magma_int_t* info,
    bool is_v2) {
  MagmaStreamSyncGuard guard;
  // Casts assume c10::complex shares its layout with the MAGMA complex type.
  if (!is_v2) {
    magma_cgeqrf_gpu(
        m,
        n,
        reinterpret_cast<magmaFloatComplex*>(dA),
        ldda,
        reinterpret_cast<magmaFloatComplex*>(tau),
        reinterpret_cast<magmaFloatComplex*>(dT),
        info);
  } else {
    magma_cgeqrf2_gpu(
        m,
        n,
        reinterpret_cast<magmaFloatComplex*>(dA),
        ldda,
        reinterpret_cast<magmaFloatComplex*>(tau),
        info);
  }
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaOrgqr specializations: generate the explicit m x n orthogonal (or, for
// complex types, unitary — *ungqr) matrix Q from the k Householder reflectors
// produced by magmaGeqrf. dA/tau/dT must come from the corresponding geqrf
// call, and nb is the blocking factor used there. Status goes to *info.
template<>
void magmaOrgqr<double>(
    magma_int_t m, magma_int_t n, magma_int_t k, double* dA, magma_int_t ldda,
    double* tau, double* dT, magma_int_t nb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaOrgqr<float>(
    magma_int_t m, magma_int_t n, magma_int_t k, float* dA, magma_int_t ldda,
    float* tau, float* dT, magma_int_t nb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sorgqr_gpu(m, n, k, dA, ldda, tau, dT, nb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaOrgqr<c10::complex<double>>(
    magma_int_t m,
    magma_int_t n,
    magma_int_t k,
    c10::complex<double>* dA,
    magma_int_t ldda,
    c10::complex<double>* tau,
    c10::complex<double>* dT,
    magma_int_t nb,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  // Complex types use the "ungqr" (unitary) driver.
  magma_zungqr_gpu(
      m,
      n,
      k,
      reinterpret_cast<magmaDoubleComplex*>(dA),
      ldda,
      reinterpret_cast<magmaDoubleComplex*>(tau),
      reinterpret_cast<magmaDoubleComplex*>(dT),
      nb,
      info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template <>
void magmaOrgqr<c10::complex<float>>(
    magma_int_t m,
    magma_int_t n,
    magma_int_t k,
    c10::complex<float>* dA,
    magma_int_t ldda,
    c10::complex<float>* tau,
    c10::complex<float>* dT,
    magma_int_t nb,
    magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  // Complex types use the "ungqr" (unitary) driver.
  magma_cungqr_gpu(
      m,
      n,
      k,
      reinterpret_cast<magmaFloatComplex*>(dA),
      ldda,
      reinterpret_cast<magmaFloatComplex*>(tau),
      reinterpret_cast<magmaFloatComplex*>(dT),
      nb,
      info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaSymeig specializations: eigendecomposition of a symmetric (real:
// *syevd_gpu) or Hermitian (complex: *heevd_gpu) matrix. Eigenvalues go to w;
// with jobz == MagmaVec, dA is overwritten with the eigenvectors. The real
// drivers take no rwork/lrwork, so those parameters exist only to keep a
// uniform signature with the complex specializations and are explicitly
// ignored here.
template<>
void magmaSymeig<double>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, double* dA, magma_int_t ldda,
    double* w, double* wA, magma_int_t ldwa, double* work, magma_int_t lwork, double* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  (void)rwork; // unused
  (void)lrwork; // unused
  MagmaStreamSyncGuard guard;
  magma_dsyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSymeig<float>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, float* dA, magma_int_t ldda,
    float* w, float* wA, magma_int_t ldwa, float* work, magma_int_t lwork, float* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  (void)rwork; // unused
  (void)lrwork; // unused
  MagmaStreamSyncGuard guard;
  magma_ssyevd_gpu(jobz, uplo, n, dA, ldda, w, wA, ldwa, work, lwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSymeig<c10::complex<double>, double>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<double>* dA, magma_int_t ldda,
    double* w, c10::complex<double>* wA, magma_int_t ldwa, c10::complex<double>* work, magma_int_t lwork, double* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zheevd_gpu(
      jobz, uplo, n, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, w, reinterpret_cast<magmaDoubleComplex*>(wA),
      ldwa, reinterpret_cast<magmaDoubleComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSymeig<c10::complex<float>, float>(
    magma_vec_t jobz, magma_uplo_t uplo, magma_int_t n, c10::complex<float>* dA, magma_int_t ldda,
    float* w, c10::complex<float>* wA, magma_int_t ldwa, c10::complex<float>* work, magma_int_t lwork, float* rwork,
    magma_int_t lrwork, magma_int_t* iwork, magma_int_t liwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cheevd_gpu(
      jobz, uplo, n, reinterpret_cast<magmaFloatComplex*>(dA), ldda, w, reinterpret_cast<magmaFloatComplex*>(wA),
      ldwa, reinterpret_cast<magmaFloatComplex*>(work), lwork, rwork, lrwork, iwork, liwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaEig specializations: non-symmetric eigendecomposition via magma_?geev.
// Eigenvalues are returned split into real (wr) and imaginary (wi) parts, with
// optional left/right eigenvectors in VL/VR. NOTE(review): the arrays here are
// named A/VL/VR rather than dA/... as in the GPU-interface wrappers above —
// presumably magma_?geev uses MAGMA's CPU interface (host arrays); confirm
// against callers before changing.
template<>
void magmaEig<double>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, double *A, magma_int_t lda,
    double *wr, double *wi, double *VL, magma_int_t ldvl,
    double *VR, magma_int_t ldvr, double *work, magma_int_t lwork, magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  magma_dgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaEig<float>(
    magma_vec_t jobvl, magma_vec_t jobvr, magma_int_t n, float *A, magma_int_t lda,
    float *wr, float *wi, float *VL, magma_int_t ldvl,
    float *VR, magma_int_t ldvr, float *work, magma_int_t lwork, magma_int_t *info) {
  MagmaStreamSyncGuard guard;
  magma_sgeev(jobvl, jobvr, n, A, lda, wr, wi, VL, ldvl, VR, ldvr, work, lwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaSvd specializations: singular value decomposition via the
// divide-and-conquer driver magma_?gesdd. Singular values go to s, singular
// vectors to U/VT per jobz. The real drivers take no rwork, so that parameter
// is ignored in the double/float specializations; the complex drivers require
// it.
template<>
void magmaSvd<double>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, double* A,
    magma_int_t lda, double* s, double* U, magma_int_t ldu,
    double* VT, magma_int_t ldvt, double* work, magma_int_t lwork,
    double *rwork, magma_int_t* iwork, magma_int_t* info) {
  (void)rwork; // unused
  MagmaStreamSyncGuard guard;
  magma_dgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSvd<float>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, float* A,
    magma_int_t lda, float* s, float* U, magma_int_t ldu,
    float* VT, magma_int_t ldvt, float* work, magma_int_t lwork,
    float* rwork, magma_int_t* iwork, magma_int_t* info) {
  (void)rwork; // unused
  MagmaStreamSyncGuard guard;
  magma_sgesdd(jobz, m, n, A, lda, s, U, ldu, VT, ldvt, work, lwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSvd<c10::complex<float>, float>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<float>* A,
    magma_int_t lda, float* s, c10::complex<float>* U, magma_int_t ldu,
    c10::complex<float>* VT, magma_int_t ldvt, c10::complex<float>* work, magma_int_t lwork,
    float *rwork, magma_int_t* iwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgesdd(jobz, m, n, reinterpret_cast<magmaFloatComplex*>(A), lda, s,
               reinterpret_cast<magmaFloatComplex*>(U), ldu,
               reinterpret_cast<magmaFloatComplex*>(VT), ldvt,
               reinterpret_cast<magmaFloatComplex*>(work), lwork,
               rwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaSvd<c10::complex<double>, double>(
    magma_vec_t jobz, magma_int_t m, magma_int_t n, c10::complex<double>* A,
    magma_int_t lda, double* s, c10::complex<double>* U, magma_int_t ldu,
    c10::complex<double>* VT, magma_int_t ldvt, c10::complex<double>* work, magma_int_t lwork,
    double *rwork, magma_int_t* iwork, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgesdd(jobz, m, n, reinterpret_cast<magmaDoubleComplex*>(A), lda, s,
               reinterpret_cast<magmaDoubleComplex*>(U), ldu,
               reinterpret_cast<magmaDoubleComplex*>(VT), ldvt,
               reinterpret_cast<magmaDoubleComplex*>(work), lwork,
               rwork, iwork, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaLuSolve specializations: solve A X = B given the LU factorization of A
// (dA plus pivot indices ipiv, as produced by magmaLu), via magma_?getrs_gpu
// with MagmaNoTrans. dB is overwritten with the solution; status goes to
// *info.
template<>
void magmaLuSolve<double>(
    magma_int_t n, magma_int_t nrhs, double* dA, magma_int_t ldda, magma_int_t* ipiv,
    double* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_dgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuSolve<float>(
    magma_int_t n, magma_int_t nrhs, float* dA, magma_int_t ldda, magma_int_t* ipiv,
    float* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_sgetrs_gpu(MagmaNoTrans, n, nrhs, dA, ldda, ipiv, dB, lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuSolve<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>* dA, magma_int_t ldda, magma_int_t* ipiv,
    c10::complex<double>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_zgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaDoubleComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuSolve<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>* dA, magma_int_t ldda, magma_int_t* ipiv,
    c10::complex<float>* dB, magma_int_t lddb, magma_int_t* info) {
  MagmaStreamSyncGuard guard;
  magma_cgetrs_gpu(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex*>(dA), ldda, ipiv, reinterpret_cast<magmaFloatComplex*>(dB), lddb, info);
  AT_CUDA_CHECK(cudaGetLastError());
}
// magmaLuSolveBatched specializations: batched LU solve via
// magma_?getrs_batched (MagmaNoTrans) on the supplied MAGMA queue. Unlike the
// single-matrix wrappers, the batched drivers return their status, which is
// stored into the `info` out-reference rather than a per-matrix array.
template<>
void magmaLuSolveBatched<double>(
    magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    double** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_dgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuSolveBatched<float>(
    magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    float** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_sgetrs_batched(MagmaNoTrans, n, nrhs, dA_array, ldda, dipiv_array, dB_array, lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuSolveBatched<c10::complex<double>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<double>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    c10::complex<double>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_zgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaDoubleComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaDoubleComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}

template<>
void magmaLuSolveBatched<c10::complex<float>>(
    magma_int_t n, magma_int_t nrhs, c10::complex<float>** dA_array, magma_int_t ldda, magma_int_t** dipiv_array,
    c10::complex<float>** dB_array, magma_int_t lddb, magma_int_t& info,
    magma_int_t batchsize, const MAGMAQueue& magma_queue) {
  info = magma_cgetrs_batched(MagmaNoTrans, n, nrhs, reinterpret_cast<magmaFloatComplex**>(dA_array), ldda, dipiv_array, reinterpret_cast<magmaFloatComplex**>(dB_array), lddb, batchsize, magma_queue.get_queue());
  AT_CUDA_CHECK(cudaGetLastError());
}
#endif
// Allocates `size` elements of `type` via pin_memory<type> (presumably a
// pinned/page-locked host buffer — it is passed to MAGMA batched calls below)
// and points `name` at the data. The storage_##name local keeps the
// allocation alive until the end of the enclosing scope.
#define ALLOCATE_ARRAY(name, type, size) \
  auto storage_##name = pin_memory<type>(size); \
  name = static_cast<type*>(storage_##name.data());
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves the general linear system A x = b in-place: on return `b` holds the
// solution and `A` its LU factorization. For a 2-D `b` a single magmaSolve
// call is made; otherwise device-pointer arrays are built and magmaSolveBatched
// is invoked in chunks of at most 65535 matrices. LAPACK-style status codes
// are written into `infos` (one entry per matrix; infos[0] in the 2-D case).
template <typename scalar_t>
static void apply_solve(Tensor& b, Tensor& A, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("solve: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto A_data = A.data_ptr<scalar_t>();
  auto b_data = b.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
  // Leading dimension must be at least 1 even for an empty matrix.
  magma_int_t lda = std::max(magma_int_t{1}, n);

  if (b.dim() == 2) {
    auto ipiv = at::empty({n}, at::kInt);
    magma_int_t info = 0;
    magmaSolve<scalar_t>(n, nrhs, A_data, lda, ipiv.data_ptr<magma_int_t>(),
                        b_data, lda, &info);
    infos[0] = info;
  } else {
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

    magma_int_t* info_array;
    magma_int_t* ipiv_data;
    magma_int_t** ipiv_array;
    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
    ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
    ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    // Set up the created arrays: one pointer per matrix / pivot block.
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
      ipiv_array[i] = &ipiv_data[i * n];
    }

    MAGMAQueue magma_queue(b.get_device());

    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];
      magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
      magma_int_t* info_array_cur = &info_array[mini_idx];

      magmaSolveBatched<scalar_t>(
          n, nrhs, A_array_cur, lda, ipiv_array_cur, b_array_cur, lda,
          info_array_cur, batch_limit, magma_queue);
    }

    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    // (mini_idx already points at the first unprocessed matrix here).
    if (batch_size % batch_limit != 0) {
      magmaSolveBatched<scalar_t>(
          n, nrhs, &A_array[mini_idx], lda, &ipiv_array[mini_idx], &b_array[mini_idx], lda,
          &info_array[mini_idx], batch_size % batch_limit, magma_queue);
    }

    for (int64_t i = 0; i < batch_size; i++) {
      infos[i] = info_array[i];
    }
  }
#endif
}
// CUDA entry point for torch.solve: returns (solution, LU factorization of A).
// Works on column-major clones so apply_solve can factor/solve in-place, then
// checks the per-matrix info codes and raises on failure.
std::tuple<Tensor, Tensor> _solve_helper_cuda(const Tensor& self, const Tensor& A) {
  Tensor solution = cloneBatchedColumnMajor(self);
  Tensor lu = cloneBatchedColumnMajor(A);
  std::vector<int64_t> infos(batchCount(self), 0);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "solve_cuda", [&]{
    apply_solve<scalar_t>(solution, lu, infos);
  });
  const bool is_batched = self.dim() > 2;
  if (is_batched) {
    batchCheckErrors(infos, "solve_cuda");
  } else {
    singleCheckErrors(infos[0], "solve_cuda");
  }
  return std::make_tuple(solution, lu);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the inverse of each matrix in the batch: `self` is LU-factored
// in-place (magmaLuBatched) and the inverses are written into `self_inv`
// (magmaGetriBatched), processed in chunks of at most 65535 matrices.
// Per-matrix status codes are written into `infos`.
template <typename scalar_t>
static void apply_batched_inverse(Tensor& self, Tensor& self_inv, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data_ptr<scalar_t>();
  auto self_mat_stride = matrixStride(self);
  auto self_inv_data = self_inv.data_ptr<scalar_t>();
  auto self_inv_mat_stride = matrixStride(self_inv);

  magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");

  magma_int_t* info_array;
  magma_int_t* ipiv_data;
  magma_int_t** ipiv_array;
  scalar_t** self_array;
  scalar_t** self_inv_array;

  ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
  ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n);
  ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size);
  ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);
  ALLOCATE_ARRAY(self_inv_array, scalar_t*, batch_size);

  // Set up the created arrays: one pointer per matrix / pivot block.
  for (int64_t i = 0; i < batch_size; i++) {
    self_array[i] = &self_data[i * self_mat_stride];
    self_inv_array[i] = &self_inv_data[i * self_inv_mat_stride];
    ipiv_array[i] = &ipiv_data[i * n];
  }

  MAGMAQueue magma_queue(self.get_device());
  // LU-factor the whole batch once; getri below consumes the factors + pivots.
  magmaLuBatched<scalar_t>(
    n, n, self_array, n, ipiv_array, info_array,
    batch_size, magma_queue);

  constexpr int64_t batch_limit = 65535;
  // Compute as many batches of 65535 possible
  // The number of "mini"-batches are floor(batch_size / batch_limit)
  // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
  int64_t mini_batches = batch_size / batch_limit, mini_idx;
  for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
    scalar_t** self_array_cur = &self_array[mini_idx];
    scalar_t** self_inv_array_cur = &self_inv_array[mini_idx];
    magma_int_t** ipiv_array_cur = &ipiv_array[mini_idx];
    magma_int_t* info_array_cur = &info_array[mini_idx];

    magmaGetriBatched<scalar_t>(
      n, self_array_cur, n, ipiv_array_cur, self_inv_array_cur,
      n, info_array_cur, batch_limit, magma_queue);
  }

  // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
  // which concisely is equal to batch_size % batch_limit
  if (batch_size % batch_limit != 0) {
    magmaGetriBatched<scalar_t>(
      n, &self_array[mini_idx], n, &ipiv_array[mini_idx], &self_inv_array[mini_idx],
      n, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
  }

  for (int64_t i = 0; i < batch_size; i++) {
    infos[i] = info_array[i];
  }
#endif
}
// Inverts a single matrix in-place: LU factorization (magmaLu) followed by
// magmaGetri with a workspace sized from MAGMA's tuned blocksize. On a
// non-zero LU info (singular/invalid input) the function returns early without
// calling getri; the final status is stored into `info`.
template <typename scalar_t>
static void apply_single_inverse(Tensor& self, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("inverse: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
  // Workspace size recommended for getri: n * optimal blocksize.
  magma_int_t lwork = n * magmaGetriOptimalBlocksize<scalar_t>(n);
  magma_int_t info_tmp = 0;

  Tensor ipiv = at::empty({n}, at::kInt);
  Tensor dwork = at::empty({lwork}, self.options());
  magmaLu<scalar_t>(n, n, self_data, n, ipiv.data_ptr<magma_int_t>(), &info_tmp);
  if (info_tmp != 0) {
    info = info_tmp;
    return;
  }
  magmaGetri<scalar_t>(
    n, self_data, n, ipiv.data_ptr<magma_int_t>(), dwork.data_ptr<scalar_t>(), lwork, &info_tmp);
  info = info_tmp;
#endif
}
// MAGMA-backed matrix inverse. Batched inputs (dim > 2) go through the
// batched LU + getri path; a plain 2-D matrix uses the single-matrix path.
// The result is a column-major clone of `self` holding the inverse.
Tensor _inverse_helper_cuda_legacy(const Tensor& self) {
  Tensor inverse = cloneBatchedColumnMajor(self);
  if (self.dim() > 2) {
    std::vector<int64_t> infos(batchCount(self), 0);
    // apply_batched_inverse factors this clone in-place, so make a second copy.
    Tensor lu = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_batched_inverse<scalar_t>(lu, inverse, infos);
    });
    batchCheckErrors(infos, "inverse_cuda");
  } else {
    int64_t status = 0;
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "inverse_cuda", [&]{
      apply_single_inverse<scalar_t>(inverse, status);
    });
    singleCheckErrors(status, "inverse_cuda");
  }
  return inverse;
}
// Entry point for matrix inverse on CUDA. With cuSOLVER compiled in, single
// matrices, very small batches (batchCount <= 2), or builds where MAGMA is
// unavailable at runtime are routed to the cuSOLVER/cuBLAS implementation;
// everything else uses the MAGMA path above. Without cuSOLVER, MAGMA handles
// all cases.
Tensor _inverse_helper_cuda(const Tensor& self) {
#ifdef USE_CUSOLVER
  if ((self.dim() == 2) || (/* self.dim() > 2 && */ batchCount(self) <= 2) || !use_magma_) {
    return _inverse_helper_cuda_lib(self);    // cusolver or cublas
  } else {
    return _inverse_helper_cuda_legacy(self); // magma-cuda
  }
#else
  return _inverse_helper_cuda_legacy(self); // magma-cuda
#endif
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A x = b given the Cholesky factorization of A (in `A`, upper or
// lower per `upper`); `b` is overwritten with the solution. A 2-D `b` uses a
// single magmaCholeskySolve call; batched inputs use magmaCholeskySolveBatched
// in chunks of at most 65535 matrices, stopping early if a chunk reports a
// non-zero status. The final status is stored into `info`.
template <typename scalar_t>
static void apply_cholesky_solve(Tensor& b, Tensor& A, bool upper, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("cholesky_solve: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;

  auto A_data = A.data_ptr<scalar_t>();
  auto b_data = b.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");

  int info_tmp = 0;
  if (b.dim() == 2) {
    magmaCholeskySolve<scalar_t>(uplo, n, nrhs, A_data, n,
                                 b_data, n, &info_tmp);
    info = info_tmp;
  } else {
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);
    magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    // Set up the created arrays: one pointer per matrix.
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
    }

    MAGMAQueue magma_queue(b.get_device());

    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];

      magmaCholeskySolveBatched<scalar_t>(
          uplo, n, nrhs, A_array_cur, n, b_array_cur, n,
          info_tmp, batch_limit, magma_queue);

      // Stop at the first failing chunk; info_tmp carries the error out.
      if (info_tmp != 0) {
        break;
      }
    }

    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    if (batch_size % batch_limit != 0 && info_tmp == 0) {
      magmaCholeskySolveBatched<scalar_t>(
          uplo, n, nrhs, &A_array[mini_idx], n, &b_array[mini_idx], n,
          info_tmp, batch_size % batch_limit, magma_queue);
    }

    info = info_tmp;
  }
#endif
}
// CUDA entry point for cholesky_solve: solves A x = b given a Cholesky factor
// in `A`. Works on column-major clones so the MAGMA kernels can operate
// in-place, then validates the returned status code.
Tensor _cholesky_solve_helper_cuda(const Tensor& self, const Tensor& A, bool upper) {
  auto rhs = cloneBatchedColumnMajor(self);
  auto factor = cloneBatchedColumnMajor(A);
  int64_t status = 0;
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_solve_cuda", [&]{
    apply_cholesky_solve<scalar_t>(rhs, factor, upper, status);
  });
  TORCH_CHECK(status == 0, "MAGMA cholesky_solve : invalid argument: ", -status);
  return rhs;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the Cholesky factorization of `self` in-place (upper or lower per
// `upper`). A 2-D input uses a single magmaCholesky call; batched inputs use
// magmaCholeskyBatched in chunks (65535 for complex types, 262140 otherwise —
// see the comment below). Per-matrix status codes are written into `infos`.
template <typename scalar_t>
static void apply_cholesky(Tensor& self, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("cholesky: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;

  auto self_data = self.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(self.size(-2), "self.size(-2)");
  // Leading dimension must be at least 1 even for an empty matrix.
  auto lda = std::max<magma_int_t>(1, n);

  if (self.dim() == 2) {
    magma_int_t info = 0;
    magmaCholesky<scalar_t>(uplo, n, self_data, lda, &info);
    infos[0] = info;
  } else {
    auto self_mat_stride = matrixStride(self);
    magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");

    magma_int_t* info_array;
    scalar_t** self_array;

    ALLOCATE_ARRAY(info_array, magma_int_t, batch_size);
    ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);

    // Set up the created arrays: one pointer per matrix.
    for (int64_t i = 0; i < batch_size; i++) {
      self_array[i] = &self_data[i * self_mat_stride];
    }

    MAGMAQueue magma_queue(self.get_device());

    int64_t batch_limit = self.is_complex() ? 65535 : 262140;
    // Compute as many batches of 262140 possible
    // 262140 is the size of the largest batch of matrices that can be run with
    // violating maximum kernel configuration
    // For complex input the batch limit is 65535 (determined experimentally, see https://github.com/pytorch/pytorch/pull/47047#discussion_r516086923 for more information)
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit cholesky calls
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** self_array_cur = &self_array[mini_idx];
      magma_int_t* info_array_cur = &info_array[mini_idx];

      magmaCholeskyBatched<scalar_t>(
        uplo, n, self_array_cur, lda, info_array_cur, batch_limit, magma_queue);
    }

    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    if (batch_size % batch_limit != 0) {
      magmaCholeskyBatched<scalar_t>(
        uplo, n, &self_array[mini_idx], lda, &info_array[mini_idx], batch_size % batch_limit, magma_queue);
    }

    for (int64_t i = 0; i < batch_size; i++) {
      infos[i] = info_array[i];
    }
  }
#endif
}
// CUDA entry point for cholesky. The factorization itself always runs in
// lower mode: for `upper` the input is transposed first and the result is
// transposed back at the end, which yields the upper factor.
Tensor _cholesky_helper_cuda(const Tensor& self, bool upper) {
  std::vector<int64_t> infos(batchCount(self), 0);
  Tensor working = upper
      ? cloneBatchedColumnMajor(self.transpose(-1, -2))
      : cloneBatchedColumnMajor(self);
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "cholesky_cuda", [&]{
    // `upper` is already accounted for by the transpose above, hence `false`.
    apply_cholesky<scalar_t>(working, false, infos);
  });
  const bool is_batched = self.dim() > 2;
  if (is_batched) {
    batchCheckErrors(infos, "cholesky_cuda");
  } else {
    singleCheckErrors(infos[0], "cholesky_cuda");
  }
  return upper ? working.transpose(-1, -2) : working;
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// LU-factorizes `self` in-place, writing pivot indices into `pivots` (when
// get_pivots is true) and status codes into `infos`. A 2-D input uses the
// single-matrix hybrid drivers with CPU-side pivot/info temporaries that are
// copied back afterwards; batched inputs use the batched drivers directly.
template <typename scalar_t>
static void apply_lu(Tensor& self, Tensor& pivots, Tensor& infos, bool get_pivots) {
#ifndef USE_MAGMA
AT_ERROR("lu: MAGMA library not found in "
    "compilation. Please rebuild with MAGMA.");
#else
  auto self_data = self.data_ptr<scalar_t>();
  magma_int_t m = magma_int_cast(self.size(-2), "m");
  magma_int_t n = magma_int_cast(self.size(-1), "n");
  // Number of pivots produced by LU of an m x n matrix.
  magma_int_t k = std::min(m, n);

  if (self.dim() == 2) {
    // If `pivots` is defined, then we have to compute them.
    // magmaLu and magmaLuNoPiv use a hybrid CPU-GPU algorithm to compute
    // the partially-pivoted LU decomposition with / without pivots.
    // The driver routines magma_(d/s)getrf_(nopiv_)gpu accepts a tensor on the CPU for pivots.
    // The data is later copied back to the appropriate output tensor.
    Tensor info_tmp = at::zeros({}, at::kInt);
    if (get_pivots) {
      Tensor piv_tmp = at::empty({k}, at::kInt);
      magmaLu<scalar_t>(
        m, n, self_data, m, piv_tmp.data_ptr<magma_int_t>(), info_tmp.data_ptr<magma_int_t>());
      pivots.copy_(piv_tmp);
    } else {
      magmaLuNoPiv<scalar_t>(m, n, self_data, m, info_tmp.data_ptr<magma_int_t>());
    }
    infos.copy_(info_tmp);
  } else {
    auto self_matrix_stride = matrixStride(self);
    magma_int_t batch_size = magma_int_cast(batchCount(self), "batchCount");

    scalar_t** self_array;
    ALLOCATE_ARRAY(self_array, scalar_t*, batch_size);

    // Set up the created arrays: one pointer per matrix.
    for (int64_t i = 0; i < batch_size; i++) {
      self_array[i] = &self_data[i * self_matrix_stride];
    }

    MAGMAQueue magma_queue(self.get_device());

    // Same comment as in the case of single matrix above.
    if (get_pivots) {
      auto pivots_data = pivots.data_ptr<magma_int_t>();
      auto pivots_matrix_stride = pivots.size(-1);
      magma_int_t** pivots_array;
      ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
      for (int64_t i = 0; i < batch_size; i++) {
        pivots_array[i] = &pivots_data[i * pivots_matrix_stride];
      }
      magmaLuBatched<scalar_t>(
        m, n, self_array, m, pivots_array,
        infos.data_ptr<magma_int_t>(), batch_size, magma_queue);
    } else {
      magmaLuNoPivBatched<scalar_t>(
        m, n, self_array, m, infos.data_ptr<magma_int_t>(),
        batch_size, magma_queue);
    }
  }
#endif
}
// CUDA entry point for torch.lu: returns (LU factors, pivots, infos).
// Pivots are pre-initialized to the identity permutation (1..k) so that the
// no-pivot path still yields a valid permutation; empty inputs skip the MAGMA
// call entirely. With check_errors set, non-zero info codes raise here
// (singular matrices allowed).
std::tuple<Tensor, Tensor, Tensor> _lu_with_info_cuda(const Tensor& self, bool pivot, bool check_errors) {
  TORCH_CHECK(self.dim() >= 2,
           "expected tensor with 2 or more dimensions, got size: ", self.sizes(),
           " instead");
  auto m = self.size(-2);
  auto n = self.size(-1);
  auto k = std::min(m, n);
  // Pivot tensor shape: batch dims + [k]; infos shape: batch dims only.
  auto req_size = self.sizes().vec();
  req_size.pop_back();
  req_size.back() = k;
  Tensor pivots_tensor = at::arange(1, k + 1, self.options().dtype(at::kInt)).expand(req_size).contiguous();
  req_size.pop_back();
  auto infos_tensor = at::zeros(req_size, self.options().dtype(at::kInt));

  Tensor self_working_copy;
  if (self.numel() == 0) {
    self_working_copy = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  } else {
    self_working_copy = cloneBatchedColumnMajor(self);
    AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_cuda", [&]{
      apply_lu<scalar_t>(self_working_copy, pivots_tensor, infos_tensor, pivot);
    });
  }
  if (check_errors) {
    if (self.dim() == 2) {
      singleCheckErrors(infos_tensor.item<int64_t>(), "lu", /*allow_singular=*/true);
    } else {
      batchCheckErrors(infos_tensor, "lu", /*allow_singular=*/true);
    }
  }
  return std::make_tuple(self_working_copy, pivots_tensor, infos_tensor);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ triangular_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves the triangular system op(A) x = b in-place: `b` is overwritten with
// the solution. A batch count of 1 (plain 2-D tensors, or higher-rank tensors
// whose batch dims are all 1) uses the single-matrix wrapper; otherwise the
// batched wrapper runs in chunks of at most 65535 matrices. trsm reports no
// info code, so there is no error output here.
template <typename scalar_t>
static void apply_triangular_solve(Tensor& b, Tensor& A, bool upper, bool transpose, bool unitriangular) {
#ifndef USE_MAGMA
AT_ERROR("triangular_solve: MAGMA library not found in "
         "compilation. Please rebuild with MAGMA.");
#else
  magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
  magma_trans_t trans = transpose ? MagmaTrans : MagmaNoTrans;
  magma_diag_t diag = unitriangular ? MagmaUnit : MagmaNonUnit;

  auto A_data = A.data_ptr<scalar_t>();
  auto b_data = b.data_ptr<scalar_t>();
  magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
  magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
  magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");

  // batch_size == 1 implies that:
  // 1. the RHS and LHS tensors have 2 dimensions, or
  // 2. the RHS and LHS tensors have more than 2 dimensions but all batch dimensions are 1
  if (batch_size == 1) {
    magmaTriangularSolve<scalar_t>(uplo, trans, diag, n, nrhs, A_data, n, b_data, n);
  } else {
    auto A_mat_stride = matrixStride(A);
    auto b_mat_stride = matrixStride(b);

    scalar_t** A_array;
    scalar_t** b_array;

    ALLOCATE_ARRAY(A_array, scalar_t*, batch_size);
    ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);

    // Set up the created arrays: one pointer per matrix.
    for (int64_t i = 0; i < batch_size; i++) {
      A_array[i] = &A_data[i * A_mat_stride];
      b_array[i] = &b_data[i * b_mat_stride];
    }

    MAGMAQueue magma_queue(b.get_device());

    constexpr int64_t batch_limit = 65535;
    // Compute as many batches of 65535 possible
    // The number of "mini"-batches are floor(batch_size / batch_limit)
    // and these cover floor(batch_size / batch_limit) * batch_limit matrix solves
    int64_t mini_batches = batch_size / batch_limit, mini_idx;
    for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
      scalar_t** A_array_cur = &A_array[mini_idx];
      scalar_t** b_array_cur = &b_array[mini_idx];

      magmaTriangularSolveBatched<scalar_t>(
          uplo, trans, diag, n, nrhs, A_array_cur,
          n, b_array_cur, n, batch_limit, magma_queue);
    }

    // Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
    // which concisely is equal to batch_size % batch_limit
    if (batch_size % batch_limit != 0) {
      magmaTriangularSolveBatched<scalar_t>(
          uplo, trans, diag, n, nrhs, &A_array[mini_idx],
          n, &b_array[mini_idx], n, batch_size % batch_limit, magma_queue);
    }
  }
#endif
}
std::tuple<Tensor, Tensor> _triangular_solve_helper_cuda(const Tensor& self, const Tensor& A,
bool upper, bool transpose, bool unitriangular) {
auto self_working_copy = cloneBatchedColumnMajor(self);
auto A_working_copy = cloneBatchedColumnMajor(A);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "triangular_solve_cuda", [&]{
apply_triangular_solve<scalar_t>(self_working_copy, A_working_copy, upper, transpose, unitriangular);
});
return std::tuple<Tensor, Tensor>(self_working_copy, A_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ qr ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the QR decomposition of a batch of matrices using MAGMA.
// R (a clone of the input) is factorized in place to obtain the raw R;
// Q (a second copy of the input) is factorized again and then formed
// explicitly via orgqr. On a MAGMA failure the status is stored in infos[i]
// and the function returns early; remaining entries keep the values the
// caller initialized them with.
template <typename scalar_t>
static void apply_qr(Tensor& Q, Tensor& R, int64_t n_columns, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("qr: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto q_data = Q.data_ptr<scalar_t>();
auto r_data = R.data_ptr<scalar_t>();
auto q_matrix_stride = matrixStride(Q);
auto r_matrix_stride = matrixStride(R);
magma_int_t m = magma_int_cast(Q.size(-2), "Q.size(-2)");
magma_int_t n = magma_int_cast(R.size(-1), "R.size(-1)");
magma_int_t k = m < n ? m : n;
magma_int_t nb = magmaGeqrfOptimalBlocksize<scalar_t>(m, n);
int64_t batch_size = batchCount(R);
// magmaGeqrf uses a hybrid CPU-GPU algorithm to compute the elementary reflectors.
// The driver routine magma_(d/s)geqrf2_gpu accepts a tensor on the CPU for elementary reflectors.
Tensor tau = at::empty({k}, Q.options().device(at::kCPU));
Tensor work = at::empty({(2 * k + magma_roundup(n, 32)) * nb}, R.options());
scalar_t* tau_data = tau.data_ptr<scalar_t>();
scalar_t* work_data = work.data_ptr<scalar_t>();
// This phase computes R (the raw version)
// This uses MAGMA's ?geqrf2_gpu function
magma_int_t info = 0;
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* r_working_ptr = &r_data[i * r_matrix_stride];
magmaGeqrf<scalar_t>(m, n, r_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/true);
infos[i] = info;
if (info != 0) {
return;
}
}
// This phase computes Q (the raw version)
// We require to perform ?geqrf_gpu again due to this bug in MAGMA:
// - ?geqrf_gpu allows fast computation of Q via ?orgqr_gpu, but doesn't give R properly.
// - ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orgqr_gpu
// Refer to the below link for more details:
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* q_working_ptr = &q_data[i * q_matrix_stride];
magmaGeqrf<scalar_t>(m, n, q_working_ptr, m, tau_data, work_data, &info, /*is_v2=*/false);
infos[i] = info;
if (info != 0) {
return;
}
// Form the first n_columns columns of Q explicitly from the reflectors.
magmaOrgqr<scalar_t>(m, n_columns, k, q_working_ptr, m, tau_data, work_data, nb, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
// Prepares inputs for apply_qr and checks the per-batch MAGMA statuses.
// `some` selects the reduced (economy) decomposition. Returns (Q, R) with Q
// narrowed to n_columns_q columns and R narrowed/upper-triangularized.
std::tuple<Tensor,Tensor> _qr_helper_cuda(const Tensor& self, bool some) {
std::vector<int64_t> infos(batchCount(self), 0);
// Setup input geometry and inputs for apply_qr
std::vector<int64_t> q_sizes, q_strides;
int64_t n_columns_q;
std::tie(q_sizes, q_strides, n_columns_q) = _compute_geometry_for_Q(self, some);
Tensor q_working_copy, r_working_copy;
// If there are no elements, then we simply return a pair of tensors of required dimensions
if (self.numel() == 0) {
// Fix the number of columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = n_columns_q;
q_working_copy = at::eye(q_sizes[self.dim() - 2], q_sizes[self.dim() - 1], self.options());
// NOTE(review): expand_as on itself is a no-op here -- presumably leftover
// from an expansion to the batch shape; confirm before removing.
q_working_copy = q_working_copy.expand_as(q_working_copy);
// We repurpose the same q_sizes for r_working_copy
// Fix the number of rows and columns of q_working_copy appropriately
q_sizes[self.dim() - 1] = self.size(-1);
q_sizes[self.dim() - 2] = n_columns_q;
r_working_copy = at::empty(q_sizes, self.options());
return std::make_tuple(q_working_copy, r_working_copy);
}
// Q starts as a copy of the input placed in its leading columns; R is a
// column-major clone that apply_qr factorizes in place.
q_working_copy = at::empty_strided(q_sizes, q_strides, self.options());
q_working_copy.narrow(-1, 0, self.size(-1)).copy_(self);
r_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "qr_cuda", [&]{
apply_qr<scalar_t>(q_working_copy, r_working_copy, n_columns_q, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "qr_cuda");
} else {
singleCheckErrors(infos[0], "qr_cuda");
}
return std::make_tuple(q_working_copy.narrow(-1, 0, n_columns_q),
r_working_copy.narrow(-2, 0, n_columns_q).triu());
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ symeig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes eigenvalues (and optionally eigenvectors) of a batch of symmetric
// (Hermitian) matrices via MAGMA. `self` is overwritten with the
// eigenvectors when `eigenvectors` is true; eigenvalues are written into
// `eigvals` (real-valued, one row per batch matrix). On a MAGMA failure the
// status is recorded in infos[i] and the function returns early.
template <typename scalar_t>
static void apply_symeig(Tensor& self, Tensor& eigvals, bool eigenvectors, bool upper, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("symeig: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
// value_t is the real type underlying scalar_t (e.g. float for complex<float>).
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto eigvals_data = eigvals.data_ptr<value_t>();
auto self_matrix_stride = matrixStride(self);
auto eigvals_stride = eigvals.size(-1);
int64_t batch_size = batchCount(self);
magma_int_t n = magma_int_cast(self.size(-1), "n");
magma_uplo_t uplo = upper ? MagmaUpper : MagmaLower;
magma_vec_t jobz = eigenvectors ? MagmaVec : MagmaNoVec;
// Scratch matrix required by the MAGMA driver.
scalar_t* wA;
ALLOCATE_ARRAY(wA, scalar_t, n * n);
magma_int_t info;
// Run once, first to get the optimum work sizes.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magma_int_t liwork = -1;
magma_int_t iwkopt;
magma_int_t lrwork = -1;
value_t rwkopt;
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_data, n, eigvals_data, wA, n, &wkopt, lwork, &rwkopt, lrwork, &iwkopt, liwork, &info);
scalar_t* work;
magma_int_t* iwork;
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
liwork = magma_int_cast(iwkopt, "iwork_size");
ALLOCATE_ARRAY(work, scalar_t, lwork);
ALLOCATE_ARRAY(iwork, magma_int_t, liwork);
// rwork is only needed for complex inputs; it stays nullptr otherwise.
value_t* rwork = nullptr;
c10::Storage storage_rwork;
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
lrwork = magma_int_cast(rwkopt, "rwork_size");
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
for (int64_t i = 0; i < batch_size; i++) {
scalar_t* self_working_ptr = &self_data[i * self_matrix_stride];
value_t* eigvals_working_ptr = &eigvals_data[i * eigvals_stride];
magmaSymeig<scalar_t, value_t>(jobz, uplo, n, self_working_ptr, n, eigvals_working_ptr,
wA, n, work, lwork, rwork, lrwork, iwork, liwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
std::tuple<Tensor, Tensor> _symeig_helper_cuda(const Tensor& self, bool eigenvectors, bool upper) {
std::vector<int64_t> infos(batchCount(self), 0);
auto self_sizes = self.sizes().vec();
self_sizes.pop_back();
ScalarType dtype = toValueType(typeMetaToScalarType(self.dtype()));
// magmaSymeig uses a hybrid CPU-GPU algorithm to compute the eigenvalues and eigenvectors.
// The driver routine magma_(d/s)syev_gpu accepts a tensor on the CPU for eigvalenvalues.
// The data is later moved to the appropriate device.
// In the case where self.numel() == 0, we just return an empty tensor of
// dimensions on the CUDA (to avoid the unnecessary "to(at::kCUDA)")
auto eigvals_working_copy = self.numel() == 0
? at::empty(self_sizes, self.options().dtype(dtype))
: at::empty(self_sizes, self.options().dtype(dtype).device(at::kCPU));
if (self.numel() == 0) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy, at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT));
}
auto self_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "symeig_cuda", [&]{
apply_symeig<scalar_t>(self_working_copy, eigvals_working_copy, eigenvectors, upper, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "symeig_cuda");
} else {
singleCheckErrors(infos[0], "symeig_cuda");
}
if (eigenvectors) {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), self_working_copy);
} else {
return std::tuple<Tensor, Tensor>(eigvals_working_copy.to(self.device()), at::empty({0}, self.options()));
}
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ eig ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// magmaEig uses a hybrid CPU-GPU algorithm, which takes and return CPU
// memory. So, we accept a GPU tensor, copy it to CPU memory, and later copy
// the returned values from CPU to GPU. See also magmaSymeig, which uses a
// similar approach.
// Computes (possibly complex) eigenvalues and, when requested, right
// eigenvectors of a single square CPU matrix via MAGMA's hybrid driver.
// out_eigvals receives (real, imaginary) parts in its two columns;
// out_eigvecs receives the eigenvectors when `eigenvectors` is true.
// NOTE: when n == 0 the body is skipped entirely and *info_ptr is left
// unmodified -- callers must pre-initialize it.
template <typename scalar_t>
static void apply_eig(const Tensor& self, bool eigenvectors, Tensor& out_eigvals, Tensor& out_eigvecs,
int64_t *info_ptr) {
#ifndef USE_MAGMA
TORCH_CHECK(false, "Calling torch.eig on a CUDA tensor requires compiling PyTorch with MAGMA. "
"Either transfer the tensor to the CPU before calling torch.eig or recompile with MAGMA.");
#else
TORCH_INTERNAL_ASSERT(self.device() == at::kCPU, "Internal error: apply_eig needs a CPU tensor");
magma_vec_t jobvr = eigenvectors ? MagmaVec : MagmaNoVec;
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto self_data = self.data_ptr<scalar_t>();
auto out_eigvals_data = out_eigvals.data_ptr<scalar_t>();
// Real and imaginary parts occupy the two columns of out_eigvals.
scalar_t *wr = out_eigvals_data;
scalar_t *wi = out_eigvals_data+n;
scalar_t *vr_data = NULL;
magma_int_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = out_eigvecs.data_ptr<scalar_t>();
ldvr = n;
}
if (n > 0) {
// call magmaEig once to get the optimal size of work_data
scalar_t wkopt;
magma_int_t info;
magmaEig<scalar_t>(MagmaNoVec, jobvr, n, self_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
magma_int_t lwork = (magma_int_t) wkopt;
// call it a 2nd time to do the actual work
scalar_t *work_data = nullptr;
ALLOCATE_ARRAY(work_data, scalar_t, lwork);
magmaEig<scalar_t>(MagmaNoVec, jobvr, n, self_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
*info_ptr = info;
}
#endif
}
/*
* Internal helper; like eig_cuda but:
* 1. assume that self is a square matrix of side "n"
* 2. return CPU tensors (because this is what magmaEig returns), which will be copied to GPU memory
* by the caller
*/
// Copies `self` into pinned column-major CPU memory (magmaEig is a hybrid
// CPU-GPU routine that reads host memory), runs the eigendecomposition, and
// returns CPU tensors (eigenvalues as an (n, 2) real/imag matrix, and
// eigenvectors when requested). The caller copies the results back to GPU.
std::tuple<Tensor, Tensor> eig_kernel_impl(const Tensor& self, bool& eigenvectors) {
  int64_t n = self.size(-1);
  // copy self to pinned CPU memory
  auto self_working_copy = at::empty_strided(
      {n, n}, // square matrix
      {1, n}, // column-ordered, as magmaEig expects
      at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
  self_working_copy.copy_(self);

  // tensors holding the results. We use empty_strided to make them column-ordered
  auto options = self.options().device(at::kCPU).memory_format(LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  auto out_eigvals = at::empty_strided({n, 2}, {1, n}, options);
  auto out_eigvecs = eigenvectors
      ? at::empty_strided({n, n}, {1, n}, options)
      : Tensor();

  // BUGFIX: info must be zero-initialized. apply_eig only writes *info_ptr
  // when n > 0, so for an empty (0 x 0) input singleCheckErrors would
  // otherwise read an uninitialized value.
  int64_t info = 0;
  AT_DISPATCH_FLOATING_TYPES(self.scalar_type(), "eig_cuda", [&]{
    apply_eig<scalar_t>(self_working_copy, eigenvectors, out_eigvals, out_eigvecs, &info);
  });
  singleCheckErrors(info, "eig_cuda");

  return std::tuple<Tensor, Tensor>(out_eigvals, out_eigvecs);
}
REGISTER_DISPATCH(eig_stub, &eig_kernel_impl);
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ syevd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// This function computes eigenvalues 'w' and eigenvectors 'v' of the tensor 'self'
// compute_eigenvectors controls whether eigenvectors should be computed
// uplo controls the portion of input matrix to consider in computations, allowed values are "u", "U", "l", "L"
// '_symeig_helper_cuda' prepares correct input for 'apply_symeig' and checks for possible errors using 'infos'
// See also CPU implementation in aten/src/ATen/native/BatchLinearAlgebra.cpp
// Computes eigenvalues (and optionally eigenvectors) of a symmetric/Hermitian
// matrix. `uplo_str` selects which triangle of the input is read; lowercase
// "u"/"l" is accepted (NumPy allows it) and normalized here. The actual
// computation is delegated to the symeig helper.
std::tuple<Tensor, Tensor> _syevd_helper_cuda(const Tensor& self, bool compute_eigenvectors, std::string uplo_str) {
  // It is assumed that uplo_str is either "U" or "L" (possibly lowercase).
  char uplo = std::toupper(uplo_str[0]);
  bool upper = uplo == 'U';  // simplified from the redundant `? true : false`
  return _symeig_helper_cuda(self, compute_eigenvectors, upper);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ svd ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Computes the singular value decomposition of a batch of matrices using
// MAGMA's gesdd driver. `self` holds the (column-major, host-resident) input;
// U, S, VT receive the factors. jobchar is 'A' (all vectors), 'S' (reduced)
// or 'N' (singular values only). On a MAGMA failure the status is stored in
// infos[i] and the function returns early.
template<typename scalar_t>
static void apply_svd(Tensor& self, Tensor& U, Tensor& S, Tensor& VT,
char jobchar, std::vector<int64_t>& infos) {
#ifndef USE_MAGMA
AT_ERROR("svd: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
// value_t is the real type underlying scalar_t; singular values are real.
using value_t = typename c10::scalar_value_type<scalar_t>::type;
auto self_data = self.data_ptr<scalar_t>();
auto U_data = U.data_ptr<scalar_t>();
auto S_data = S.data_ptr<value_t>();
auto VT_data = VT.data_ptr<scalar_t>();
auto self_stride = matrixStride(self);
auto U_stride = matrixStride(U);
auto S_stride = S.size(-1);
auto VT_stride = matrixStride(VT);
auto batchsize = batchCount(self);
magma_vec_t jobz = jobchar == 'A' ? MagmaAllVec : (jobchar == 'S' ? MagmaSomeVec : MagmaNoVec);
magma_int_t m = magma_int_cast(self.size(-2), "m");
magma_int_t n = magma_int_cast(self.size(-1), "n");
auto mn = std::min(m, n);
c10::Storage storage_rwork;
value_t* rwork = nullptr;
// Integer workspace required by the divide-and-conquer driver.
magma_int_t* iwork;
ALLOCATE_ARRAY(iwork, magma_int_t, 8 * mn);
// Real workspace is only needed for complex inputs.
if (isComplexType(at::typeMetaToScalarType(self.dtype()))) {
auto lrwork = computeLRWorkDim(jobchar, m, n);
storage_rwork = pin_memory<value_t>(lrwork);
rwork = static_cast<value_t*>(storage_rwork.data());
}
magma_int_t info = 0;
// Run once, first to get the optimum work size.
// Since we deal with batches of matrices with the same dimensions, doing this outside
// the loop saves (batch_size - 1) workspace queries which would provide the same result
// and (batch_size - 1) calls to allocate and deallocate workspace using at::empty()
magma_int_t lwork = -1;
scalar_t wkopt;
magmaSvd<scalar_t, value_t>(jobz, m, n, self_data, m, S_data, U_data, m, VT_data, n, &wkopt, lwork, rwork, iwork, &info);
lwork = magma_int_cast(real_impl<scalar_t, value_t>(wkopt), "work_size");
scalar_t* work;
ALLOCATE_ARRAY(work, scalar_t, lwork);
for (int64_t i = 0; i < batchsize; i++) {
scalar_t* self_working_ptr = &self_data[i * self_stride];
value_t* S_working_ptr = &S_data[i * S_stride];
scalar_t* U_working_ptr = &U_data[i * U_stride];
scalar_t* VT_working_ptr = &VT_data[i * VT_stride];
// Compute S, U (optionally), VT (optionally)
magmaSvd<scalar_t, value_t>(jobz, m, n, self_working_ptr, m,
S_working_ptr, U_working_ptr, m, VT_working_ptr, n, work, lwork, rwork, iwork, &info);
infos[i] = info;
if (info != 0) {
return;
}
}
#endif
}
// Prepares inputs for apply_svd and moves the results back to the input's
// device. `some` selects the reduced decomposition; `compute_uv` disables
// U/VT computation entirely (they are returned zeroed).
std::tuple<Tensor, Tensor, Tensor> _svd_helper_cuda(const Tensor& self, bool some, bool compute_uv) {
std::vector<int64_t> infos(batchCount(self), 0);
int64_t m = self.size(-2), n = self.size(-1);
int64_t k = std::min(m, n);
char jobchar = compute_uv ? (some ? 'S' : 'A') : 'N';
Tensor U_working_copy, S_working_copy, VT_working_copy;
std::tie(U_working_copy, S_working_copy, VT_working_copy) = _create_U_S_VT(self, some, compute_uv);
if (self.numel() > 0) {
// The input matrix, U, S and VT have to reside in pinned memory.
// Additionally, the input and U have to be in column major format.
// _create_U_S_VT takes care of a part of these requirements (for U, S and VT)
// For the input matrix, this requirements are being taken care of below.
// Specify strides
auto self_col_major_strides = at::detail::defaultStrides(self.sizes());
self_col_major_strides[self.dim() - 2] = 1;
self_col_major_strides[self.dim() - 1] = m;
// Create strided tensor in pinned memory
auto self_working_copy = at::empty_strided(self.sizes(), self_col_major_strides,
at::TensorOptions(at::kCPU).dtype(self.dtype()).pinned_memory(true));
self_working_copy.copy_(self);
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "svd_cuda", [&] {
apply_svd<scalar_t>(self_working_copy, U_working_copy, S_working_copy, VT_working_copy, jobchar, infos);
});
if (self.dim() > 2) {
batchCheckErrors(infos, "svd_cuda");
} else {
singleCheckErrors(infos[0], "svd_cuda");
}
// Move the results from pinned CPU memory back to the input's device,
// preserving strides.
U_working_copy = same_stride_to(U_working_copy, self.options());
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options());
if (compute_uv) {
if (some) {
// Reduced decomposition: keep only the first k columns of VT.
VT_working_copy = VT_working_copy.narrow(-1, 0, k);
}
} else {
// compute_uv == false: MAGMA did not fill U/VT, so zero them out.
VT_working_copy.zero_();
U_working_copy.zero_();
}
} else {
// Empty input: return correctly-shaped, zeroed factors on the input device.
U_working_copy = same_stride_to(U_working_copy, self.options()).zero_();
S_working_copy = same_stride_to(S_working_copy, S_working_copy.options().device(self.device()));
VT_working_copy = same_stride_to(VT_working_copy, self.options()).zero_();
}
return std::make_tuple(U_working_copy, S_working_copy, VT_working_copy);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ lu_solve ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Solves A X = b in place (the solution overwrites b) given the LU
// factorization of A (`lu`) and the pivot indices (`pivots`) using MAGMA.
// The first non-zero MAGMA status encountered is reported through `info`.
template <typename scalar_t>
static void apply_lu_solve(Tensor& b, const Tensor& lu, const Tensor& pivots, int64_t& info) {
#ifndef USE_MAGMA
AT_ERROR("lu_solve: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto b_data = b.data_ptr<scalar_t>();
auto lu_data = lu.data_ptr<scalar_t>();
auto n = lu.size(-2);
auto nrhs = b.size(-1);
int info_tmp = 0;
if (b.dim() == 2) {
// Single matrix: the non-batched driver wants the pivots on the CPU.
Tensor pivots_tmp = pivots.cpu();
magmaLuSolve<scalar_t>(n, nrhs, lu_data, n, pivots_tmp.data_ptr<magma_int_t>(), b_data, n, &info_tmp);
info = info_tmp;
} else {
auto pivots_data = pivots.data_ptr<magma_int_t>();
auto b_stride = matrixStride(b);
auto lu_stride = matrixStride(lu);
auto pivots_stride = pivots.size(-1);
magma_int_t batch_size = magma_int_cast(batchCount(b), "batchCount");
// The batched MAGMA API takes arrays of per-matrix device pointers.
magma_int_t** pivots_array;
scalar_t** lu_array;
scalar_t** b_array;
ALLOCATE_ARRAY(pivots_array, magma_int_t*, batch_size);
ALLOCATE_ARRAY(lu_array, scalar_t*, batch_size);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size);
for (int64_t i = 0; i < batch_size; i++) {
pivots_array[i] = &pivots_data[i * pivots_stride];
b_array[i] = &b_data[i * b_stride];
lu_array[i] = &lu_data[i * lu_stride];
}
MAGMAQueue magma_queue(b.get_device());
// Larger batches are processed in chunks of at most batch_limit matrices
// (presumably a limit of the batched MAGMA API -- TODO confirm).
constexpr int64_t batch_limit = 65535;
// Compute as many mini-batches of size batch_limit as possible.
// The number of "mini"-batches is floor(batch_size / batch_limit)
// and these cover floor(batch_size / batch_limit) * batch_limit matrix solves.
int64_t mini_batches = batch_size / batch_limit, mini_idx;
for (mini_idx = 0; mini_idx < mini_batches * batch_limit; mini_idx += batch_limit) {
scalar_t** lu_array_cur = &lu_array[mini_idx];
scalar_t** b_array_cur = &b_array[mini_idx];
magma_int_t** pivots_array_cur = &pivots_array[mini_idx];
magmaLuSolveBatched<scalar_t>(
n, nrhs, lu_array_cur, n, pivots_array_cur, b_array_cur, n,
info_tmp, batch_limit, magma_queue);
// Stop at the first failing mini-batch.
if (info_tmp != 0) {
break;
}
}
// Compute whatever is left = batch_size - floor(batch_size / batch_limit) * batch_limit
// which concisely is equal to batch_size % batch_limit
if (batch_size % batch_limit != 0 && info_tmp == 0) {
magmaLuSolveBatched<scalar_t>(
n, nrhs, &lu_array[mini_idx], n, &pivots_array[mini_idx], &b_array[mini_idx], n,
info_tmp, batch_size % batch_limit, magma_queue);
}
info = info_tmp;
}
#endif
}
// Solves A X = self given the LU factorization of A (`LU_data`) and its
// pivots (`LU_pivots`). Works on column-major clones so the inputs are not
// modified; returns the solution tensor.
Tensor _lu_solve_helper_cuda(const Tensor& self, const Tensor& LU_data, const Tensor& LU_pivots) {
  int64_t info = 0;
  // PERF: check for empty inputs BEFORE cloning -- the original cloned both
  // operands first and then threw the clones away on this early return.
  if (self.numel() == 0 || LU_data.numel() == 0) {
    return at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
  }
  auto self_working_copy = cloneBatchedColumnMajor(self);
  auto LU_data_working_copy = cloneBatchedColumnMajor(LU_data);
  auto LU_pivots_working_copy = LU_pivots.is_contiguous() ? LU_pivots : LU_pivots.contiguous();
  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(self.scalar_type(), "lu_solve_cuda", [&]{
    apply_lu_solve<scalar_t>(self_working_copy, LU_data_working_copy, LU_pivots_working_copy, info);
  });
  // A non-zero info here indicates an invalid argument, not a singular matrix.
  TORCH_CHECK(info == 0, "MAGMA lu_solve : invalid argument: ", -info);
  return self_working_copy;
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
2537c654018fc53be4aa63dbba3e6be9dff42d05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// System includes
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// Code-specific includes
#include <mex.h>
// Declare texture reference for 2D float texture
texture<float, hipTextureType2D, hipReadModeElementType> rf_tex;
// Function prototypes
void cleanup();
void xcorr(int nlhs, mxArray *plhs[], float *rfdata, int nsamps, int nsteps, int nbeams, int srchsz, int kernsz);
// CUDA kernels
#include "xcorr1d_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// Gateway function to MATLAB (main function)
// MATLAB gateway: expects (rfdata, srchsz, kernsz). rfdata is
// (fast time x slow time [x beams]) of class single or double; doubles are
// converted to a temporary single-precision copy for the GPU.
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
if (nrhs != 3)
mexErrMsgTxt("Wrong number of inputs.\n");
if (mxGetClassID(prhs[0]) != mxDOUBLE_CLASS && mxGetClassID(prhs[0]) != mxSINGLE_CLASS)
mexErrMsgTxt("The input rfdata must be of class single or double.\n");
float *rfdata;
// if (mxGetNumberOfDimensions(prhs[0]) > 2)
// mexErrMsgTxt("Only have functionality for 1D xcorr for now. Input must be in (fast time x slow time).\n");
if (mxGetClassID(prhs[0]) == mxDOUBLE_CLASS) {
// Double input: convert element-wise into an mxMalloc'd float buffer.
int numel = mxGetNumberOfElements(prhs[0]);
double *dat = mxGetPr(prhs[0]);
rfdata = (float *)mxMalloc(sizeof(float)*numel);
for (int i = 0; i < numel; i++)
rfdata[i] = (float)dat[i];
}
else
// Single input: alias MATLAB's own data; do NOT free this pointer.
rfdata = (float *)mxGetData(prhs[0]);
int srchsz = mxGetScalar(prhs[1]);
int kernsz = mxGetScalar(prhs[2]);
// Get dimensions of data
mwSize ndims = mxGetNumberOfDimensions(prhs[0]);
const mwSize *dims;
dims = mxGetDimensions(prhs[0]);
int nsamps = dims[0];
int nsteps = dims[1];
// Fold all trailing dimensions into the beam count.
int nbeams = 1; for (int i = 2; i < ndims; i++) nbeams *= dims[i];
// Run cross-correlation peak detector script
xcorr(nlhs, plhs, rfdata, nsamps, nsteps, nbeams, srchsz, kernsz);
// Only the converted copy is ours to free.
if (mxGetClassID(prhs[0]) == mxDOUBLE_CLASS)
mxFree(rfdata);
// Release all device state before returning to MATLAB.
hipDeviceReset();
}
// Runs the 1-D cross-correlation peak detector on the GPU.
// rfdata holds (nsamps x nsteps x nbeams) RF data; the two MATLAB outputs are
// (nsamps x nsteps-1 x nbeams) single-precision arrays:
//   plhs[0] = displacement estimates, plhs[1] = correlation coefficients.
// srchsz/kernsz are the search-window and kernel sizes passed to the kernel.
void xcorr(int nlhs, mxArray *plhs[], float *rfdata, int nsamps, int nsteps, int nbeams, int srchsz, int kernsz) {
    // RF data is read through a 2-D texture: clamped addressing with hardware
    // linear filtering (sub-sample interpolation in fast time).
    hipChannelFormatDesc channelDescFLOAT = hipCreateChannelDesc<float>();
    rf_tex.addressMode[0] = hipAddressModeClamp;
    rf_tex.addressMode[1] = hipAddressModeClamp;
    rf_tex.filterMode = hipFilterModeLinear;
    rf_tex.normalized = false;

    float *rf_d, *disp_d, *ccs_d;
    size_t pitch;
    // Pitched allocation keeps each fast-time row aligned for the texture.
    hipMallocPitch(&rf_d, &pitch, sizeof(float)*nsamps, nsteps*nbeams);
    hipMalloc((void **)&disp_d, sizeof(float)*nsamps*(nsteps-1)*nbeams);
    hipMalloc((void **)&ccs_d, sizeof(float)*nsamps*(nsteps-1)*nbeams);
    hipMemcpy2D(rf_d, pitch, rfdata, sizeof(float)*nsamps,
                sizeof(float)*nsamps, nsteps*nbeams, hipMemcpyHostToDevice);
    hipBindTexture2D(NULL, rf_tex, rf_d, channelDescFLOAT, nsamps, nsteps*nbeams, pitch);
    hipMemset(disp_d, 0, sizeof(float)*nsamps*(nsteps-1)*nbeams);

    // The +1 rounds the grid up so partial tiles are covered; the kernel is
    // expected to bounds-check its indices.
    dim3 dimB(16, 16, 1);
    dim3 dimG(1, ceil(nsteps/dimB.y)+1, ceil(nbeams/dimB.z)+1);
    for (int blk = 0; blk < ceil(nsamps/dimB.x)+1; blk++) {
        hipLaunchKernelGGL(( xcorr1d_kernel), dim3(dimG), dim3(dimB), 0, 0, disp_d, ccs_d, nsamps, nsteps, nbeams, srchsz, kernsz, blk);
        hipDeviceSynchronize();
    }

    // Allocate the MATLAB outputs and copy the results back to the host.
    mwSize dims[3] = {nsamps, nsteps-1, nbeams};
    plhs[0] = mxCreateNumericArray(3, dims, mxSINGLE_CLASS, mxREAL);
    plhs[1] = mxCreateNumericArray(3, dims, mxSINGLE_CLASS, mxREAL);
    float *disp_h, *ccs_h;
    disp_h = (float *)mxGetData(plhs[0]);
    ccs_h = (float *)mxGetData(plhs[1]);
    hipUnbindTexture(rf_tex);
    hipFree(rf_d);
    hipMemcpy(disp_h, disp_d,sizeof(float)*nsamps*(nsteps-1)*nbeams, hipMemcpyDeviceToHost);
    hipMemcpy(ccs_h, ccs_d, sizeof(float)*nsamps*(nsteps-1)*nbeams, hipMemcpyDeviceToHost);
    // BUGFIX: free the result buffers instead of leaking them until the
    // caller's hipDeviceReset().
    hipFree(disp_d);
    hipFree(ccs_d);
}
| 2537c654018fc53be4aa63dbba3e6be9dff42d05.cu | // System includes
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// Code-specific includes
#include <mex.h>
// Declare texture reference for 2D float texture
texture<float, cudaTextureType2D, cudaReadModeElementType> rf_tex;
// Function prototypes
void cleanup();
void xcorr(int nlhs, mxArray *plhs[], float *rfdata, int nsamps, int nsteps, int nbeams, int srchsz, int kernsz);
// CUDA kernels
#include "xcorr1d_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// Gateway function to MATLAB (main function)
// MATLAB gateway: expects (rfdata, srchsz, kernsz). rfdata is
// (fast time x slow time [x beams]) of class single or double; doubles are
// converted to a temporary single-precision copy for the GPU.
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
if (nrhs != 3)
mexErrMsgTxt("Wrong number of inputs.\n");
if (mxGetClassID(prhs[0]) != mxDOUBLE_CLASS && mxGetClassID(prhs[0]) != mxSINGLE_CLASS)
mexErrMsgTxt("The input rfdata must be of class single or double.\n");
float *rfdata;
// if (mxGetNumberOfDimensions(prhs[0]) > 2)
// mexErrMsgTxt("Only have functionality for 1D xcorr for now. Input must be in (fast time x slow time).\n");
if (mxGetClassID(prhs[0]) == mxDOUBLE_CLASS) {
// Double input: convert element-wise into an mxMalloc'd float buffer.
int numel = mxGetNumberOfElements(prhs[0]);
double *dat = mxGetPr(prhs[0]);
rfdata = (float *)mxMalloc(sizeof(float)*numel);
for (int i = 0; i < numel; i++)
rfdata[i] = (float)dat[i];
}
else
// Single input: alias MATLAB's own data; do NOT free this pointer.
rfdata = (float *)mxGetData(prhs[0]);
int srchsz = mxGetScalar(prhs[1]);
int kernsz = mxGetScalar(prhs[2]);
// Get dimensions of data
mwSize ndims = mxGetNumberOfDimensions(prhs[0]);
const mwSize *dims;
dims = mxGetDimensions(prhs[0]);
int nsamps = dims[0];
int nsteps = dims[1];
// Fold all trailing dimensions into the beam count.
int nbeams = 1; for (int i = 2; i < ndims; i++) nbeams *= dims[i];
// Run cross-correlation peak detector script
xcorr(nlhs, plhs, rfdata, nsamps, nsteps, nbeams, srchsz, kernsz);
// Only the converted copy is ours to free.
if (mxGetClassID(prhs[0]) == mxDOUBLE_CLASS)
mxFree(rfdata);
// Release all device state before returning to MATLAB.
cudaDeviceReset();
}
// Runs the 1-D cross-correlation peak detector on the GPU.
// rfdata holds (nsamps x nsteps x nbeams) RF data; the two MATLAB outputs are
// (nsamps x nsteps-1 x nbeams) single-precision arrays:
//   plhs[0] = displacement estimates, plhs[1] = correlation coefficients.
// srchsz/kernsz are the search-window and kernel sizes passed to the kernel.
void xcorr(int nlhs, mxArray *plhs[], float *rfdata, int nsamps, int nsteps, int nbeams, int srchsz, int kernsz) {
    // RF data is read through a 2-D texture: clamped addressing with hardware
    // linear filtering (sub-sample interpolation in fast time).
    cudaChannelFormatDesc channelDescFLOAT = cudaCreateChannelDesc<float>();
    rf_tex.addressMode[0] = cudaAddressModeClamp;
    rf_tex.addressMode[1] = cudaAddressModeClamp;
    rf_tex.filterMode = cudaFilterModeLinear;
    rf_tex.normalized = false;

    float *rf_d, *disp_d, *ccs_d;
    size_t pitch;
    // Pitched allocation keeps each fast-time row aligned for the texture.
    cudaMallocPitch(&rf_d, &pitch, sizeof(float)*nsamps, nsteps*nbeams);
    cudaMalloc((void **)&disp_d,sizeof(float)*nsamps*(nsteps-1)*nbeams);
    cudaMalloc((void **)&ccs_d, sizeof(float)*nsamps*(nsteps-1)*nbeams);
    cudaMemcpy2D(rf_d, pitch, rfdata, sizeof(float)*nsamps,
                 sizeof(float)*nsamps, nsteps*nbeams, cudaMemcpyHostToDevice);
    cudaBindTexture2D(NULL, rf_tex, rf_d, channelDescFLOAT, nsamps, nsteps*nbeams, pitch);
    cudaMemset(disp_d, 0, sizeof(float)*nsamps*(nsteps-1)*nbeams);

    // The +1 rounds the grid up so partial tiles are covered; the kernel is
    // expected to bounds-check its indices.
    dim3 dimB(16, 16, 1);
    dim3 dimG(1, ceil(nsteps/dimB.y)+1, ceil(nbeams/dimB.z)+1);
    for (int blk = 0; blk < ceil(nsamps/dimB.x)+1; blk++) {
        xcorr1d_kernel<<<dimG, dimB, 0>>>(disp_d, ccs_d, nsamps, nsteps, nbeams, srchsz, kernsz, blk);
        cudaDeviceSynchronize();
    }

    // Allocate the MATLAB outputs and copy the results back to the host.
    mwSize dims[3] = {nsamps, nsteps-1, nbeams};
    plhs[0] = mxCreateNumericArray(3, dims, mxSINGLE_CLASS, mxREAL);
    plhs[1] = mxCreateNumericArray(3, dims, mxSINGLE_CLASS, mxREAL);
    float *disp_h, *ccs_h;
    disp_h = (float *)mxGetData(plhs[0]);
    ccs_h = (float *)mxGetData(plhs[1]);
    cudaUnbindTexture(rf_tex);
    cudaFree(rf_d);
    cudaMemcpy(disp_h, disp_d,sizeof(float)*nsamps*(nsteps-1)*nbeams, cudaMemcpyDeviceToHost);
    cudaMemcpy(ccs_h, ccs_d, sizeof(float)*nsamps*(nsteps-1)*nbeams, cudaMemcpyDeviceToHost);
    // BUGFIX: free the result buffers instead of leaking them until the
    // caller's cudaDeviceReset().
    cudaFree(disp_d);
    cudaFree(ccs_d);
}
292cbe919cd6c174d2c2791753eb0ae548c8dd7d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @brief
* log
*
* @copyright
* Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <gtest/gtest.h>
#include "k2/csrc/log.h"
namespace k2 {
// Host-side smoke test: every logging level emits, and every comparison
// check macro passes for the chosen values.
TEST(Log, Cpu) {
  K2_LOG(DEBUG) << "Debug message";
  K2_LOG(INFO) << "Info message";
  K2_LOG(WARNING) << "Warning message";
  K2_LOG(ERROR) << "Error message";
  K2_DLOG(INFO) << "This is printed only in debug mode";
  int32_t small = 10;
  int32_t middle = 20;
  int32_t large = 30;
  int32_t same_as_small = small;
  K2_DCHECK_EQ(small, same_as_small) << "This is checked only in debug mode";
  K2_CHECK(small == same_as_small) << "failed";
  K2_CHECK_EQ(small, same_as_small) << "failed";
  K2_CHECK_NE(small, middle) << "failed";
  K2_CHECK_LE(small, middle) << "failed";
  K2_CHECK_LT(small, middle) << "failed";
  K2_CHECK_GE(large, middle) << "failed";
  K2_CHECK_GT(large, middle) << "failed";
}
// Device-side exercise of the logging/checking macros: increments *b and
// asserts the result equals a. Launched with a single thread by TEST(Log, Cuda).
__global__ void DummyKernel(int32_t *b, int32_t a) {
K2_DLOG(INFO) << "In kernel";
K2_DCHECK_LT(*b, a); // enabled only in debug mode
*b += 1;
K2_CHECK_EQ(*b, a);
K2_DLOG(DEBUG) << "Done";
}
// Round-trips a value through device memory and a single-thread kernel,
// exercising K2_CHECK_CUDA_ERROR on every runtime call.
TEST(Log, Cuda) {
K2_LOG(INFO) << "Test log for cuda";
int32_t a = 10;
int32_t *b = nullptr;
auto ret = hipMalloc(&b, sizeof(a));
K2_CHECK_EQ(ret, hipSuccess) << "Failed to allocate memory";
ret = hipMemcpy(b, &a, sizeof(a), hipMemcpyHostToDevice);
K2_CHECK_CUDA_ERROR(ret) << "Failed to copy memory to gpu";
// The kernel increments *b and checks it equals its second argument.
hipLaunchKernelGGL(( DummyKernel), dim3(1), dim3(1), 0, 0, b, a + 1);
int32_t c = 0;
ret = hipMemcpy(&c, b, sizeof(a), hipMemcpyDeviceToHost);
K2_CHECK_CUDA_ERROR(ret) << "Failed to copy memory from gpu";
K2_CHECK_EQ(a + 1, c) << "Error in the kernel!";
ret = hipFree(b);
K2_CHECK_CUDA_ERROR(ret) << "Failed to free gpu memory";
}
// Verifies that failing checks abort the process (gtest death tests):
// each assertion deliberately violates its check macro.
TEST(LogDeathTest, NegativeCases) {
ASSERT_DEATH(K2_LOG(FATAL) << "This will crash the program", "");
int32_t a = 10;
int32_t b = 20;
int32_t c = a;
ASSERT_DEATH(K2_CHECK_EQ(a, b), "");
ASSERT_DEATH(K2_CHECK_NE(a, c), "");
ASSERT_DEATH(K2_CHECK_LE(b, a), "");
ASSERT_DEATH(K2_CHECK_LT(b, a), "");
ASSERT_DEATH(K2_CHECK_GE(a, b), "");
ASSERT_DEATH(K2_CHECK_GT(a, b), "");
// Non-success runtime statuses must also trigger the error check.
auto ret = hipErrorMemoryAllocation;
ASSERT_DEATH(K2_CHECK_CUDA_ERROR(ret), "");
ret = hipErrorAssert;
ASSERT_DEATH(K2_CHECK_CUDA_ERROR(ret), "");
// NOTE: normally we do not need to
// check if NDEBUG is defined in order
// to use K2_DCHECK_*. ASSERT_DEATH
// expects that the statement will make
// the program crash and this is only
// possible for the debug build,
// so we have to add a guard here.
#if !defined(NDEBUG)
K2_LOG(INFO) << "Check for debug build";
ASSERT_DEATH(K2_DLOG(FATAL) << "This will crash the program", "");
ASSERT_DEATH(K2_DCHECK_EQ(a, b), "");
ASSERT_DEATH(K2_DCHECK_NE(a, c), "");
ASSERT_DEATH(K2_DCHECK_LE(b, a), "");
ASSERT_DEATH(K2_DCHECK_LT(b, a), "");
ASSERT_DEATH(K2_DCHECK_GE(a, b), "");
ASSERT_DEATH(K2_DCHECK_GT(a, b), "");
ret = hipErrorInitializationError;
ASSERT_DEATH(K2_DCHECK_CUDA_ERROR(ret), "");
#endif
}
} // namespace k2
| 292cbe919cd6c174d2c2791753eb0ae548c8dd7d.cu | /**
* @brief
* log
*
* @copyright
* Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <gtest/gtest.h>
#include "k2/csrc/log.h"
namespace k2 {
TEST(Log, Cpu) {
K2_LOG(DEBUG) << "Debug message";
K2_LOG(INFO) << "Info message";
K2_LOG(WARNING) << "Warning message";
K2_LOG(ERROR) << "Error message";
K2_DLOG(INFO) << "This is printed only in debug mode";
int32_t a = 10;
int32_t b = 20;
int32_t c = 30;
int32_t d = a;
K2_DCHECK_EQ(a, d) << "This is checked only in debug mode";
K2_CHECK(a == d) << "failed";
K2_CHECK_EQ(a, d) << "failed";
K2_CHECK_NE(a, b) << "failed";
K2_CHECK_LE(a, b) << "failed";
K2_CHECK_LT(a, b) << "failed";
K2_CHECK_GE(c, b) << "failed";
K2_CHECK_GT(c, b) << "failed";
}
__global__ void DummyKernel(int32_t *b, int32_t a) {
K2_DLOG(INFO) << "In kernel";
K2_DCHECK_LT(*b, a); // enabled only in debug mode
*b += 1;
K2_CHECK_EQ(*b, a);
K2_DLOG(DEBUG) << "Done";
}
TEST(Log, Cuda) {
K2_LOG(INFO) << "Test log for cuda";
int32_t a = 10;
int32_t *b = nullptr;
auto ret = cudaMalloc(&b, sizeof(a));
K2_CHECK_EQ(ret, cudaSuccess) << "Failed to allocate memory";
ret = cudaMemcpy(b, &a, sizeof(a), cudaMemcpyHostToDevice);
K2_CHECK_CUDA_ERROR(ret) << "Failed to copy memory to gpu";
DummyKernel<<<1, 1>>>(b, a + 1);
int32_t c = 0;
ret = cudaMemcpy(&c, b, sizeof(a), cudaMemcpyDeviceToHost);
K2_CHECK_CUDA_ERROR(ret) << "Failed to copy memory from gpu";
K2_CHECK_EQ(a + 1, c) << "Error in the kernel!";
ret = cudaFree(b);
K2_CHECK_CUDA_ERROR(ret) << "Failed to free gpu memory";
}
TEST(LogDeathTest, NegativeCases) {
ASSERT_DEATH(K2_LOG(FATAL) << "This will crash the program", "");
int32_t a = 10;
int32_t b = 20;
int32_t c = a;
ASSERT_DEATH(K2_CHECK_EQ(a, b), "");
ASSERT_DEATH(K2_CHECK_NE(a, c), "");
ASSERT_DEATH(K2_CHECK_LE(b, a), "");
ASSERT_DEATH(K2_CHECK_LT(b, a), "");
ASSERT_DEATH(K2_CHECK_GE(a, b), "");
ASSERT_DEATH(K2_CHECK_GT(a, b), "");
auto ret = cudaErrorMemoryAllocation;
ASSERT_DEATH(K2_CHECK_CUDA_ERROR(ret), "");
ret = cudaErrorAssert;
ASSERT_DEATH(K2_CHECK_CUDA_ERROR(ret), "");
// NOTE: normally we do not need to
// check if NDEBUG is defined in order
// to use K2_DCHECK_*. ASSERT_DEATH
// expects that the statement will make
// the program crash and this is only
// possible for the debug build,
// so we have to add a guard here.
#if !defined(NDEBUG)
K2_LOG(INFO) << "Check for debug build";
ASSERT_DEATH(K2_DLOG(FATAL) << "This will crash the program", "");
ASSERT_DEATH(K2_DCHECK_EQ(a, b), "");
ASSERT_DEATH(K2_DCHECK_NE(a, c), "");
ASSERT_DEATH(K2_DCHECK_LE(b, a), "");
ASSERT_DEATH(K2_DCHECK_LT(b, a), "");
ASSERT_DEATH(K2_DCHECK_GE(a, b), "");
ASSERT_DEATH(K2_DCHECK_GT(a, b), "");
ret = cudaErrorInitializationError;
ASSERT_DEATH(K2_DCHECK_CUDA_ERROR(ret), "");
#endif
}
} // namespace k2
|
13fe6555b4899539edea41b4f8106adde2aa02b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "FERNIntegrator.cuh"
#include "Globals.cuh"
#include "kernels_hip.cuh"
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <time.h>
FERNIntegrator::FERNIntegrator()
{
// TODO
// Hardcoded for now. Make this the default for a command line
// argument or something.
blocks.x = 1;
threads.x = 512;
}
FERNIntegrator::~FERNIntegrator()
{
// TODO
// Implement the freeing methods
// devReactionData.hipFree();
// devNetworkData.hipFree();
// reactionData.free();
// networkData.free();
}
void FERNIntegrator::initializeCuda()
{
// Ensure that a valid device (GPU) exists
printf("Checking for valid device...\n");
printf("\n");
devcheck(0);
// Check available memory on the GPU
size_t msizeFree;
size_t msizeTotal;
hipMemGetInfo(&msizeFree, &msizeTotal);
printf("GPU total memory: %d\n", (int)msizeTotal);
printf("GPU free memory: %d\n", (int)msizeFree);
// Following memory queries not supported on GF 8600 GT
size_t msize;
hipDeviceGetLimit(&msize, hipLimitMallocHeapSize);
printf("GPU heap size: %d\n", (int)msize);
hipDeviceGetLimit(&msize, hipLimitStackSize);
printf("GPU stack size: %d\n", (int)msize);
// Set and print the printf FIFO size
size_t printfSize = 1 << 20; // 1 MiB
hipDeviceSetLimit(hipLimitPrintfFifoSize, printfSize);
hipDeviceGetLimit(&printfSize, hipLimitPrintfFifoSize);
printf("printf FIFO size: %lu\n", printfSize);
// Set the shared memory size
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
printf("\n");
}
void FERNIntegrator::prepareKernel()
{
printf("Preparing kernel...\n");
// The network should be copied to the device
// before the integration kernel is launched.
// The memory blocks will last the lifetime of the FERNIntegrator.
devNetwork.setSizes(network);
devNetwork.cudaAllocate();
checkCudaErrors();
devNetwork.cudaCopy(network, hipMemcpyHostToDevice);
checkCudaErrors();
}
void FERNIntegrator::integrate(IntegrationData &integrationData)
{
// Set up stream
hipStream_t stream;
hipStreamCreate(&stream);
// The IntegrationData should be copied to the device upon each
// integration.
printf("Copying integration data...\n");
IntegrationData devIntegrationData;
devIntegrationData.cudaAllocate(network.species);
devIntegrationData.cudaCopy(integrationData, hipMemcpyHostToDevice);
checkCudaErrors();
Globals devGlobals;
devGlobals.cudaAllocate(network);
Globals *devGlobalsPtr;
hipMalloc(&devGlobalsPtr, sizeof(Globals));
hipMemcpy(devGlobalsPtr, &devGlobals, sizeof(Globals), hipMemcpyHostToDevice);
checkCudaErrors();
// Set up shared memory
size_t sharedSize = integrateNetwork_sharedSize(network);
printf("%d bytes of shared memory allocated.\n", (int) sharedSize);
// Set up timer
hipEvent_t start, end;
float duration;
hipEventCreate(&start);
hipEventCreate(&end);
printf("Launching integration kernel...\n");
hipEventRecord(start);
hipLaunchKernelGGL(( integrateNetwork), dim3(blocks), dim3(threads), sharedSize, stream,
devNetwork,
devIntegrationData,
devGlobalsPtr
);
hipStreamSynchronize(stream);
hipEventRecord(end);
hipEventSynchronize(end);
checkCudaErrors();
hipEventElapsedTime(&duration, start, end);
printf("Kernel has finished in %f seconds\n", duration / 1000.0);
integrationData.cudaCopy(devIntegrationData, hipMemcpyDeviceToHost);
checkCudaErrors();
// TODO
// Clean up the device IntegrationData
// devIntegrationData.hipFree();
// devGlobals.hipFree();
hipStreamDestroy(stream);
}
void FERNIntegrator::checkCudaErrors()
{
// Sync the default stream before getting the last error
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
printf("***CUDA error: %s\n", hipGetErrorString(error));
// Crash
abort();
}
}
// Copied from http://www.ncsa.illinois.edu/UserInfo/Training/Workshops/
// CUDA/presentations/tutorial-CUDA.html
void FERNIntegrator::devcheck(int gpudevice)
{
int device_count;
int device;
// Get the number of non-emulation devices detected
hipGetDeviceCount(&device_count);
if (gpudevice > device_count)
{
printf("gpudevice >= device_count ... exiting\n");
exit(1);
}
hipError_t cudareturn;
hipDeviceProp_t deviceProp;
// hipGetDeviceProperties() is also demonstrated in the deviceQuery example
// of the sdk projects directory
hipGetDeviceProperties(&deviceProp, gpudevice);
printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n",
deviceProp.major, deviceProp.minor);
if (deviceProp.major > 999)
{
printf("warning, CUDA Device Emulation (CPU) detected, exiting\n");
exit(1);
}
// choose a cuda device for kernel execution
cudareturn = hipSetDevice(gpudevice);
if (cudareturn == hipErrorInvalidDevice)
{
printf("hipSetDevice returned hipErrorInvalidDevice\n");
exit(1);
}
else
{
// double check that device was properly selected
hipGetDevice(&device);
printf("hipGetDevice()=%d\n", device);
}
}
| 13fe6555b4899539edea41b4f8106adde2aa02b7.cu |
#include "FERNIntegrator.cuh"
#include "Globals.cuh"
#include "kernels.cuh"
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <time.h>
FERNIntegrator::FERNIntegrator()
{
// TODO
// Hardcoded for now. Make this the default for a command line
// argument or something.
blocks.x = 1;
threads.x = 512;
}
FERNIntegrator::~FERNIntegrator()
{
// TODO
// Implement the freeing methods
// devReactionData.cudaFree();
// devNetworkData.cudaFree();
// reactionData.free();
// networkData.free();
}
void FERNIntegrator::initializeCuda()
{
// Ensure that a valid device (GPU) exists
printf("Checking for valid device...\n");
printf("\n");
devcheck(0);
// Check available memory on the GPU
size_t msizeFree;
size_t msizeTotal;
cudaMemGetInfo(&msizeFree, &msizeTotal);
printf("GPU total memory: %d\n", (int)msizeTotal);
printf("GPU free memory: %d\n", (int)msizeFree);
// Following memory queries not supported on GF 8600 GT
size_t msize;
cudaDeviceGetLimit(&msize, cudaLimitMallocHeapSize);
printf("GPU heap size: %d\n", (int)msize);
cudaDeviceGetLimit(&msize, cudaLimitStackSize);
printf("GPU stack size: %d\n", (int)msize);
// Set and print the printf FIFO size
size_t printfSize = 1 << 20; // 1 MiB
cudaDeviceSetLimit(cudaLimitPrintfFifoSize, printfSize);
cudaDeviceGetLimit(&printfSize, cudaLimitPrintfFifoSize);
printf("printf FIFO size: %lu\n", printfSize);
// Set the shared memory size
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
printf("\n");
}
void FERNIntegrator::prepareKernel()
{
printf("Preparing kernel...\n");
// The network should be copied to the device
// before the integration kernel is launched.
// The memory blocks will last the lifetime of the FERNIntegrator.
devNetwork.setSizes(network);
devNetwork.cudaAllocate();
checkCudaErrors();
devNetwork.cudaCopy(network, cudaMemcpyHostToDevice);
checkCudaErrors();
}
void FERNIntegrator::integrate(IntegrationData &integrationData)
{
// Set up stream
cudaStream_t stream;
cudaStreamCreate(&stream);
// The IntegrationData should be copied to the device upon each
// integration.
printf("Copying integration data...\n");
IntegrationData devIntegrationData;
devIntegrationData.cudaAllocate(network.species);
devIntegrationData.cudaCopy(integrationData, cudaMemcpyHostToDevice);
checkCudaErrors();
Globals devGlobals;
devGlobals.cudaAllocate(network);
Globals *devGlobalsPtr;
cudaMalloc(&devGlobalsPtr, sizeof(Globals));
cudaMemcpy(devGlobalsPtr, &devGlobals, sizeof(Globals), cudaMemcpyHostToDevice);
checkCudaErrors();
// Set up shared memory
size_t sharedSize = integrateNetwork_sharedSize(network);
printf("%d bytes of shared memory allocated.\n", (int) sharedSize);
// Set up timer
cudaEvent_t start, end;
float duration;
cudaEventCreate(&start);
cudaEventCreate(&end);
printf("Launching integration kernel...\n");
cudaEventRecord(start);
integrateNetwork<<<blocks, threads, sharedSize, stream>>>(
devNetwork,
devIntegrationData,
devGlobalsPtr
);
cudaStreamSynchronize(stream);
cudaEventRecord(end);
cudaEventSynchronize(end);
checkCudaErrors();
cudaEventElapsedTime(&duration, start, end);
printf("Kernel has finished in %f seconds\n", duration / 1000.0);
integrationData.cudaCopy(devIntegrationData, cudaMemcpyDeviceToHost);
checkCudaErrors();
// TODO
// Clean up the device IntegrationData
// devIntegrationData.cudaFree();
// devGlobals.cudaFree();
cudaStreamDestroy(stream);
}
void FERNIntegrator::checkCudaErrors()
{
// Sync the default stream before getting the last error
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("***CUDA error: %s\n", cudaGetErrorString(error));
// Crash
abort();
}
}
// Copied from http://www.ncsa.illinois.edu/UserInfo/Training/Workshops/
// CUDA/presentations/tutorial-CUDA.html
void FERNIntegrator::devcheck(int gpudevice)
{
int device_count;
int device;
// Get the number of non-emulation devices detected
cudaGetDeviceCount(&device_count);
if (gpudevice > device_count)
{
printf("gpudevice >= device_count ... exiting\n");
exit(1);
}
cudaError_t cudareturn;
cudaDeviceProp deviceProp;
// cudaGetDeviceProperties() is also demonstrated in the deviceQuery example
// of the sdk projects directory
cudaGetDeviceProperties(&deviceProp, gpudevice);
printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n",
deviceProp.major, deviceProp.minor);
if (deviceProp.major > 999)
{
printf("warning, CUDA Device Emulation (CPU) detected, exiting\n");
exit(1);
}
// choose a cuda device for kernel execution
cudareturn = cudaSetDevice(gpudevice);
if (cudareturn == cudaErrorInvalidDevice)
{
printf("cudaSetDevice returned cudaErrorInvalidDevice\n");
exit(1);
}
else
{
// double check that device was properly selected
cudaGetDevice(&device);
printf("cudaGetDevice()=%d\n", device);
}
}
|
b1161e5a56120cdca85e6621a99c0b949c952d20.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
#include "../include/ContAcq-IntClk.h"
// includes CUDA
#include <hip/hip_runtime.h>
#define MAX_THREADS_PER_BLOCK 256
#define THREADS_PER_BLOCK 256
#define LINE_SIZE 128
#define SETS 4
#define ASSOC 24
#define SIMD_WIDTH 32
// Variables
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
#define ITERATIONS REPLACE_ITERATIONS
texture<float,1,hipReadModeElementType> texmem1;
texture<float,1,hipReadModeElementType> texmem2;
texture<float,1,hipReadModeElementType> texmem3;
texture<float,1,hipReadModeElementType> texmem4;
texture<float,1,hipReadModeElementType> texmem5;
texture<float,1,hipReadModeElementType> texmem6;
texture<float,1,hipReadModeElementType> texmem7;
texture<float,1,hipReadModeElementType> texmem9;
texture<float,1,hipReadModeElementType> texmem8;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
__constant__ float ConstArray3[THREADS_PER_BLOCK];
__constant__ float ConstArray4[THREADS_PER_BLOCK];
__constant__ float ConstArray5[THREADS_PER_BLOCK];
__constant__ float ConstArray6[THREADS_PER_BLOCK];
__constant__ float ConstArray7[THREADS_PER_BLOCK];
__constant__ float ConstArray8[THREADS_PER_BLOCK];
__global__ void tex_bm_kernel( float* out, unsigned size)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
__device__ __shared__ float I1[THREADS_PER_BLOCK];
__device__ __shared__ float I2[THREADS_PER_BLOCK];
__device__ __shared__ float I3[THREADS_PER_BLOCK];
__device__ __shared__ float I4[THREADS_PER_BLOCK];
__device__ __shared__ float I5[THREADS_PER_BLOCK];
__device__ __shared__ float I6[THREADS_PER_BLOCK];
__device__ __shared__ float I7[THREADS_PER_BLOCK];
__device__ __shared__ float I8[THREADS_PER_BLOCK];
I1[tid%THREADS_PER_BLOCK] = tid;
I2[tid%THREADS_PER_BLOCK] = tid/2;
I3[tid%THREADS_PER_BLOCK] = 2*tid;
I4[tid%THREADS_PER_BLOCK] = tid+2;
I5[tid%THREADS_PER_BLOCK] = 5*tid;
I6[tid%THREADS_PER_BLOCK] = tid/2;
I7[tid%THREADS_PER_BLOCK] = tid*10;
I8[tid%THREADS_PER_BLOCK] = tid/2;
if(tid < size){
for(unsigned i=0; i<ITERATIONS; ++i){
out[tid] = tex1Dfetch(texmem1,tid);
out[tid*2] = tex1Dfetch(texmem2,tid)+ConstArray1[(tid+i)%THREADS_PER_BLOCK];
out[tid*3] = tex1Dfetch(texmem3,tid)* I1[(tid+i)%THREADS_PER_BLOCK];
out[tid*4] = tex1Dfetch(texmem4,tid)*I2[tid%THREADS_PER_BLOCK];
out[tid*5] =tex1Dfetch(texmem5,tid)/ConstArray2[(tid+i)%THREADS_PER_BLOCK];
out[tid*6] = tex1Dfetch(texmem6,tid)+I6[(tid+i)%THREADS_PER_BLOCK];
out[tid*7] = tex1Dfetch(texmem7,tid)+I4[(tid+i)%THREADS_PER_BLOCK];
out[tid*8] = exp(tex1Dfetch(texmem8,tid));
out[tid*9] = sqrt(abs(tex1Dfetch(texmem9,tid)));
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
float array1[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array1[i] = rand() / RAND_MAX;
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array2[i] = rand() / RAND_MAX;
}
float array3[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array3[i] = rand() / RAND_MAX;
}
float array4[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array4[i] = rand() / RAND_MAX;
}
float array5[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array5[i] = rand() / RAND_MAX;
}
float array6[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array6[i] = rand() / RAND_MAX;
}
float array7[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array7[i] = rand() / RAND_MAX;
}
float array8[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array8[i] = rand() / RAND_MAX;
}
hipMemcpyToSymbol("ConstArray1", array1, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol("ConstArray2", array2, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol("ConstArray3", array3, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol("ConstArray4", array4, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol("ConstArray5", array5, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol("ConstArray6", array6, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol("ConstArray7", array7, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol("ConstArray8", array8, sizeof(float) * THREADS_PER_BLOCK );
int texmem_size = LINE_SIZE*SETS*ASSOC;
float *host_texture1 = (float*) malloc(texmem_size*sizeof(float));
for (int i=0; i< texmem_size; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
float *device_texture5;
float *device_texture6;
float *device_texture7;
float *device_texture8;
float *device_texture9;
float *host_out = (float*) malloc(texmem_size*sizeof(float)*10);
float *device_out;
hipMalloc((void**) &device_texture1, texmem_size);
hipMalloc((void**) &device_texture2, texmem_size);
hipMalloc((void**) &device_texture3, texmem_size);
hipMalloc((void**) &device_texture4, texmem_size);
hipMalloc((void**) &device_texture5, texmem_size);
hipMalloc((void**) &device_texture6, texmem_size);
hipMalloc((void**) &device_texture7, texmem_size);
hipMalloc((void**) &device_texture8, texmem_size);
hipMalloc((void**) &device_texture9, texmem_size);
hipMalloc((void**) &device_out, texmem_size*10);
hipMemcpy(device_texture1, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture2, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture3, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture4, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture5, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture6, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture7, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture8, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(device_texture9, host_texture1, texmem_size*sizeof(float), hipMemcpyHostToDevice);
hipBindTexture(0, texmem1, device_texture1, texmem_size);
hipBindTexture(0, texmem2, device_texture2, texmem_size);
hipBindTexture(0, texmem3, device_texture3, texmem_size);
hipBindTexture(0, texmem4, device_texture4, texmem_size);
hipBindTexture(0, texmem5, device_texture5, texmem_size);
hipBindTexture(0, texmem6, device_texture6, texmem_size);
hipBindTexture(0, texmem7, device_texture7, texmem_size);
hipBindTexture(0, texmem8, device_texture8, texmem_size);
hipBindTexture(0, texmem9, device_texture9, texmem_size);
unsigned num_blocks = (texmem_size / MAX_THREADS_PER_BLOCK) + 1;
dim3 grid( num_blocks, 1, 1);
dim3 threads( MAX_THREADS_PER_BLOCK, 1, 1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
hipLaunchKernelGGL(( tex_bm_kernel), dim3(grid), dim3(threads), 0 , 0, device_out, texmem_size);
hipDeviceSynchronize();
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
printf("Kernel DONE, probably correctly\n");
hipMemcpy(host_out, device_out, texmem_size*sizeof(float), hipMemcpyDeviceToHost);
/*
printf("Output: ");
float error = false;
for (int i=0; i< texmem_size; i++){
printf("%.1f ", host_out[i]);
if (host_out[i] - i > 0.0001) error = true;
}
printf("\n");
if (error) printf("\nFAILED\n");
else printf("\nPASSED\n");
*/
}
void CleanupResources(void){
// Free device memory
}
// Allocates an array with random float entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
| b1161e5a56120cdca85e6621a99c0b949c952d20.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include <string.h>
#include <cuda.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
#include "../include/ContAcq-IntClk.h"
// includes CUDA
#include <cuda_runtime.h>
#define MAX_THREADS_PER_BLOCK 256
#define THREADS_PER_BLOCK 256
#define LINE_SIZE 128
#define SETS 4
#define ASSOC 24
#define SIMD_WIDTH 32
// Variables
int no_of_nodes;
int edge_list_size;
FILE *fp;
//Structure to hold a node information
struct Node
{
int starting;
int no_of_edges;
};
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
#define ITERATIONS REPLACE_ITERATIONS
texture<float,1,cudaReadModeElementType> texmem1;
texture<float,1,cudaReadModeElementType> texmem2;
texture<float,1,cudaReadModeElementType> texmem3;
texture<float,1,cudaReadModeElementType> texmem4;
texture<float,1,cudaReadModeElementType> texmem5;
texture<float,1,cudaReadModeElementType> texmem6;
texture<float,1,cudaReadModeElementType> texmem7;
texture<float,1,cudaReadModeElementType> texmem9;
texture<float,1,cudaReadModeElementType> texmem8;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
__constant__ float ConstArray3[THREADS_PER_BLOCK];
__constant__ float ConstArray4[THREADS_PER_BLOCK];
__constant__ float ConstArray5[THREADS_PER_BLOCK];
__constant__ float ConstArray6[THREADS_PER_BLOCK];
__constant__ float ConstArray7[THREADS_PER_BLOCK];
__constant__ float ConstArray8[THREADS_PER_BLOCK];
__global__ void tex_bm_kernel( float* out, unsigned size)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
__device__ __shared__ float I1[THREADS_PER_BLOCK];
__device__ __shared__ float I2[THREADS_PER_BLOCK];
__device__ __shared__ float I3[THREADS_PER_BLOCK];
__device__ __shared__ float I4[THREADS_PER_BLOCK];
__device__ __shared__ float I5[THREADS_PER_BLOCK];
__device__ __shared__ float I6[THREADS_PER_BLOCK];
__device__ __shared__ float I7[THREADS_PER_BLOCK];
__device__ __shared__ float I8[THREADS_PER_BLOCK];
I1[tid%THREADS_PER_BLOCK] = tid;
I2[tid%THREADS_PER_BLOCK] = tid/2;
I3[tid%THREADS_PER_BLOCK] = 2*tid;
I4[tid%THREADS_PER_BLOCK] = tid+2;
I5[tid%THREADS_PER_BLOCK] = 5*tid;
I6[tid%THREADS_PER_BLOCK] = tid/2;
I7[tid%THREADS_PER_BLOCK] = tid*10;
I8[tid%THREADS_PER_BLOCK] = tid/2;
if(tid < size){
for(unsigned i=0; i<ITERATIONS; ++i){
out[tid] = tex1Dfetch(texmem1,tid);
out[tid*2] = tex1Dfetch(texmem2,tid)+ConstArray1[(tid+i)%THREADS_PER_BLOCK];
out[tid*3] = tex1Dfetch(texmem3,tid)* I1[(tid+i)%THREADS_PER_BLOCK];
out[tid*4] = tex1Dfetch(texmem4,tid)*I2[tid%THREADS_PER_BLOCK];
out[tid*5] =tex1Dfetch(texmem5,tid)/ConstArray2[(tid+i)%THREADS_PER_BLOCK];
out[tid*6] = tex1Dfetch(texmem6,tid)+I6[(tid+i)%THREADS_PER_BLOCK];
out[tid*7] = tex1Dfetch(texmem7,tid)+I4[(tid+i)%THREADS_PER_BLOCK];
out[tid*8] = exp(tex1Dfetch(texmem8,tid));
out[tid*9] = sqrt(abs(tex1Dfetch(texmem9,tid)));
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Main Program
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
float array1[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array1[i] = rand() / RAND_MAX;
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array2[i] = rand() / RAND_MAX;
}
float array3[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array3[i] = rand() / RAND_MAX;
}
float array4[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array4[i] = rand() / RAND_MAX;
}
float array5[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array5[i] = rand() / RAND_MAX;
}
float array6[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array6[i] = rand() / RAND_MAX;
}
float array7[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array7[i] = rand() / RAND_MAX;
}
float array8[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
srand(time(0));
array8[i] = rand() / RAND_MAX;
}
cudaMemcpyToSymbol("ConstArray1", array1, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol("ConstArray2", array2, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol("ConstArray3", array3, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol("ConstArray4", array4, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol("ConstArray5", array5, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol("ConstArray6", array6, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol("ConstArray7", array7, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol("ConstArray8", array8, sizeof(float) * THREADS_PER_BLOCK );
int texmem_size = LINE_SIZE*SETS*ASSOC;
float *host_texture1 = (float*) malloc(texmem_size*sizeof(float));
for (int i=0; i< texmem_size; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
float *device_texture5;
float *device_texture6;
float *device_texture7;
float *device_texture8;
float *device_texture9;
float *host_out = (float*) malloc(texmem_size*sizeof(float)*10);
float *device_out;
cudaMalloc((void**) &device_texture1, texmem_size);
cudaMalloc((void**) &device_texture2, texmem_size);
cudaMalloc((void**) &device_texture3, texmem_size);
cudaMalloc((void**) &device_texture4, texmem_size);
cudaMalloc((void**) &device_texture5, texmem_size);
cudaMalloc((void**) &device_texture6, texmem_size);
cudaMalloc((void**) &device_texture7, texmem_size);
cudaMalloc((void**) &device_texture8, texmem_size);
cudaMalloc((void**) &device_texture9, texmem_size);
cudaMalloc((void**) &device_out, texmem_size*10);
cudaMemcpy(device_texture1, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture2, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture3, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture4, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture5, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture6, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture7, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture8, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(device_texture9, host_texture1, texmem_size*sizeof(float), cudaMemcpyHostToDevice);
cudaBindTexture(0, texmem1, device_texture1, texmem_size);
cudaBindTexture(0, texmem2, device_texture2, texmem_size);
cudaBindTexture(0, texmem3, device_texture3, texmem_size);
cudaBindTexture(0, texmem4, device_texture4, texmem_size);
cudaBindTexture(0, texmem5, device_texture5, texmem_size);
cudaBindTexture(0, texmem6, device_texture6, texmem_size);
cudaBindTexture(0, texmem7, device_texture7, texmem_size);
cudaBindTexture(0, texmem8, device_texture8, texmem_size);
cudaBindTexture(0, texmem9, device_texture9, texmem_size);
unsigned num_blocks = (texmem_size / MAX_THREADS_PER_BLOCK) + 1;
dim3 grid( num_blocks, 1, 1);
dim3 threads( MAX_THREADS_PER_BLOCK, 1, 1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
tex_bm_kernel<<< grid, threads, 0 >>>(device_out, texmem_size);
cudaThreadSynchronize();
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
printf("Kernel DONE, probably correctly\n");
cudaMemcpy(host_out, device_out, texmem_size*sizeof(float), cudaMemcpyDeviceToHost);
/*
printf("Output: ");
float error = false;
for (int i=0; i< texmem_size; i++){
printf("%.1f ", host_out[i]);
if (host_out[i] - i > 0.0001) error = true;
}
printf("\n");
if (error) printf("\nFAILED\n");
else printf("\nPASSED\n");
*/
}
void CleanupResources(void){
// Free device memory
}
// Allocates an array with random float entries.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
511528dc4f1ddd9999c8c8822940696a5714be72.hip | // !!! This is a file automatically generated by hipify!!!
#define VERSION_STRING "1.0.0.7"
#define TOOL_NAME "AmoveoMinerGpuCuda"
#include <iostream>
#include <chrono>
#include <cmath>
#include <thread>
#include <iomanip>
#include <string>
#include <cassert>
#include <vector>
#include <random>
#include <climits>
#include <algorithm>
#include <functional>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "sha256.cuh"
#include "stdlib.h"
#include <future>
#include <numeric>
#include <chrono>
#include <cpprest/asyncrt_utils.h>
#include "poolApi.h"
#include "base64.h"
#include "unistd.h"
using namespace std;
using namespace std::chrono;
using namespace utility; // Common utilities like string conversions
#define FETCH_WORK_INTERVAL_MS 9000
#define SHOW_INTERVAL_MS 4000
int gElapsedMilliSecMax = FETCH_WORK_INTERVAL_MS;
//#define POOL_URL "http://localhost:32371/work" // local pool
#define POOL_URL "http://amoveopool2.com/work"
#define MINER_ADDRESS "BOPvbgrso8GakBw2Xxkc1A2lt0OiKg/JqjBuCPfP0bTI/djsM9lgp/8ZMmJgPs/aDlxQL2dT+PYfEywsaRthrmE="
#define DEFAULT_DEVICE_ID 0
string gMinerPublicKeyBase64(MINER_ADDRESS);
string gPoolUrl(POOL_URL);
string_t gPoolUrlW;
int gDevicdeId = DEFAULT_DEVICE_ID;
int gPoolType = 0; // 0: amoveopool, 1: original pool
PoolApi gPoolApi;
WorkData gWorkData;
std::mutex mutexWorkData;
uint64_t gTotalNonce = 0;
// First timestamp when program starts
static std::chrono::high_resolution_clock::time_point t1;
// Last timestamp we printed debug info
static std::chrono::high_resolution_clock::time_point t_last_updated;
static std::chrono::high_resolution_clock::time_point t_last_work_fetch;
// Device-side Amoveo proof-of-work difficulty check.
// x accumulates the number of leading zero bits of the 32-byte hash h;
// z captures the 8 bits that immediately follow the first 1 bit (which is
// why h[i+1] is read, and why the scan stops at byte 30).  The hash scores
// 256*x + z and the share is valid when that meets or exceeds 'diff'.
__device__ bool checkResult(unsigned char* h, size_t diff) {
	unsigned int x = 0;  // leading zero bit count
	unsigned int z = 0;  // next 8 bits after the first set bit
	for (int i = 0; i < 31; i++) {
		if (h[i] == 0) {
			// Whole byte of zeros: 8 more leading zero bits, keep scanning.
			x += 8;
			continue;
		}
		else if (h[i] < 2) {      // 0000_0001 -> 7 leading zeros in this byte
			x += 7;
			z = h[i+1];
		}
		else if (h[i] < 4) {      // 0000_001? -> 6 leading zeros
			x += 6;
			z = (h[i+1] / 2) + ((h[i] % 2) * 128);
		}
		else if (h[i] < 8) {      // 0000_01?? -> 5 leading zeros
			x += 5;
			z = (h[i+1] / 4) + ((h[i] % 4) * 64);
		}
		else if (h[i] < 16) {     // 0000_1??? -> 4 leading zeros
			x += 4;
			z = (h[i+1] / 8) + ((h[i] % 8) * 32);
		}
		else if (h[i] < 32) {     // 0001_???? -> 3 leading zeros
			x += 3;
			z = (h[i+1] / 16) + ((h[i] % 16) * 16);
		}
		else if (h[i] < 64) {     // 001?_???? -> 2 leading zeros
			x += 2;
			z = (h[i+1] / 32) + ((h[i] % 32) * 8);
		}
		else if (h[i] < 128) {    // 01??_???? -> 1 leading zero
			x += 1;
			z = (h[i+1] / 64) + ((h[i] % 64) * 4);
		}
		else {                    // 1???_???? -> no extra leading zeros
			z = (h[i+1] / 128) + ((h[i] % 128) * 2);
		}
		break; // first non-zero byte found; scoring complete
	}
	return(((256 * x) + z) >= diff);
}
#define SUFFIX_MAX 65536
// Amoveo share-search kernel.
// Each thread owns one 6-byte nonce prefix (its global thread index plus
// nonceOffset) and scans suffixMax 2-byte suffixes, hashing each candidate
// and testing the digest against the share difficulty.  The first winner
// atomically claims *out_found and writes its 8 nonce bytes to out_nonce.
// 0x70 is assumed to equal sizeof(SHA256_CTX) (sha256.cuh) -- TODO confirm.
// Fix: both "&currentBlockIdx" pointer expressions below had been corrupted
// into HTML-entity mojibake (currency-sign + "tBlockIdx"), which does not
// compile; also removed the unused 'threadIndex' local.
__global__ void sha256_kernel(unsigned char * out_nonce, int *out_found, const SHA256_CTX * in_ctx, uint64_t nonceOffset, int shareDiff, int suffixMax)
{
	__shared__ SHA256_CTX ctxShared;  // per-block copy of the precomputed midstate
	__shared__ int diff;
	__shared__ uint64_t nonceOff;
	// First thread of the block stages the launch parameters in shared memory.
	if (threadIdx.x == 0) {
		memcpy(&ctxShared, in_ctx, 0x70);
		diff = shareDiff;
		nonceOff = nonceOffset;
	}
	__syncthreads(); // Ensure the shared context is visible to every thread
	// 64-bit math up front so large grids/offsets cannot overflow 32-bit indices.
	uint64_t currentBlockIdx = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x + nonceOff;
	unsigned char shaResult[32];
	SHA256_CTX ctxReuse;
	memcpy(&ctxReuse, &ctxShared, 0x70);
	// Fold this thread's 6-byte nonce prefix into the context exactly once.
	sha256_update(&ctxReuse, (BYTE*)&currentBlockIdx, 6);
	sha256_updateAmoveoSpecial(&ctxReuse);
	SHA256_CTX ctxTmp;
	for (int nonceSuffix = 0; nonceSuffix < suffixMax; nonceSuffix++) {
		memcpy(&ctxTmp, &ctxReuse, 0x70);
		sha256_finalAmoveo(&ctxTmp, (BYTE*)&nonceSuffix, shaResult);
		if (checkResult(shaResult, diff) && atomicExch(out_found, 1) == 0) {
			memcpy(out_nonce, &currentBlockIdx, 6);
			memcpy(out_nonce + 6, &nonceSuffix, 2);
			return;
		}
	}
}
// One-shot kernel (launched <<<1,1>>>) that precomputes the SHA-256 midstate
// for a work unit: ctx = sha256_update(bhash[32] || noncePart[15]), copied
// back as a raw 0x70-byte blob (assumed sizeof(SHA256_CTX) -- TODO confirm).
// NOTE(review): the 'diff' parameter is unused here.
__global__ void sha256Init_kernel(unsigned char * out_ctx, unsigned char * bhash, unsigned char * noncePart, int diff)
{
	SHA256_CTX ctx;
	unsigned char bhashLocal[32];   // local copies avoid re-reading global memory
	unsigned char nonceLocal[15];
	memcpy(bhashLocal, bhash, 32);
	memcpy(nonceLocal, noncePart, 15);
	sha256_init(&ctx);
	sha256_update(&ctx, bhashLocal, 32);
	sha256_update(&ctx, nonceLocal, 15);
	memcpy(out_ctx, &ctx, 0x70);
}
// Upload the SHA-256 round-constant table (host_k) into device constant
// memory (dev_k).  Must run once before any sha256 kernel launch.
void pre_sha256() {
	checkCudaErrors(hipMemcpyToSymbol(dev_k, host_k, sizeof(host_k), 0, hipMemcpyHostToDevice));
}
// Dump a 32-byte SHA-256 digest to stdout as 64 zero-padded hex digits
// followed by a newline, then restore decimal stream formatting.
void print_hash(const unsigned char* sha256) {
	std::cout << std::hex << std::setfill('0');
	for (int byteIdx = 0; byteIdx < 32; ++byteIdx) {
		std::cout << std::setw(2) << static_cast<int>(sha256[byteIdx]);
	}
	std::cout << std::dec << std::endl;
}
// Rate limiter for pool polling: returns true (and resets the fetch
// timestamp) once more than gElapsedMilliSecMax milliseconds have passed
// since the last accepted fetch; otherwise returns false.
bool isTimeToGetNewWork()
{
	const auto now = std::chrono::high_resolution_clock::now();
	const std::chrono::duration<double, std::milli> sinceLastFetch = now - t_last_work_fetch;
	if (sinceLastFetch.count() <= gElapsedMilliSecMax) {
		return false;
	}
	t_last_work_fetch = now;
	return true;
}
// Every SHOW_INTERVAL_MS, print the average hash rate since program start
// (gTotalNonce hashes over the seconds elapsed since t1).
void print_state() {
	const auto now = std::chrono::high_resolution_clock::now();
	const std::chrono::duration<double, std::milli> sincePrint = now - t_last_updated;
	if (sincePrint.count() <= SHOW_INTERVAL_MS) {
		return;
	}
	t_last_updated = std::chrono::high_resolution_clock::now();
	const std::chrono::duration<double, std::milli> sinceStart = now - t1;
	float elapsedSec = sinceStart.count() / 1000;
	std::cout << std::fixed << static_cast<uint64_t>(gTotalNonce / elapsedSec) << " h/s " << endl;
}
// Background thread: polls the pool every 2 s for a new work unit.  When the
// block hash changes it draws a fresh random 15-byte nonce prefix, uploads
// bhash/nonce to the device, precomputes the SHA-256 midstate via
// sha256Init_kernel, and publishes everything into gWorkData under
// mutexWorkData.
// NOTE(review): the while(true) loop never exits, so the hipFree calls after
// it are unreachable and the device buffers live for the process lifetime.
// NOTE(review): gWorkData.bhash is compared outside the lock -- racy with
// the consumer in main; confirm whether that is acceptable here.
static bool getwork_thread(std::seed_seq &seed)
{
	std::independent_bits_engine<std::default_random_engine, 32, uint32_t> randomBytesEngine(seed);
	unsigned char ctxBuf[0x70];  // raw SHA256_CTX blob (0x70 bytes assumed)
	unsigned char *d_bhash = nullptr;
	unsigned char *d_nonce = nullptr;
	hipMalloc(&d_bhash, 32);
	hipMalloc(&d_nonce, 15);
	unsigned char * outCtx = nullptr;
	hipMalloc(&outCtx, 0x70);
	while (true)
	{
		WorkData workDataNew;
		gPoolApi.GetWork(gPoolUrlW, &workDataNew, gMinerPublicKeyBase64, gPoolType);
		// Check if new work unit is actually different than what we currently have
		if (memcmp(&gWorkData.bhash[0], &workDataNew.bhash[0], 32) != 0) {
			mutexWorkData.lock();
			// Fresh random nonce prefix so distinct miners search disjoint spaces.
			std::generate(begin(gWorkData.nonce), end(gWorkData.nonce), std::ref(randomBytesEngine));
			gWorkData.bhash = workDataNew.bhash;
			gWorkData.blockDifficulty = workDataNew.blockDifficulty;
			gWorkData.shareDifficulty = workDataNew.shareDifficulty;
			hipMemcpy(d_bhash, &gWorkData.bhash[0], 32, hipMemcpyHostToDevice);
			hipMemcpy(d_nonce, &gWorkData.nonce[0], 15, hipMemcpyHostToDevice);
			sha256Init_kernel << < 1, 1 >> > (outCtx, d_bhash, d_nonce, gWorkData.blockDifficulty);
			hipError_t err = hipDeviceSynchronize();
			if (err != hipSuccess) {
				std::cout << "getwork_thread Cuda Error: " << hipGetErrorString(err) << std::endl;
				throw std::runtime_error("getwork_thread Device error");
			}
			hipMemcpy(ctxBuf, outCtx, 0x70, hipMemcpyDeviceToHost);
			//SHA256_CTX ctx;
			//memcpy(&ctx, outCtx, sizeof(SHA256_CTX));
			gWorkData.setCtx(ctxBuf);
			mutexWorkData.unlock();
			std::cout << "New Work ||" << "BDiff:" << gWorkData.blockDifficulty << " SDiff:" << gWorkData.shareDifficulty << endl;
		}
		else {
			// Even if new work is not available, shareDiff will likely change. Need to adjust, else will get a "low diff share" error.
			gWorkData.shareDifficulty = workDataNew.shareDifficulty;
		}
		usleep(2000000);  // poll the pool every 2 seconds
	}
	// Unreachable cleanup -- see NOTE(review) above.
	hipFree(outCtx);
	hipFree(d_bhash);
	hipFree(d_nonce);
	return true;
}
// Submit one solved 23-byte nonce (15-byte prefix + 8 device-found bytes),
// base64-encoded, to the pool.  Invoked via std::async from the mining loop.
static void submitwork_thread(unsigned char * nonceSolution)
{
	gPoolApi.SubmitWork(gPoolUrlW, base64_encode(nonceSolution, 23), gMinerPublicKeyBase64);
	cout << "--- Found Share --- " << endl;
}
int gBlockSize = 64;
int gNumBlocks = 96;
int gSuffixMax = 65536;
std::string gSeedStr("ImAraNdOmStrInG");
// Entry point: parse positional CLI args into the configuration globals,
// select the HIP device, spawn the pool-polling thread, then run the
// endless mining loop (launch kernel -> check share -> advance nonce window).
int main(int argc, char* argv[])
{
	cout << TOOL_NAME << " v" << VERSION_STRING << endl;
	// ---- Usage banner; every argument after argv[1] is positional/optional.
	if (argc <= 1) {
		cout << "Example Template: " << endl;
		cout << argv[0] << " " << "<Base64AmoveoAddress>" << " " << "<CudaDeviceId>" << " " << "<BlockSize>" << " " << "<NumBlocks>" << " " << "<SeedString>" << " " << "<SuffixMax>" << " " << "<PoolUrl>" << "<PoolType>" << endl;
		cout << endl;
		cout << "Example Usage: " << endl;
		cout << argv[0] << " " << MINER_ADDRESS << endl;
		cout << endl;
		cout << "Advanced Example Usage: " << endl;
		cout << argv[0] << " " << MINER_ADDRESS << " " << DEFAULT_DEVICE_ID << " " << gBlockSize << " " << gNumBlocks << " " << "RandomSeed" << " " << "65536" << " " << POOL_URL << endl;
		cout << endl;
		cout << endl;
		cout << "CudaDeviceId is optional. Default CudaDeviceId is 0" << endl;
		cout << "BlockSize is optional. Default BlockSize is 64" << endl;
		cout << "NumBlocks is optional. Default NumBlocks is 96" << endl;
		cout << "RandomSeed is optional. No default." << endl;
		cout << "SuffixMax is optional. Default is 65536" << endl;
		cout << "PoolUrl is optional. Default PoolUrl is http://amoveopool.com/work" << endl;
		cout << "PoolType is optional. Specify 0 (for amoveopool.com) or 1 (for amoveo original pool). Default is 0" << endl;
		return -1;
	}
	// ---- Positional CLI parsing into module-level configuration globals.
	if (argc >= 2) {
		gMinerPublicKeyBase64 = argv[1];
	}
	if (argc >= 3) {
		gDevicdeId = atoi(argv[2]);
	}
	if (argc >= 4) {
		gBlockSize = atoi(argv[3]);
	}
	if (argc >= 5) {
		gNumBlocks = atoi(argv[4]);
	}
	if (argc >= 6) {
		gSeedStr = argv[5];
	}
	if (argc >= 7) {
		gSuffixMax = atoi(argv[6]);
	}
	if (argc >= 8) {
		gPoolUrl = argv[7];
	}
	if (argc >= 9) {
		gPoolType = atoi(argv[8]);
	}
	// Widen the pool URL into the cpprest string_t type.
	gPoolUrlW.resize(gPoolUrl.length(), L' ');
	std::copy(gPoolUrl.begin(), gPoolUrl.end(), gPoolUrlW.begin());
	std::seed_seq seed(gSeedStr.begin(), gSeedStr.end());
	hipDeviceProp_t deviceProp;
	hipGetDeviceProperties(&deviceProp, gDevicdeId);
	cout << "GPU Device Properties:" << endl;
	// NOTE(review): maxThreadsDim and maxGridSize look like int arrays;
	// streaming them would print addresses, not values -- TODO confirm/fix.
	cout << "maxThreadsDim: " << deviceProp.maxThreadsDim << endl;
	cout << "maxThreadsPerBlock: " << deviceProp.maxThreadsPerBlock << endl;
	cout << "maxGridSize: " << deviceProp.maxGridSize << endl;
	hipSetDevice(gDevicdeId);
	hipDeviceSetCacheConfig(hipFuncCachePreferShared);
	//hipDeviceSetCacheConfig(hipFuncCachePreferNone);
	unsigned char localCtx[0x70];  // host copy of the SHA256 midstate blob
	// Input string for the device
	SHA256_CTX * d_ctx = nullptr;
	// Output string by the device read by host
	unsigned char *g_out = nullptr;
	int *g_found = nullptr;
	hipMalloc(&d_ctx, sizeof(SHA256_CTX)); // SHA256_CTX ctx to be used
	hipMalloc(&g_out, 8); // partial nonce - last 8 bytes
	hipMalloc(&g_found, 4); // "found" success flag
	future<bool> getWorkThread = std::async(std::launch::async, getwork_thread, std::ref(seed));
	// Assuming bhash and nonce are fixed size, so dynamic_shared_size should never change across work units
	// size_t dynamic_shared_size = sizeof(SHA256_CTX) * gBlockSize + sizeof(SHA256_CTX) + sizeof(uint64_t) + sizeof(int);// +(64 * gBlockSize);
	// std::cout << "Shared memory is " << dynamic_shared_size << "B" << std::endl;
	const uint64_t blocksPerKernel = gNumBlocks * gBlockSize;
	const uint64_t hashesPerKernel = blocksPerKernel * gSuffixMax;
	cout << "blockSize: " << gBlockSize << endl;
	cout << "numBlocks: " << gNumBlocks << endl;
	cout << "suffixMax: " << gSuffixMax << endl;
	pre_sha256();
	uint64_t nonceOffset = 0;
	int shareDiff = 0;
	uint64_t nonceSolutionVal = 0;
	bool found = false;
	// Block until the getwork thread has published the first work unit.
	while (!gWorkData.HasNewWork())
	{
		usleep(100000);
	}
	gWorkData.getCtx(localCtx);
	hipMemcpy(d_ctx, localCtx, sizeof(SHA256_CTX), hipMemcpyHostToDevice);
	int foundInit = 0;
	hipMemcpy(g_found, &foundInit, 4, hipMemcpyHostToDevice);
	gWorkData.clearNewWork();
	shareDiff = gWorkData.shareDifficulty;
	t1 = std::chrono::high_resolution_clock::now();
	t_last_updated = std::chrono::high_resolution_clock::now();
	t_last_work_fetch = std::chrono::high_resolution_clock::now();
	// ---- Mining loop: launch, harvest any share, advance the nonce window.
	while (true) {
		sha256_kernel << < gNumBlocks, gBlockSize >> > (g_out, g_found, d_ctx, nonceOffset, shareDiff, gSuffixMax);
		hipError_t err = hipDeviceSynchronize();
		if (err != hipSuccess) {
			std::cout << "Cuda Error: " << hipGetErrorString(err) << std::endl;
			throw std::runtime_error("Device error");
		}
		// NOTE(review): copies 1 byte of the 4-byte device flag into a bool.
		hipMemcpy(&found, g_found, 1, hipMemcpyDeviceToHost);
		if (found) {
			unsigned char nonceSolution[23];  // 15-byte prefix + 8 device bytes
			mutexWorkData.lock();
			memcpy(nonceSolution, &gWorkData.nonce[0], 15);
			mutexWorkData.unlock();
			hipMemcpy(&nonceSolutionVal, g_out, 8, hipMemcpyDeviceToHost);
			memcpy(nonceSolution + 15, &nonceSolutionVal, 8);
			//print_hash(nonceSolution);
			// NOTE(review): the discarded std::async future's destructor joins
			// immediately, so this submit is effectively synchronous -- confirm.
			std::async(std::launch::async, submitwork_thread, std::ref(nonceSolution));
			found = 0;
			hipMemcpy(g_found, &found, 1, hipMemcpyHostToDevice);
		}
		gTotalNonce += hashesPerKernel;
		nonceOffset += blocksPerKernel;
		if (gWorkData.HasNewWork())
		{
			mutexWorkData.lock();
			gWorkData.getCtx(localCtx);
			mutexWorkData.unlock();
			hipMemcpy(d_ctx, localCtx, sizeof(SHA256_CTX), hipMemcpyHostToDevice);
			gWorkData.clearNewWork();
			//nonceOffset = 0;
		}
		shareDiff = gWorkData.shareDifficulty;
		//print_state();
		// Inline hash-rate report every SHOW_INTERVAL_MS.
		std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
		std::chrono::duration<double, std::milli> last_show_interval = t2 - t_last_updated;
		if (last_show_interval.count() > SHOW_INTERVAL_MS) {
			t_last_updated = std::chrono::high_resolution_clock::now();
			std::chrono::duration<double, std::milli> span = t2 - t1;
			float ratio = span.count() / 1000;
			//std::cout << std::fixed << static_cast<uint64_t>(gTotalNonce / ratio) << " h/s S:" << totalSharesFound << " S/H:" << ((totalSharesFound *3600) / ratio) << std::endl;
			std::cout << std::fixed << static_cast<uint64_t>(gTotalNonce / ratio) << " h/s " << endl;
		}
	}
	// Unreachable: the mining loop never exits.
	hipFree(g_out);
	hipFree(g_found);
	hipFree(d_ctx);
	hipDeviceReset();
	return 0;
}
| 511528dc4f1ddd9999c8c8822940696a5714be72.cu | #define VERSION_STRING "1.0.0.7"
#define TOOL_NAME "AmoveoMinerGpuCuda"
#include <iostream>
#include <chrono>
#include <cmath>
#include <thread>
#include <iomanip>
#include <string>
#include <cassert>
#include <vector>
#include <random>
#include <climits>
#include <algorithm>
#include <functional>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "sha256.cuh"
#include "stdlib.h"
#include <future>
#include <numeric>
#include <chrono>
#include <cpprest/asyncrt_utils.h>
#include "poolApi.h"
#include "base64.h"
#include "unistd.h"
using namespace std;
using namespace std::chrono;
using namespace utility; // Common utilities like string conversions
#define FETCH_WORK_INTERVAL_MS 9000
#define SHOW_INTERVAL_MS 4000
int gElapsedMilliSecMax = FETCH_WORK_INTERVAL_MS;
//#define POOL_URL "http://localhost:32371/work" // local pool
#define POOL_URL "http://amoveopool2.com/work"
#define MINER_ADDRESS "BOPvbgrso8GakBw2Xxkc1A2lt0OiKg/JqjBuCPfP0bTI/djsM9lgp/8ZMmJgPs/aDlxQL2dT+PYfEywsaRthrmE="
#define DEFAULT_DEVICE_ID 0
string gMinerPublicKeyBase64(MINER_ADDRESS);
string gPoolUrl(POOL_URL);
string_t gPoolUrlW;
int gDevicdeId = DEFAULT_DEVICE_ID;
int gPoolType = 0; // 0: amoveopool, 1: original pool
PoolApi gPoolApi;
WorkData gWorkData;
std::mutex mutexWorkData;
uint64_t gTotalNonce = 0;
// First timestamp when program starts
static std::chrono::high_resolution_clock::time_point t1;
// Last timestamp we printed debug info
static std::chrono::high_resolution_clock::time_point t_last_updated;
static std::chrono::high_resolution_clock::time_point t_last_work_fetch;
// Device-side Amoveo proof-of-work difficulty check (CUDA twin of the HIP
// version).  x = leading zero bit count of the 32-byte hash h; z = the 8
// bits right after the first 1 bit (hence the h[i+1] reads and the scan
// stopping at byte 30).  Score = 256*x + z; valid when >= diff.
__device__ bool checkResult(unsigned char* h, size_t diff) {
	unsigned int x = 0;  // leading zero bit count
	unsigned int z = 0;  // next 8 bits after the first set bit
	for (int i = 0; i < 31; i++) {
		if (h[i] == 0) {
			// Whole byte of zeros: 8 more leading zero bits.
			x += 8;
			continue;
		}
		else if (h[i] < 2) {      // 7 leading zeros in this byte
			x += 7;
			z = h[i+1];
		}
		else if (h[i] < 4) {      // 6 leading zeros
			x += 6;
			z = (h[i+1] / 2) + ((h[i] % 2) * 128);
		}
		else if (h[i] < 8) {      // 5 leading zeros
			x += 5;
			z = (h[i+1] / 4) + ((h[i] % 4) * 64);
		}
		else if (h[i] < 16) {     // 4 leading zeros
			x += 4;
			z = (h[i+1] / 8) + ((h[i] % 8) * 32);
		}
		else if (h[i] < 32) {     // 3 leading zeros
			x += 3;
			z = (h[i+1] / 16) + ((h[i] % 16) * 16);
		}
		else if (h[i] < 64) {     // 2 leading zeros
			x += 2;
			z = (h[i+1] / 32) + ((h[i] % 32) * 8);
		}
		else if (h[i] < 128) {    // 1 leading zero
			x += 1;
			z = (h[i+1] / 64) + ((h[i] % 64) * 4);
		}
		else {                    // no extra leading zeros
			z = (h[i+1] / 128) + ((h[i] % 128) * 2);
		}
		break; // first non-zero byte found; scoring complete
	}
	return(((256 * x) + z) >= diff);
}
#define SUFFIX_MAX 65536
// Amoveo share-search kernel (CUDA twin of the HIP version).
// Each thread owns one 6-byte nonce prefix (global thread index + nonceOff)
// and scans suffixMax 2-byte suffixes; the first thread whose digest meets
// the share difficulty atomically claims *out_found and writes its 8 nonce
// bytes to out_nonce.  0x70 assumed == sizeof(SHA256_CTX) -- TODO confirm.
// Fix: both "&currentBlockIdx" pointer expressions below had been corrupted
// into HTML-entity mojibake (currency-sign + "tBlockIdx"), which does not
// compile; also removed the unused 'threadIndex' local.
__global__ void sha256_kernel(unsigned char * out_nonce, int *out_found, const SHA256_CTX * in_ctx, uint64_t nonceOffset, int shareDiff, int suffixMax)
{
	__shared__ SHA256_CTX ctxShared;  // per-block copy of the precomputed midstate
	__shared__ int diff;
	__shared__ uint64_t nonceOff;
	// First thread of the block stages the launch parameters in shared memory.
	if (threadIdx.x == 0) {
		memcpy(&ctxShared, in_ctx, 0x70);
		diff = shareDiff;
		nonceOff = nonceOffset;
	}
	__syncthreads(); // Ensure the shared context is visible to every thread
	// 64-bit math up front so large grids/offsets cannot overflow 32-bit indices.
	uint64_t currentBlockIdx = (uint64_t)blockIdx.x * blockDim.x + threadIdx.x + nonceOff;
	unsigned char shaResult[32];
	SHA256_CTX ctxReuse;
	memcpy(&ctxReuse, &ctxShared, 0x70);
	// Fold this thread's 6-byte nonce prefix into the context exactly once.
	sha256_update(&ctxReuse, (BYTE*)&currentBlockIdx, 6);
	sha256_updateAmoveoSpecial(&ctxReuse);
	SHA256_CTX ctxTmp;
	for (int nonceSuffix = 0; nonceSuffix < suffixMax; nonceSuffix++) {
		memcpy(&ctxTmp, &ctxReuse, 0x70);
		sha256_finalAmoveo(&ctxTmp, (BYTE*)&nonceSuffix, shaResult);
		if (checkResult(shaResult, diff) && atomicExch(out_found, 1) == 0) {
			memcpy(out_nonce, &currentBlockIdx, 6);
			memcpy(out_nonce + 6, &nonceSuffix, 2);
			return;
		}
	}
}
// One-shot kernel (launched <<<1,1>>>) that precomputes the SHA-256 midstate
// for a work unit: ctx = sha256_update(bhash[32] || noncePart[15]), copied
// back as a raw 0x70-byte blob (assumed sizeof(SHA256_CTX) -- TODO confirm).
// NOTE(review): the 'diff' parameter is unused here.
__global__ void sha256Init_kernel(unsigned char * out_ctx, unsigned char * bhash, unsigned char * noncePart, int diff)
{
	SHA256_CTX ctx;
	unsigned char bhashLocal[32];   // local copies avoid re-reading global memory
	unsigned char nonceLocal[15];
	memcpy(bhashLocal, bhash, 32);
	memcpy(nonceLocal, noncePart, 15);
	sha256_init(&ctx);
	sha256_update(&ctx, bhashLocal, 32);
	sha256_update(&ctx, nonceLocal, 15);
	memcpy(out_ctx, &ctx, 0x70);
}
// Upload the SHA-256 round-constant table (host_k) into device constant
// memory (dev_k).  Must run once before any sha256 kernel launch.
void pre_sha256() {
	checkCudaErrors(cudaMemcpyToSymbol(dev_k, host_k, sizeof(host_k), 0, cudaMemcpyHostToDevice));
}
// Dump a 32-byte SHA-256 digest to stdout as 64 zero-padded hex digits
// followed by a newline, then restore decimal stream formatting.
void print_hash(const unsigned char* sha256) {
	std::cout << std::hex << std::setfill('0');
	for (int byteIdx = 0; byteIdx < 32; ++byteIdx) {
		std::cout << std::setw(2) << static_cast<int>(sha256[byteIdx]);
	}
	std::cout << std::dec << std::endl;
}
// Rate limiter for pool polling: returns true (and resets the fetch
// timestamp) once more than gElapsedMilliSecMax milliseconds have passed
// since the last accepted fetch; otherwise returns false.
bool isTimeToGetNewWork()
{
	const auto now = std::chrono::high_resolution_clock::now();
	const std::chrono::duration<double, std::milli> sinceLastFetch = now - t_last_work_fetch;
	if (sinceLastFetch.count() <= gElapsedMilliSecMax) {
		return false;
	}
	t_last_work_fetch = now;
	return true;
}
// Every SHOW_INTERVAL_MS, print the average hash rate since program start
// (gTotalNonce hashes over the seconds elapsed since t1).
void print_state() {
	const auto now = std::chrono::high_resolution_clock::now();
	const std::chrono::duration<double, std::milli> sincePrint = now - t_last_updated;
	if (sincePrint.count() <= SHOW_INTERVAL_MS) {
		return;
	}
	t_last_updated = std::chrono::high_resolution_clock::now();
	const std::chrono::duration<double, std::milli> sinceStart = now - t1;
	float elapsedSec = sinceStart.count() / 1000;
	std::cout << std::fixed << static_cast<uint64_t>(gTotalNonce / elapsedSec) << " h/s " << endl;
}
// Background thread (CUDA twin of the HIP version): polls the pool every
// 2 s; on a new block hash it draws a fresh random 15-byte nonce prefix,
// uploads bhash/nonce, precomputes the midstate via sha256Init_kernel, and
// publishes into gWorkData under mutexWorkData.
// NOTE(review): the while(true) loop never exits, so the cudaFree calls
// after it are unreachable.
// NOTE(review): gWorkData.bhash is compared outside the lock -- racy.
static bool getwork_thread(std::seed_seq &seed)
{
	std::independent_bits_engine<std::default_random_engine, 32, uint32_t> randomBytesEngine(seed);
	unsigned char ctxBuf[0x70];  // raw SHA256_CTX blob (0x70 bytes assumed)
	unsigned char *d_bhash = nullptr;
	unsigned char *d_nonce = nullptr;
	cudaMalloc(&d_bhash, 32);
	cudaMalloc(&d_nonce, 15);
	unsigned char * outCtx = nullptr;
	cudaMalloc(&outCtx, 0x70);
	while (true)
	{
		WorkData workDataNew;
		gPoolApi.GetWork(gPoolUrlW, &workDataNew, gMinerPublicKeyBase64, gPoolType);
		// Check if new work unit is actually different than what we currently have
		if (memcmp(&gWorkData.bhash[0], &workDataNew.bhash[0], 32) != 0) {
			mutexWorkData.lock();
			// Fresh random nonce prefix so distinct miners search disjoint spaces.
			std::generate(begin(gWorkData.nonce), end(gWorkData.nonce), std::ref(randomBytesEngine));
			gWorkData.bhash = workDataNew.bhash;
			gWorkData.blockDifficulty = workDataNew.blockDifficulty;
			gWorkData.shareDifficulty = workDataNew.shareDifficulty;
			cudaMemcpy(d_bhash, &gWorkData.bhash[0], 32, cudaMemcpyHostToDevice);
			cudaMemcpy(d_nonce, &gWorkData.nonce[0], 15, cudaMemcpyHostToDevice);
			sha256Init_kernel << < 1, 1 >> > (outCtx, d_bhash, d_nonce, gWorkData.blockDifficulty);
			cudaError_t err = cudaDeviceSynchronize();
			if (err != cudaSuccess) {
				std::cout << "getwork_thread Cuda Error: " << cudaGetErrorString(err) << std::endl;
				throw std::runtime_error("getwork_thread Device error");
			}
			cudaMemcpy(ctxBuf, outCtx, 0x70, cudaMemcpyDeviceToHost);
			//SHA256_CTX ctx;
			//memcpy(&ctx, outCtx, sizeof(SHA256_CTX));
			gWorkData.setCtx(ctxBuf);
			mutexWorkData.unlock();
			std::cout << "New Work ||" << "BDiff:" << gWorkData.blockDifficulty << " SDiff:" << gWorkData.shareDifficulty << endl;
		}
		else {
			// Even if new work is not available, shareDiff will likely change. Need to adjust, else will get a "low diff share" error.
			gWorkData.shareDifficulty = workDataNew.shareDifficulty;
		}
		usleep(2000000);  // poll the pool every 2 seconds
	}
	// Unreachable cleanup -- see NOTE(review) above.
	cudaFree(outCtx);
	cudaFree(d_bhash);
	cudaFree(d_nonce);
	return true;
}
// Submit one solved 23-byte nonce (15-byte prefix + 8 device-found bytes),
// base64-encoded, to the pool.  Invoked via std::async from the mining loop.
static void submitwork_thread(unsigned char * nonceSolution)
{
	gPoolApi.SubmitWork(gPoolUrlW, base64_encode(nonceSolution, 23), gMinerPublicKeyBase64);
	cout << "--- Found Share --- " << endl;
}
int gBlockSize = 64;
int gNumBlocks = 96;
int gSuffixMax = 65536;
std::string gSeedStr("ImAraNdOmStrInG");
// Entry point (CUDA twin of the HIP version): parse positional CLI args,
// select the CUDA device, spawn the pool-polling thread, then run the
// endless mining loop (launch kernel -> check share -> advance nonce window).
int main(int argc, char* argv[])
{
	cout << TOOL_NAME << " v" << VERSION_STRING << endl;
	// ---- Usage banner; every argument after argv[1] is positional/optional.
	if (argc <= 1) {
		cout << "Example Template: " << endl;
		cout << argv[0] << " " << "<Base64AmoveoAddress>" << " " << "<CudaDeviceId>" << " " << "<BlockSize>" << " " << "<NumBlocks>" << " " << "<SeedString>" << " " << "<SuffixMax>" << " " << "<PoolUrl>" << "<PoolType>" << endl;
		cout << endl;
		cout << "Example Usage: " << endl;
		cout << argv[0] << " " << MINER_ADDRESS << endl;
		cout << endl;
		cout << "Advanced Example Usage: " << endl;
		cout << argv[0] << " " << MINER_ADDRESS << " " << DEFAULT_DEVICE_ID << " " << gBlockSize << " " << gNumBlocks << " " << "RandomSeed" << " " << "65536" << " " << POOL_URL << endl;
		cout << endl;
		cout << endl;
		cout << "CudaDeviceId is optional. Default CudaDeviceId is 0" << endl;
		cout << "BlockSize is optional. Default BlockSize is 64" << endl;
		cout << "NumBlocks is optional. Default NumBlocks is 96" << endl;
		cout << "RandomSeed is optional. No default." << endl;
		cout << "SuffixMax is optional. Default is 65536" << endl;
		cout << "PoolUrl is optional. Default PoolUrl is http://amoveopool.com/work" << endl;
		cout << "PoolType is optional. Specify 0 (for amoveopool.com) or 1 (for amoveo original pool). Default is 0" << endl;
		return -1;
	}
	// ---- Positional CLI parsing into module-level configuration globals.
	if (argc >= 2) {
		gMinerPublicKeyBase64 = argv[1];
	}
	if (argc >= 3) {
		gDevicdeId = atoi(argv[2]);
	}
	if (argc >= 4) {
		gBlockSize = atoi(argv[3]);
	}
	if (argc >= 5) {
		gNumBlocks = atoi(argv[4]);
	}
	if (argc >= 6) {
		gSeedStr = argv[5];
	}
	if (argc >= 7) {
		gSuffixMax = atoi(argv[6]);
	}
	if (argc >= 8) {
		gPoolUrl = argv[7];
	}
	if (argc >= 9) {
		gPoolType = atoi(argv[8]);
	}
	// Widen the pool URL into the cpprest string_t type.
	gPoolUrlW.resize(gPoolUrl.length(), L' ');
	std::copy(gPoolUrl.begin(), gPoolUrl.end(), gPoolUrlW.begin());
	std::seed_seq seed(gSeedStr.begin(), gSeedStr.end());
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, gDevicdeId);
	cout << "GPU Device Properties:" << endl;
	// NOTE(review): maxThreadsDim and maxGridSize are int[3]; streaming them
	// prints their addresses, not the values -- TODO print elements.
	cout << "maxThreadsDim: " << deviceProp.maxThreadsDim << endl;
	cout << "maxThreadsPerBlock: " << deviceProp.maxThreadsPerBlock << endl;
	cout << "maxGridSize: " << deviceProp.maxGridSize << endl;
	cudaSetDevice(gDevicdeId);
	cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
	//cudaDeviceSetCacheConfig(cudaFuncCachePreferNone);
	unsigned char localCtx[0x70];  // host copy of the SHA256 midstate blob
	// Input string for the device
	SHA256_CTX * d_ctx = nullptr;
	// Output string by the device read by host
	unsigned char *g_out = nullptr;
	int *g_found = nullptr;
	cudaMalloc(&d_ctx, sizeof(SHA256_CTX)); // SHA256_CTX ctx to be used
	cudaMalloc(&g_out, 8); // partial nonce - last 8 bytes
	cudaMalloc(&g_found, 4); // "found" success flag
	future<bool> getWorkThread = std::async(std::launch::async, getwork_thread, std::ref(seed));
	// Assuming bhash and nonce are fixed size, so dynamic_shared_size should never change across work units
	// size_t dynamic_shared_size = sizeof(SHA256_CTX) * gBlockSize + sizeof(SHA256_CTX) + sizeof(uint64_t) + sizeof(int);// +(64 * gBlockSize);
	// std::cout << "Shared memory is " << dynamic_shared_size << "B" << std::endl;
	const uint64_t blocksPerKernel = gNumBlocks * gBlockSize;
	const uint64_t hashesPerKernel = blocksPerKernel * gSuffixMax;
	cout << "blockSize: " << gBlockSize << endl;
	cout << "numBlocks: " << gNumBlocks << endl;
	cout << "suffixMax: " << gSuffixMax << endl;
	pre_sha256();
	uint64_t nonceOffset = 0;
	int shareDiff = 0;
	uint64_t nonceSolutionVal = 0;
	bool found = false;
	// Block until the getwork thread has published the first work unit.
	while (!gWorkData.HasNewWork())
	{
		usleep(100000);
	}
	gWorkData.getCtx(localCtx);
	cudaMemcpy(d_ctx, localCtx, sizeof(SHA256_CTX), cudaMemcpyHostToDevice);
	int foundInit = 0;
	cudaMemcpy(g_found, &foundInit, 4, cudaMemcpyHostToDevice);
	gWorkData.clearNewWork();
	shareDiff = gWorkData.shareDifficulty;
	t1 = std::chrono::high_resolution_clock::now();
	t_last_updated = std::chrono::high_resolution_clock::now();
	t_last_work_fetch = std::chrono::high_resolution_clock::now();
	// ---- Mining loop: launch, harvest any share, advance the nonce window.
	while (true) {
		sha256_kernel << < gNumBlocks, gBlockSize >> > (g_out, g_found, d_ctx, nonceOffset, shareDiff, gSuffixMax);
		cudaError_t err = cudaDeviceSynchronize();
		if (err != cudaSuccess) {
			std::cout << "Cuda Error: " << cudaGetErrorString(err) << std::endl;
			throw std::runtime_error("Device error");
		}
		// NOTE(review): copies 1 byte of the 4-byte device flag into a bool.
		cudaMemcpy(&found, g_found, 1, cudaMemcpyDeviceToHost);
		if (found) {
			unsigned char nonceSolution[23];  // 15-byte prefix + 8 device bytes
			mutexWorkData.lock();
			memcpy(nonceSolution, &gWorkData.nonce[0], 15);
			mutexWorkData.unlock();
			cudaMemcpy(&nonceSolutionVal, g_out, 8, cudaMemcpyDeviceToHost);
			memcpy(nonceSolution + 15, &nonceSolutionVal, 8);
			//print_hash(nonceSolution);
			// NOTE(review): the discarded std::async future's destructor joins
			// immediately, so this submit is effectively synchronous -- confirm.
			std::async(std::launch::async, submitwork_thread, std::ref(nonceSolution));
			found = 0;
			cudaMemcpy(g_found, &found, 1, cudaMemcpyHostToDevice);
		}
		gTotalNonce += hashesPerKernel;
		nonceOffset += blocksPerKernel;
		if (gWorkData.HasNewWork())
		{
			mutexWorkData.lock();
			gWorkData.getCtx(localCtx);
			mutexWorkData.unlock();
			cudaMemcpy(d_ctx, localCtx, sizeof(SHA256_CTX), cudaMemcpyHostToDevice);
			gWorkData.clearNewWork();
			//nonceOffset = 0;
		}
		shareDiff = gWorkData.shareDifficulty;
		//print_state();
		// Inline hash-rate report every SHOW_INTERVAL_MS.
		std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
		std::chrono::duration<double, std::milli> last_show_interval = t2 - t_last_updated;
		if (last_show_interval.count() > SHOW_INTERVAL_MS) {
			t_last_updated = std::chrono::high_resolution_clock::now();
			std::chrono::duration<double, std::milli> span = t2 - t1;
			float ratio = span.count() / 1000;
			//std::cout << std::fixed << static_cast<uint64_t>(gTotalNonce / ratio) << " h/s S:" << totalSharesFound << " S/H:" << ((totalSharesFound *3600) / ratio) << std::endl;
			std::cout << std::fixed << static_cast<uint64_t>(gTotalNonce / ratio) << " h/s " << endl;
		}
	}
	// Unreachable: the mining loop never exits.
	cudaFree(g_out);
	cudaFree(g_found);
	cudaFree(d_ctx);
	cudaDeviceReset();
	return 0;
}
|
0fba448105c3ecc559a667dd069cc2c364ee3d53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/deformable_offsets_impl.cuh"
#include <stdio.h>
#include <stdint.h>
#include "include/hip/hip_fp16.h"
constexpr int OFFSET_NUM = 3;
// Bilinear sampling of a single-channel (height x width) image at the
// fractional coordinate (x, y).  Coordinates more than one pixel outside the
// image return 0; corner taps that fall outside are zero-padded.
// NOTE(review): "Defromable" is a typo for "Deformable"; kept because the
// kernel below calls it by this exact name.
template <typename T>
__device__ T DefromableBilinear(const T *input, const uint width, const uint height, const T x, const T y) {
  if (y <= static_cast<T>(-1) || y >= static_cast<T>(height) || x <= static_cast<T>(-1) || x >= static_cast<T>(width)) {
    return 0;
  }
  // Integer corners of the 2x2 neighborhood around (x, y).
  int left = floorf(x);
  int top = floorf(y);
  int right = left + 1;
  int bottom = top + 1;
  // Fractional distances; l + r == 1 and t + b == 1.
  T l = x - static_cast<T>(left);
  T t = y - static_cast<T>(top);
  T r = static_cast<T>(1) - l;
  T b = static_cast<T>(1) - t;
  // Fetch the four corner taps, zero-padding anything outside the image.
  T lt = 0;
  T lb = 0;
  if (left >= 0) {
    if (top >= 0) {
      lt = input[top * width + left];
    }
    if (bottom <= height - 1) {
      lb = input[bottom * width + left];
    }
  }
  T rt = 0;
  T rb = 0;
  if (right <= width - 1) {
    if (top >= 0) {
      rt = input[top * width + right];
    }
    if (bottom <= height - 1) {
      rb = input[bottom * width + right];
    }
  }
  // Standard bilinear weights: opposite-corner area products.
  T w_lt = r * b;
  T w_rt = l * b;
  T w_lb = r * t;
  T w_rb = l * t;
  T val = (w_lt * lt + w_rt * rt + w_lb * lb + w_rb * rb);
  return val;
}
// Precomputes, for each of `num` output positions, the un-deformed integer
// sampling coordinate in the input image (grid-stride loop).  position_grid
// stores (x, y) interleaved: entry i occupies slots [2*i] and [2*i + 1].
__global__ void GenPositionGridKernel(const uint kernel_h, const uint kernel_w, const uint stride_h,
                                      const uint stride_w, const uint dilations_h, const uint dilations_w,
                                      const uint pad_l, const uint pad_t, const uint output_w, const uint num,
                                      int32_t *position_grid) {
  const uint thread_stride = blockDim.x * gridDim.x;
  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < num; pos += thread_stride) {
    const uint out_y = pos / output_w;
    const uint out_x = pos % output_w;
    // Split each output coordinate into its pixel index and kernel-tap index.
    const uint pixel_x = out_x / kernel_w;
    const uint kernel_x = out_x % kernel_w;
    const uint pixel_y = out_y / kernel_h;
    const uint kernel_y = out_y % kernel_h;
    int32_t *entry = position_grid + pos * 2;
    entry[0] = pixel_x * stride_w + kernel_x * dilations_w - pad_l;   // x
    entry[1] = pixel_y * stride_h + kernel_y * dilations_h - pad_t;   // y
  }
}
// Gathers one deformed output element per thread (grid-stride loop): look up
// the base sampling position from position_grid, add the learned (x, y)
// offsets for this kernel tap / deformable group, bilinearly sample the
// input there, and scale by the learned mask.
// Offsets layout appears to be, per batch item, three consecutive planes
// {x, y, mask} each of stride offset_mask_dim (inferred from the *_dim
// strides computed in the host launcher -- TODO confirm).
template <class T>
__global__ void DeformableOffsetsKernel(const T *input, const T *offsets, const int32_t *position_grid, const uint c,
                                        const uint output_n_dim, const uint output_c_dim, const uint output_w,
                                        const uint c_size_per_dfm_group, const uint offset_n_dim,
                                        const uint offset_mask_dim, const uint offset_group_dim,
                                        const uint offset_kh_dim, const uint offset_kw_dim, const uint pixel_w,
                                        const uint input_n_dim, const uint input_c_dim, const uint input_h,
                                        const uint input_w, const uint kernel_h, const uint kernel_w, const uint num,
                                        T *output) {
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += blockDim.x * gridDim.x) {
    // Get original input position
    const uint hw_idx = i % output_c_dim;
    const uint position_grid_idx = hw_idx * 2;   // (x, y) pairs are interleaved
    const int input_x = position_grid[position_grid_idx];
    const int input_y = position_grid[position_grid_idx + 1];
    // Get offsets
    const uint n_index = i / output_n_dim;       // batch index
    const uint c_index = i / output_c_dim % c;   // channel index
    const uint x = hw_idx % output_w;
    const uint y = hw_idx / output_w;
    const uint dfm_group_index = c_index / c_size_per_dfm_group;
    const uint pixel_x = x / kernel_w;
    const uint pixel_y = y / kernel_h;
    const uint kernel_x = x % kernel_w;
    const uint kernel_y = y % kernel_h;
    const uint x_offsets_offset = n_index * offset_n_dim // + 0 * offset_mask_dim
                                  + dfm_group_index * offset_group_dim + kernel_y * offset_kh_dim +
                                  kernel_x * offset_kw_dim + pixel_y * pixel_w + pixel_x;
    T x_offsets = offsets[x_offsets_offset];
    const int y_offsets_offset = x_offsets_offset + offset_mask_dim;   // y plane follows x plane
    T y_offsets = offsets[y_offsets_offset];
    const int mask_offset = y_offsets_offset + offset_mask_dim;        // mask plane follows y plane
    T mask = offsets[mask_offset];
    // Deform
    T deformed_x = static_cast<T>(input_x) + x_offsets;
    T deformed_y = static_cast<T>(input_y) + y_offsets;
    const T *input_base = input + n_index * input_n_dim + c_index * input_c_dim;
    T bilinear_val = DefromableBilinear(input_base, input_w, input_h, deformed_x, deformed_y);
    output[i] = bilinear_val * mask;
  }
}
// Host launcher: derives all flattened strides from the logical shapes and
// launches DeformableOffsetsKernel over every output element on cuda_stream.
// output_h/output_w are the pixel grid multiplied by kernel_h/kernel_w
// (pixel_h = output_h / kernel_h), as the divisions below show.
template <class T>
void DeformableOffsets(const T *input, const T *offsets, const int32_t *position_grid, uint n, uint c, uint input_h,
                       uint input_w, uint dfm_group, uint kernel_h, uint kernel_w, uint output_h, uint output_w,
                       T *output, uint32_t device_id, hipStream_t cuda_stream) {
  const uint pixel_w = output_w / kernel_w;
  const uint pixel_h = output_h / kernel_h;
  const uint output_c_dim = output_h * output_w;   // elements per channel
  const uint output_n_dim = c * output_c_dim;      // elements per batch item
  const uint num = n * output_n_dim;               // total output elements
  const uint c_size_per_dfm_group = c / dfm_group;
  // Strides of the offsets tensor, innermost to outermost.
  const uint offset_kw_dim = pixel_h * pixel_w;
  const uint offset_kh_dim = offset_kw_dim * kernel_w;
  const uint offset_group_dim = offset_kh_dim * kernel_h;
  const uint offset_mask_dim = offset_group_dim * dfm_group;
  const uint offset_n_dim = offset_mask_dim * OFFSET_NUM;
  const uint input_c_dim = input_h * input_w;
  const uint input_n_dim = input_c_dim * c;
  hipLaunchKernelGGL(( DeformableOffsetsKernel), dim3(CUDA_BLOCKS(device_id, num)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream,
    input, offsets, position_grid, c, output_n_dim, output_c_dim, output_w, c_size_per_dfm_group, offset_n_dim,
    offset_mask_dim, offset_group_dim, offset_kh_dim, offset_kw_dim, pixel_w, input_n_dim, input_c_dim, input_h,
    input_w, kernel_h, kernel_w, num, output);
}
// Host launcher: fills position_grid with `num` interleaved (x, y) base
// sampling coordinates (see GenPositionGridKernel) on cuda_stream.
void GenPositionGrid(const uint kernel_h, const uint kernel_w, const uint stride_h, const uint stride_w,
                     const uint dilations_h, const uint dilations_w, const uint pad_l, const uint pad_t,
                     const uint output_w, const uint num, int32_t *position_grid, const uint32_t device_id,
                     hipStream_t cuda_stream) {
  hipLaunchKernelGGL(( GenPositionGridKernel), dim3(CUDA_BLOCKS(device_id, num)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream,
    kernel_h, kernel_w, stride_h, stride_w, dilations_h, dilations_w, pad_l, pad_t, output_w, num, position_grid);
}
// Explicit instantiations for the element types exposed by this library
// (fp32 and fp16).
template CUDA_LIB_EXPORT void DeformableOffsets<float>(const float *input, const float *offsets,
const int32_t *position_grid, uint n, uint c, uint input_h,
uint input_w, uint dfm_group, uint kernel_h, uint kernel_w,
uint output_h, uint output_w, float *output,
uint32_t device_id, hipStream_t cuda_stream);
template CUDA_LIB_EXPORT void DeformableOffsets<half>(const half *input, const half *offsets,
const int32_t *position_grid, uint n, uint c, uint input_h,
uint input_w, uint dfm_group, uint kernel_h, uint kernel_w,
uint output_h, uint output_w, half *output,
uint32_t device_id, hipStream_t cuda_stream);
| 0fba448105c3ecc559a667dd069cc2c364ee3d53.cu | /**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/deformable_offsets_impl.cuh"
#include <stdio.h>
#include <stdint.h>
#include "include/cuda_fp16.h"
constexpr int OFFSET_NUM = 3;
// Bilinear interpolation of a single (height x width) plane at the
// fractional location (x, y). A sample that lies entirely outside the plane
// yields zero, and any individual corner tap outside the plane is treated
// as zero.
template <typename T>
__device__ T DefromableBilinear(const T *input, const uint width, const uint height, const T x, const T y) {
  // Entirely out of range: nothing to sample.
  if (x <= static_cast<T>(-1) || x >= static_cast<T>(width) ||
      y <= static_cast<T>(-1) || y >= static_cast<T>(height)) {
    return 0;
  }
  const int x0 = floorf(x);
  const int y0 = floorf(y);
  const int x1 = x0 + 1;
  const int y1 = y0 + 1;
  // Fractional distances to the surrounding grid lines.
  const T dx = x - static_cast<T>(x0);
  const T dy = y - static_cast<T>(y0);
  const T inv_dx = static_cast<T>(1) - dx;
  const T inv_dy = static_cast<T>(1) - dy;
  // Fetch the four corner taps, substituting zero for out-of-bounds ones.
  T v00 = 0;  // input[y0][x0]
  T v01 = 0;  // input[y0][x1]
  T v10 = 0;  // input[y1][x0]
  T v11 = 0;  // input[y1][x1]
  if (x0 >= 0) {
    if (y0 >= 0) {
      v00 = input[y0 * width + x0];
    }
    if (y1 <= height - 1) {
      v10 = input[y1 * width + x0];
    }
  }
  if (x1 <= width - 1) {
    if (y0 >= 0) {
      v01 = input[y0 * width + x1];
    }
    if (y1 <= height - 1) {
      v11 = input[y1 * width + x1];
    }
  }
  // Standard bilinear blend of the four taps.
  return inv_dx * inv_dy * v00 + dx * inv_dy * v01 + inv_dx * dy * v10 + dx * dy * v11;
}
// One thread per output element: writes that element's base sampling
// position into `position_grid` as interleaved [x, y] int32 pairs.
// Grid-stride loop, so any launch configuration covers all `num` entries.
__global__ void GenPositionGridKernel(const uint kernel_h, const uint kernel_w, const uint stride_h,
                                      const uint stride_w, const uint dilations_h, const uint dilations_w,
                                      const uint pad_l, const uint pad_t, const uint output_w, const uint num,
                                      int32_t *position_grid) {
  for (size_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num; idx += blockDim.x * gridDim.x) {
    // Decompose the flat index into output-plane coordinates.
    const uint row = idx / output_w;
    const uint col = idx % output_w;
    // Split each coordinate into (kernel application, tap within kernel).
    const uint app_y = row / kernel_h;
    const uint app_x = col / kernel_w;
    const uint tap_y = row % kernel_h;
    const uint tap_x = col % kernel_w;
    // Interleaved layout: [x0, y0, x1, y1, ...].
    const uint out = idx * 2;
    position_grid[out] = app_x * stride_w + tap_x * dilations_w - pad_l;
    position_grid[out + 1] = app_y * stride_h + tap_y * dilations_h - pad_t;
  }
}
// One thread per output element (grid-stride loop over `num`):
// looks up the element's base (x, y) from `position_grid`, adds the learned
// x/y offsets for its deformable group and kernel tap, bilinearly samples
// the input channel plane at the deformed location, and scales by the mask.
// The offsets tensor holds three consecutive slabs of `offset_mask_dim`
// elements per sample: x-offsets, then y-offsets, then masks.
template <class T>
__global__ void DeformableOffsetsKernel(const T *input, const T *offsets, const int32_t *position_grid, const uint c,
const uint output_n_dim, const uint output_c_dim, const uint output_w,
const uint c_size_per_dfm_group, const uint offset_n_dim,
const uint offset_mask_dim, const uint offset_group_dim,
const uint offset_kh_dim, const uint offset_kw_dim, const uint pixel_w,
const uint input_n_dim, const uint input_c_dim, const uint input_h,
const uint input_w, const uint kernel_h, const uint kernel_w, const uint num,
T *output) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += blockDim.x * gridDim.x) {
// Get original input position
const uint hw_idx = i % output_c_dim;
const uint position_grid_idx = hw_idx * 2;
const int input_x = position_grid[position_grid_idx];
const int input_y = position_grid[position_grid_idx + 1];
// Get offsets
const uint n_index = i / output_n_dim;
const uint c_index = i / output_c_dim % c;
const uint x = hw_idx % output_w;
const uint y = hw_idx / output_w;
const uint dfm_group_index = c_index / c_size_per_dfm_group;
// (pixel_x, pixel_y) = kernel application; (kernel_x, kernel_y) = tap.
const uint pixel_x = x / kernel_w;
const uint pixel_y = y / kernel_h;
const uint kernel_x = x % kernel_w;
const uint kernel_y = y % kernel_h;
const uint x_offsets_offset = n_index * offset_n_dim // + 0 * offset_mask_dim
+ dfm_group_index * offset_group_dim + kernel_y * offset_kh_dim +
kernel_x * offset_kw_dim + pixel_y * pixel_w + pixel_x;
T x_offsets = offsets[x_offsets_offset];
const int y_offsets_offset = x_offsets_offset + offset_mask_dim;
T y_offsets = offsets[y_offsets_offset];
const int mask_offset = y_offsets_offset + offset_mask_dim;
T mask = offsets[mask_offset];
// Deform
T deformed_x = static_cast<T>(input_x) + x_offsets;
T deformed_y = static_cast<T>(input_y) + y_offsets;
// Base of the (n, c) input plane being sampled.
const T *input_base = input + n_index * input_n_dim + c_index * input_c_dim;
T bilinear_val = DefromableBilinear(input_base, input_w, input_h, deformed_x, deformed_y);
output[i] = bilinear_val * mask;
}
}
// Host launcher (CUDA build) for the DeformableOffsets forward kernel.
// Precomputes the flattened stride sizes of the output, offsets and input
// tensors and launches one thread per output element (`num` total).
// NOTE(review): assumes output_h/output_w are exact multiples of
// kernel_h/kernel_w -- TODO confirm with the op's shape inference.
template <class T>
void DeformableOffsets(const T *input, const T *offsets, const int32_t *position_grid, uint n, uint c, uint input_h,
uint input_w, uint dfm_group, uint kernel_h, uint kernel_w, uint output_h, uint output_w,
T *output, uint32_t device_id, cudaStream_t cuda_stream) {
// Number of kernel applications per output row/column.
const uint pixel_w = output_w / kernel_w;
const uint pixel_h = output_h / kernel_h;
// Flattened sizes: per-channel plane, per-sample block, total element count.
const uint output_c_dim = output_h * output_w;
const uint output_n_dim = c * output_c_dim;
const uint num = n * output_n_dim;
// Channels served by each deformable group.
const uint c_size_per_dfm_group = c / dfm_group;
// Strides into the offsets tensor, innermost to outermost:
// [N, OFFSET_NUM, dfm_group, kernel_h, kernel_w, pixel_h, pixel_w].
const uint offset_kw_dim = pixel_h * pixel_w;
const uint offset_kh_dim = offset_kw_dim * kernel_w;
const uint offset_group_dim = offset_kh_dim * kernel_h;
const uint offset_mask_dim = offset_group_dim * dfm_group;
const uint offset_n_dim = offset_mask_dim * OFFSET_NUM;
// Strides into the input tensor (NCHW).
const uint input_c_dim = input_h * input_w;
const uint input_n_dim = input_c_dim * c;
DeformableOffsetsKernel<<<CUDA_BLOCKS(device_id, num), CUDA_THREADS(device_id), 0, cuda_stream>>>(
input, offsets, position_grid, c, output_n_dim, output_c_dim, output_w, c_size_per_dfm_group, offset_n_dim,
offset_mask_dim, offset_group_dim, offset_kh_dim, offset_kw_dim, pixel_w, input_n_dim, input_c_dim, input_h,
input_w, kernel_h, kernel_w, num, output);
}
// Host launcher (CUDA build) that fills `position_grid` with the (x, y)
// base sampling position of every output element; one thread per entry.
void GenPositionGrid(const uint kernel_h, const uint kernel_w, const uint stride_h, const uint stride_w,
const uint dilations_h, const uint dilations_w, const uint pad_l, const uint pad_t,
const uint output_w, const uint num, int32_t *position_grid, const uint32_t device_id,
cudaStream_t cuda_stream) {
GenPositionGridKernel<<<CUDA_BLOCKS(device_id, num), CUDA_THREADS(device_id), 0, cuda_stream>>>(
kernel_h, kernel_w, stride_h, stride_w, dilations_h, dilations_w, pad_l, pad_t, output_w, num, position_grid);
}
// Explicit instantiations for the element types exposed by this library
// (fp32 and fp16).
template CUDA_LIB_EXPORT void DeformableOffsets<float>(const float *input, const float *offsets,
const int32_t *position_grid, uint n, uint c, uint input_h,
uint input_w, uint dfm_group, uint kernel_h, uint kernel_w,
uint output_h, uint output_w, float *output,
uint32_t device_id, cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT void DeformableOffsets<half>(const half *input, const half *offsets,
const int32_t *position_grid, uint n, uint c, uint input_h,
uint input_w, uint dfm_group, uint kernel_h, uint kernel_w,
uint output_h, uint output_w, half *output,
uint32_t device_id, cudaStream_t cuda_stream);
|
99704697a415530336ad9816d1396a39f828a90b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Channel-wise max: for each (sample n, spatial location s) pair, scans the
// channel axis of `data` ([num, channels, spatial_dim]) and writes the
// maximum into `out` ([num, spatial_dim]). Used to stabilize the softmax.
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
// Subtracts the per-(n, s) value in `channel_max` ([num, spatial_dim]) from
// every channel of `data` ([num, channels, spatial_dim]), in place.
// `num` is unused here; it is kept for a uniform kernel signature.
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
// Element-wise exponential: out[i] = exp(data[i]); in-place use is safe.
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
// Channel-wise sum: for each (n, s) pair, accumulates the channel axis of
// `data` ([num, channels, spatial_dim]) into `channel_sum` ([num, spatial_dim]).
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
// Divides every channel of `data` by the per-(n, s) value in `channel_sum`,
// in place. `num` is unused here; kept for a uniform kernel signature.
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
// Per-(n, s) dot product over the channel axis of `data_1` and `data_2`
// (both [num, channels, spatial_dim]); the result goes to `channel_dot`
// ([num, spatial_dim]). Used for sum_c(top_diff * top_data) in backward.
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
// GPU forward pass: channel-wise softmax along `softmax_axis_`.
// Pipeline (all in place on top_data, with scale_ as [outer, inner] scratch):
// copy bottom -> top, subtract the per-position channel max for numerical
// stability, exponentiate, then divide by the per-position channel sum.
// `preforward_flag` is unused here.
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top,
const bool preforward_flag) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, top_data);
}
// GPU backward pass. Implements dE/dx = y .* (dE/dy - sum_c(dE/dy .* y)),
// where y = top_data: the per-position dot product is computed into scale_,
// broadcast-subtracted from the copied top_diff, then multiplied by y.
// `propagate_down` and `prebackward_flag` are unused here.
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom,
const bool prebackward_flag) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_dot<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num_ * inner_num_)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
| 99704697a415530336ad9816d1396a39f828a90b.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "thrust/device_vector.h"
#include "caffe/layers/softmax_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Channel-wise max: for each (sample n, spatial location s) pair, scans the
// channel axis of `data` ([num, channels, spatial_dim]) and writes the
// maximum into `out` ([num, spatial_dim]). Used to stabilize the softmax.
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
// Subtracts the per-(n, s) value in `channel_max` ([num, spatial_dim]) from
// every channel of `data` ([num, channels, spatial_dim]), in place.
// `num` is unused here; it is kept for a uniform kernel signature.
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_max, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] -= channel_max[n * spatial_dim + s];
}
}
// Element-wise exponential: out[i] = exp(data[i]); in-place use is safe.
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
// Channel-wise sum: for each (n, s) pair, accumulates the channel axis of
// `data` ([num, channels, spatial_dim]) into `channel_sum` ([num, spatial_dim]).
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
// Divides every channel of `data` by the per-(n, s) value in `channel_sum`,
// in place. `num` is unused here; kept for a uniform kernel signature.
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
// Per-(n, s) dot product over the channel axis of `data_1` and `data_2`
// (both [num, channels, spatial_dim]); the result goes to `channel_dot`
// ([num, spatial_dim]). Used for sum_c(top_diff * top_data) in backward.
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int channels,
const int spatial_dim, const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype dot = 0;
for (int c = 0; c < channels; ++c) {
dot += (data_1[(n * channels + c) * spatial_dim + s]
* data_2[(n * channels + c) * spatial_dim + s]);
}
channel_dot[index] = dot;
}
}
// GPU forward pass: channel-wise softmax along `softmax_axis_`.
// Pipeline (all in place on top_data, with scale_ as [outer, inner] scratch):
// copy bottom -> top, subtract the per-position channel max for numerical
// stability, exponentiate, then divide by the per-position channel sum.
// `preforward_flag` is unused here.
template <typename Dtype>
void SoftmaxLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top,
const bool preforward_flag) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = bottom[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, bottom_data, top_data);
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_data, top_data);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_, top_data,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, top_data);
}
// GPU backward pass. Implements dE/dx = y .* (dE/dy - sum_c(dE/dy .* y)),
// where y = top_data: the per-position dot product is computed into scale_,
// broadcast-subtracted from the copied top_diff, then multiplied by y.
// `propagate_down` and `prebackward_flag` are unused here.
template <typename Dtype>
void SoftmaxLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom,
const bool prebackward_flag) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* top_data = top[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* scale_data = scale_.mutable_gpu_data();
int count = top[0]->count();
int channels = top[0]->shape(softmax_axis_);
caffe_copy(count, top_diff, bottom_diff);
// Compute inner1d(top_diff, top_data) and subtract them from the bottom diff.
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype><<<CAFFE_GET_BLOCKS(outer_num_ * inner_num_),
CAFFE_CUDA_NUM_THREADS>>>(outer_num_, channels, inner_num_,
top_diff, top_data, scale_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num_, channels, inner_num_,
scale_data, bottom_diff);
// elementwise multiplication
caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer);
} // namespace caffe
|
3084d0822761dfd5214933982c89d30573fa6fa2.hip | // !!! This is a file automatically generated by hipify!!!
#include "Particles.cuh"
#include "hip/hip_runtime.h"
#include <device_launch_parameters.h>
#define gravity 9.8
#define _SIZE_T_DEFINED
#ifndef __HIPCC__
#define __HIPCC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a HIP error (with its source location) to stderr and, unless
// `abort` is false, terminates the process using the error code as the exit
// status.
// BUGFIX: `file` is now const-qualified -- the gpuErrchk macro passes
// __FILE__ (a string literal), and binding a string literal to `char*` is
// ill-formed in C++11 and later.
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
    if (code != hipSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
// Advances every particle one simulation step; one thread per particle.
// `cells_d` maps each (x, y) grid cell to the owning particle index, or -1
// when empty. Sand particles try to fall straight down, then slide
// down-right, then down-left, moving up to speed/2 cells and stopping one
// cell short of any obstacle or the 3-cell world border.
__global__ void Sand_kernel(point_t *points_d, int *cells_d, int pAm, int w, int h) {
    unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= pAm) return;
    point_t *p = points_d + id;
    int i;
    // BUGFIX: `collided` was read uninitialized when the first scan loop
    // body never ran (speed < 2); it must start out false for every scan.
    bool collided = false;
    switch (p->type) {
    case sand:
        // Scan straight down for the first occupied cell or the floor.
        for (i = 1; i < (int)p->speed / 2 + 1; ++i) {
            if ((cells_d[p->x + (p->y + i) * w] != -1) || (p->y + i) >= h - 3) {
                collided = true;
                break;
            }
        }
        if (i > 1 || (i == 1 && !collided)) {
            --i;  // settle on the last free cell before the obstacle
            cells_d[p->x + p->y * w] = -1;
            cells_d[p->x + (p->y + i) * w] = (int)id;
            p->y += i;
            if (!collided && p->speed + p->velocity <= gravity) {
                p->speed += p->velocity;  // accelerate up to terminal speed
            } else if (collided) {
                p->speed = 1;
            }
            return;
        }
        // Blocked below: try sliding diagonally down-right.
        collided = false;
        for (i = 1; i < (int)p->speed / 2 + 1; ++i) {
            if ((cells_d[(p->x + i) + (p->y + i) * w] != -1) || (p->y + i) >= h - 3 || (p->x + i) > w - 3) {
                collided = true;
                break;
            }
        }
        if (i > 1 || (i == 1 && !collided)) {
            --i;
            cells_d[p->x + p->y * w] = -1;
            cells_d[(p->x + i) + (p->y + i) * w] = (int)id;
            p->y += i;
            p->x += i;
            if (!collided && p->speed / 2.0 >= 1.0) {
                p->speed /= 1.25;  // friction while sliding
            } else {
                p->speed = 1;
            }
            return;
        }
        // Finally, try sliding diagonally down-left.
        collided = false;
        for (i = 1; i < (int)p->speed / 2 + 1; ++i) {
            if ((cells_d[(p->x - i) + (p->y + i) * w] != -1) || (p->y + i) >= h - 3 || (p->x - i) < 3) {
                collided = true;
                break;
            }
        }
        if (i > 1 || (i == 1 && !collided)) {
            --i;
            cells_d[p->x + p->y * w] = -1;
            // BUGFIX: the particle moves to (x - i, y + i) below, but the
            // grid cell was written at (x + i, y + i), desynchronizing the
            // grid from the particle positions.
            cells_d[(p->x - i) + (p->y + i) * w] = (int)id;
            p->y += i;
            p->x -= i;
            // BUGFIX: this mirrored branch used `<= 1.0`, skipping friction
            // exactly when the particle was still fast; match the
            // down-right branch so both diagonals decelerate identically.
            if (!collided && p->speed / 2.0 >= 1.0) {
                p->speed /= 1.25;
            } else {
                p->speed = 1;
            }
            return;
        }
        break;
    case rock:
        return;
    case water:
        return;
    }
}
namespace cu {
constexpr int blockSize = 256;
extern "C"{
// Runs one Sand_kernel step over all `pAm` particles: uploads host state
// when `updated == 1`, launches the kernel, then copies both the particle
// array and the w*h occupancy grid back to the host.
// NOTE(review): no error checking on the memcpys/launch even though
// gpuErrchk exists in this file -- consider wrapping these calls.
void updateSand(point_t *points, int *cells, point_t *points_d, int *cells_d, int pAm, int w, int h, int updated)
{
int NumOfBlocks;
if(updated == 1) {
hipMemcpy(points_d, points, sizeof(point_t) * pAm, hipMemcpyHostToDevice);
hipMemcpy(cells_d, cells, sizeof(int) * w * h, hipMemcpyHostToDevice);
}
NumOfBlocks = (int)((double)pAm/(double)blockSize) + 1;
hipLaunchKernelGGL(( Sand_kernel), dim3(NumOfBlocks), dim3(blockSize), 0, 0, points_d, cells_d, pAm, w, h);
hipMemcpy(points, points_d, sizeof(point_t) * pAm, hipMemcpyDeviceToHost);
hipMemcpy(cells, cells_d, sizeof(int) * w * h, hipMemcpyDeviceToHost);
}
// Allocates device buffers sized for the full w*h grid (maximum particle
// capacity). `points`, `cells` and `pAm` are unused here.
void AllocPointsAndCells(int w, int h, point_t **points_d, int **cells_d, point_t *points, int *cells, int pAm) {
hipMalloc(points_d, sizeof(point_t) * w * h);
hipMalloc(cells_d, sizeof(int) * w * h);
}
// Releases the device buffers allocated by AllocPointsAndCells.
void FreePointsAndCells(point_t *points_d, int *cells_d) {
hipFree(points_d);
hipFree(cells_d);
}
}
} | 3084d0822761dfd5214933982c89d30573fa6fa2.cu | #include "Particles.cuh"
#include "cuda_runtime.h"
#include <device_launch_parameters.h>
#define gravity 9.8
#define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error (with its source location) to stderr and, unless
// `abort` is false, terminates the process using the error code as the exit
// status.
// BUGFIX: `file` is now const-qualified -- the gpuErrchk macro passes
// __FILE__ (a string literal), and binding a string literal to `char*` is
// ill-formed in C++11 and later.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
// Advances every particle one simulation step; one thread per particle.
// `cells_d` maps each (x, y) grid cell to the owning particle index, or -1
// when empty. Sand particles try to fall straight down, then slide
// down-right, then down-left, moving up to speed/2 cells and stopping one
// cell short of any obstacle or the 3-cell world border.
__global__ void Sand_kernel(point_t *points_d, int *cells_d, int pAm, int w, int h) {
    unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= pAm) return;
    point_t *p = points_d + id;
    int i;
    // BUGFIX: `collided` was read uninitialized when the first scan loop
    // body never ran (speed < 2); it must start out false for every scan.
    bool collided = false;
    switch (p->type) {
    case sand:
        // Scan straight down for the first occupied cell or the floor.
        for (i = 1; i < (int)p->speed / 2 + 1; ++i) {
            if ((cells_d[p->x + (p->y + i) * w] != -1) || (p->y + i) >= h - 3) {
                collided = true;
                break;
            }
        }
        if (i > 1 || (i == 1 && !collided)) {
            --i;  // settle on the last free cell before the obstacle
            cells_d[p->x + p->y * w] = -1;
            cells_d[p->x + (p->y + i) * w] = (int)id;
            p->y += i;
            if (!collided && p->speed + p->velocity <= gravity) {
                p->speed += p->velocity;  // accelerate up to terminal speed
            } else if (collided) {
                p->speed = 1;
            }
            return;
        }
        // Blocked below: try sliding diagonally down-right.
        collided = false;
        for (i = 1; i < (int)p->speed / 2 + 1; ++i) {
            if ((cells_d[(p->x + i) + (p->y + i) * w] != -1) || (p->y + i) >= h - 3 || (p->x + i) > w - 3) {
                collided = true;
                break;
            }
        }
        if (i > 1 || (i == 1 && !collided)) {
            --i;
            cells_d[p->x + p->y * w] = -1;
            cells_d[(p->x + i) + (p->y + i) * w] = (int)id;
            p->y += i;
            p->x += i;
            if (!collided && p->speed / 2.0 >= 1.0) {
                p->speed /= 1.25;  // friction while sliding
            } else {
                p->speed = 1;
            }
            return;
        }
        // Finally, try sliding diagonally down-left.
        collided = false;
        for (i = 1; i < (int)p->speed / 2 + 1; ++i) {
            if ((cells_d[(p->x - i) + (p->y + i) * w] != -1) || (p->y + i) >= h - 3 || (p->x - i) < 3) {
                collided = true;
                break;
            }
        }
        if (i > 1 || (i == 1 && !collided)) {
            --i;
            cells_d[p->x + p->y * w] = -1;
            // BUGFIX: the particle moves to (x - i, y + i) below, but the
            // grid cell was written at (x + i, y + i), desynchronizing the
            // grid from the particle positions.
            cells_d[(p->x - i) + (p->y + i) * w] = (int)id;
            p->y += i;
            p->x -= i;
            // BUGFIX: this mirrored branch used `<= 1.0`, skipping friction
            // exactly when the particle was still fast; match the
            // down-right branch so both diagonals decelerate identically.
            if (!collided && p->speed / 2.0 >= 1.0) {
                p->speed /= 1.25;
            } else {
                p->speed = 1;
            }
            return;
        }
        break;
    case rock:
        return;
    case water:
        return;
    }
}
namespace cu {
constexpr int blockSize = 256;
extern "C"{
// Runs one Sand_kernel step over all `pAm` particles: uploads host state
// when `updated == 1`, launches the kernel, then copies both the particle
// array and the w*h occupancy grid back to the host.
// NOTE(review): no error checking on the memcpys/launch even though
// gpuErrchk exists in this file -- consider wrapping these calls.
void updateSand(point_t *points, int *cells, point_t *points_d, int *cells_d, int pAm, int w, int h, int updated)
{
int NumOfBlocks;
if(updated == 1) {
cudaMemcpy(points_d, points, sizeof(point_t) * pAm, cudaMemcpyHostToDevice);
cudaMemcpy(cells_d, cells, sizeof(int) * w * h, cudaMemcpyHostToDevice);
}
NumOfBlocks = (int)((double)pAm/(double)blockSize) + 1;
Sand_kernel<<<NumOfBlocks, blockSize>>>(points_d, cells_d, pAm, w, h);
cudaMemcpy(points, points_d, sizeof(point_t) * pAm, cudaMemcpyDeviceToHost);
cudaMemcpy(cells, cells_d, sizeof(int) * w * h, cudaMemcpyDeviceToHost);
}
// Allocates device buffers sized for the full w*h grid (maximum particle
// capacity). `points`, `cells` and `pAm` are unused here.
void AllocPointsAndCells(int w, int h, point_t **points_d, int **cells_d, point_t *points, int *cells, int pAm) {
cudaMalloc(points_d, sizeof(point_t) * w * h);
cudaMalloc(cells_d, sizeof(int) * w * h);
}
// Releases the device buffers allocated by AllocPointsAndCells.
void FreePointsAndCells(point_t *points_d, int *cells_d) {
cudaFree(points_d);
cudaFree(cells_d);
}
}
} |
ed7bf433910c2580b717d2258bb6024d34c6eaa6.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020 NVIDIA Corporation.
* Copyright (c) Chris Choy (chrischoy@ai.stanford.edu).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "coordinate_map.hpp"
#include "coordinate_map_cpu.hpp"
#include "coordinate_map_key.hpp"
#include "coordinate_map_manager.hpp"
#include "errors.hpp"
#include "types.hpp"
#include "utils.hpp"
#include "pooling_avg_kernel.cuh"
#include "pooling_max_kernel.cuh"
// Ninja
#include "local_pooling_cpu.cpp"
#include <pybind11/pybind11.h>
#include <torch/extension.h>
namespace minkowski {
// GPU forward local (kernel-wise) pooling for sparse tensors.
// Creates/reuses the output coordinate map (strided by kernel_stride), asks
// the map manager for the in->out kernel map, then dispatches either the
// max-pooling kernel (returning (out_feat, max_index)) or the sum/avg
// kernel (returning (out_feat, num_nonzero)).
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
std::pair<at::Tensor, at::Tensor> LocalPoolingForwardGPU(
at::Tensor const &in_feat,
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous");
ASSERT(in_feat.is_cuda(), "in_feat must be on CUDA");
ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim());
coordinate_map_key_type in_key = p_in_map_key->get_key();
ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);
ASSERT(in_feat.size(0) == p_map_manager->size(in_key), "Invalid in_feat size",
in_feat.size(0), "!=", p_map_manager->size(in_key));
// create an output coordinate map
if (!p_out_map_key->is_key_set()) {
coordinate_map_key_type out_key =
std::get<0>(p_map_manager->stride(in_key, kernel_stride));
p_out_map_key->set_key(out_key);
}
auto const &in_out = p_map_manager->kernel_map(
p_in_map_key, //
p_out_map_key, //
kernel_size, //
kernel_stride, //
kernel_dilation, //
region_type, //
offset, false /* is_transpose */, true /* is_pool */);
auto const out_nrows = p_map_manager->size(p_out_map_key->get_key());
at::Tensor out_feat =
torch::zeros({out_nrows, in_feat.size(1)}, in_feat.options());
LOG_DEBUG("Allocated", out_nrows, "x", in_feat.size(1), "features.");
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
if (pooling_mode == PoolingMode::LOCAL_MAX_POOLING) {
// Max pooling also records, per output feature, the index of the input
// row that produced the maximum (needed by the backward pass).
at::Tensor max_index = torch::empty({0}, torch::TensorOptions()
.device(in_feat.device())
.dtype(torch::kInt)
.requires_grad(false));
max_index.resize_({out_nrows, in_feat.size(1)});
max_index.zero_();
TemplatedAllocator<char> byte_allocator;
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "local_pooling_forward_gpu", [&] {
MaxPoolingForwardKernelGPU<scalar_t, default_types::index_type,
TemplatedAllocator<char>>(
in_feat.template data_ptr<scalar_t>(),
out_feat.template data_ptr<scalar_t>(), out_nrows,
max_index.data_ptr<int>(), in_feat.size(1), in_out,
byte_allocator, stream);
});
return std::make_pair(out_feat, max_index);
} else {
at::Tensor num_nonzero =
torch::empty({0}, in_feat.options().requires_grad(false));
if (pooling_mode == PoolingMode::LOCAL_AVG_POOLING) {
num_nonzero.resize_({out_nrows});
num_nonzero.zero_();
}
// NOTE(review): hipify left the CUDA-namespaced sparse-handle getter
// paired with hipsparse calls here -- confirm this resolves in the HIP
// build.
hipsparseHandle_t handle = at::cuda::getCurrentCUDASparseHandle();
hipsparseSetStream(handle, stream);
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "local_pooling_forward_gpu", [&] {
TemplatedAllocator<char> byte_allocator;
NonzeroAvgPoolingForwardKernelGPU<scalar_t, default_types::index_type,
TemplatedAllocator<char>>(
in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
out_feat.template data_ptr<scalar_t>(), out_nrows,
num_nonzero.template data_ptr<scalar_t>(), in_feat.size(1),
in_out, pooling_mode == PoolingMode::LOCAL_AVG_POOLING,
byte_allocator, handle, stream);
});
return std::make_pair(out_feat, num_nonzero);
}
}
// GPU backward local pooling: routes grad_out_feat back into a zero-filled
// grad_in_feat through the same kernel map used in forward.
// For max pooling, `num_nonzero` is read as int data -- presumably the
// forward pass's max_index tensor; for sum/avg pooling it carries the
// per-output scalar counts produced by forward. TODO confirm with callers.
template <typename coordinate_type,
template <typename C> class TemplatedAllocator>
at::Tensor LocalPoolingBackwardGPU(
at::Tensor const &in_feat, //
at::Tensor const &grad_out_feat, //
at::Tensor const &num_nonzero, //
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous");
ASSERT(grad_out_feat.is_contiguous(), "grad_out_feata must be contiguous");
ASSERT(in_feat.is_cuda(), "in_feat must be on CUDA");
ASSERT(grad_out_feat.is_cuda(), "in_feat must be on CUDA");
ASSERT(in_feat.scalar_type() == grad_out_feat.scalar_type(), "type mismatch");
ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim());
ASSERT(grad_out_feat.dim() == 2, "grad_out_feat.dim():", grad_out_feat.dim());
coordinate_map_key_type in_key = p_in_map_key->get_key();
ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);
coordinate_map_key_type out_key = p_out_map_key->get_key();
ASSERT(p_map_manager->exists(out_key), ERROR_MAP_NOT_FOUND);
auto const &in_out = p_map_manager->kernel_map(
p_in_map_key, //
p_out_map_key, //
kernel_size, //
kernel_stride, //
kernel_dilation, //
region_type, //
offset, false /* is_transpose */, true /* is_pool */);
at::Tensor grad_in_feat =
torch::zeros({in_feat.size(0), in_feat.size(1)}, in_feat.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA().stream();
if (pooling_mode == PoolingMode::LOCAL_MAX_POOLING) {
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "local_pooling_backward_gpu", [&] {
MaxPoolingBackwardKernelGPU<scalar_t>(
grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
grad_out_feat.template data_ptr<scalar_t>(),
grad_out_feat.size(0), num_nonzero.data_ptr<int>(),
in_feat.size(1), stream);
});
} else {
AT_DISPATCH_FLOATING_TYPES(
in_feat.scalar_type(), "local_pooling_backward_gpu", [&] {
NonzeroAvgPoolingBackwardKernelGPU<
scalar_t, default_types::index_type, TemplatedAllocator<char>>(
grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
grad_out_feat.template data_ptr<scalar_t>(),
grad_out_feat.size(0), num_nonzero.template data_ptr<scalar_t>(),
in_feat.size(1), in_out,
pooling_mode == PoolingMode::LOCAL_AVG_POOLING, stream);
});
}
return grad_in_feat;
}
// Forward
template std::pair<at::Tensor, at::Tensor>
LocalPoolingForwardGPU<default_types::dcoordinate_type,
detail::default_allocator>(
at::Tensor const &in_feat,
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator>
*p_map_manager);
template std::pair<at::Tensor, at::Tensor>
LocalPoolingForwardGPU<default_types::dcoordinate_type, detail::c10_allocator>(
at::Tensor const &in_feat,
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator>
*p_map_manager);
// Backward
template at::Tensor LocalPoolingBackwardGPU<default_types::dcoordinate_type,
detail::default_allocator>(
at::Tensor const &in_feat, //
at::Tensor const &grad_out_feat, //
at::Tensor const &num_nonzero, //
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator>
*p_map_manager);
template at::Tensor
LocalPoolingBackwardGPU<default_types::dcoordinate_type, detail::c10_allocator>(
at::Tensor const &in_feat, //
at::Tensor const &grad_out_feat, //
at::Tensor const &num_nonzero, //
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator>
*p_map_manager);
} // end namespace minkowski
| ed7bf433910c2580b717d2258bb6024d34c6eaa6.cu | /*
* Copyright (c) 2020 NVIDIA Corporation.
* Copyright (c) Chris Choy (chrischoy@ai.stanford.edu).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "coordinate_map.hpp"
#include "coordinate_map_cpu.hpp"
#include "coordinate_map_key.hpp"
#include "coordinate_map_manager.hpp"
#include "errors.hpp"
#include "types.hpp"
#include "utils.hpp"
#include "pooling_avg_kernel.cuh"
#include "pooling_max_kernel.cuh"
// Ninja
#include "local_pooling_cpu.cpp"
#include <pybind11/pybind11.h>
#include <torch/extension.h>
namespace minkowski {
/*
 * Forward pass of local (kernel-window) pooling on sparse GPU tensors.
 *
 * in_feat       : (num_in, channels) contiguous CUDA feature tensor.
 * kernel_*      : pooling window size / stride / dilation per spatial axis.
 * region_type   : shape of the pooling region (`offset` for custom regions).
 * pooling_mode  : LOCAL_MAX_POOLING vs. the sum / nonzero-average path.
 * p_in_map_key / p_out_map_key : coordinate-map handles; the output map is
 *                 created by striding the input map if it is not set yet.
 * p_map_manager : owns the coordinate maps and produces the in->out kernel map.
 *
 * Returns (out_feat, aux):
 *   - max pooling : aux is an int tensor of argmax indices, consumed by
 *                   LocalPoolingBackwardGPU;
 *   - avg pooling : aux holds the per-output-row nonzero counts (floating
 *                   dtype; left empty unless LOCAL_AVG_POOLING).
 */
template <typename coordinate_type,
          template <typename C> class TemplatedAllocator>
std::pair<at::Tensor, at::Tensor> LocalPoolingForwardGPU(
    at::Tensor const &in_feat,
    default_types::stride_type const &kernel_size,     //
    default_types::stride_type const &kernel_stride,   //
    default_types::stride_type const &kernel_dilation, //
    RegionType::Type const region_type,                //
    at::Tensor const &offset,                          //
    PoolingMode::Type pooling_mode,                    //
    CoordinateMapKey *p_in_map_key,                    //
    CoordinateMapKey *p_out_map_key,                   //
    gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
  // Validate: features must be a contiguous 2-D CUDA tensor whose row count
  // matches the registered input coordinate map.
  ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous");
  ASSERT(in_feat.is_cuda(), "in_feat must be on CUDA");
  ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim());
  coordinate_map_key_type in_key = p_in_map_key->get_key();
  ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);
  ASSERT(in_feat.size(0) == p_map_manager->size(in_key), "Invalid in_feat size",
         in_feat.size(0), "!=", p_map_manager->size(in_key));
  // create an output coordinate map by striding the input map if needed
  if (!p_out_map_key->is_key_set()) {
    coordinate_map_key_type out_key =
        std::get<0>(p_map_manager->stride(in_key, kernel_stride));
    p_out_map_key->set_key(out_key);
  }
  // in->out index map describing which input rows pool into each output row.
  auto const &in_out = p_map_manager->kernel_map(
      p_in_map_key,    //
      p_out_map_key,   //
      kernel_size,     //
      kernel_stride,   //
      kernel_dilation, //
      region_type,     //
      offset, false /* is_transpose */, true /* is_pool */);
  auto const out_nrows = p_map_manager->size(p_out_map_key->get_key());
  at::Tensor out_feat =
      torch::zeros({out_nrows, in_feat.size(1)}, in_feat.options());
  LOG_DEBUG("Allocated", out_nrows, "x", in_feat.size(1), "features.");
  cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
  if (pooling_mode == PoolingMode::LOCAL_MAX_POOLING) {
    // Argmax bookkeeping: one int index per output element, zero-initialized.
    at::Tensor max_index = torch::empty({0}, torch::TensorOptions()
                                                  .device(in_feat.device())
                                                  .dtype(torch::kInt)
                                                  .requires_grad(false));
    max_index.resize_({out_nrows, in_feat.size(1)});
    max_index.zero_();
    TemplatedAllocator<char> byte_allocator;
    AT_DISPATCH_FLOATING_TYPES(
        in_feat.scalar_type(), "local_pooling_forward_gpu", [&] {
          MaxPoolingForwardKernelGPU<scalar_t, default_types::index_type,
                                     TemplatedAllocator<char>>(
              in_feat.template data_ptr<scalar_t>(),
              out_feat.template data_ptr<scalar_t>(), out_nrows,
              max_index.data_ptr<int>(), in_feat.size(1), in_out,
              byte_allocator, stream);
        });
    return std::make_pair(out_feat, max_index);
  } else {
    // Sum / average path: runs through cuSPARSE on the current stream.
    at::Tensor num_nonzero =
        torch::empty({0}, in_feat.options().requires_grad(false));
    // Only average pooling needs the per-row denominator tensor.
    if (pooling_mode == PoolingMode::LOCAL_AVG_POOLING) {
      num_nonzero.resize_({out_nrows});
      num_nonzero.zero_();
    }
    cusparseHandle_t handle = at::cuda::getCurrentCUDASparseHandle();
    cusparseSetStream(handle, stream);
    AT_DISPATCH_FLOATING_TYPES(
        in_feat.scalar_type(), "local_pooling_forward_gpu", [&] {
          TemplatedAllocator<char> byte_allocator;
          NonzeroAvgPoolingForwardKernelGPU<scalar_t, default_types::index_type,
                                            TemplatedAllocator<char>>(
              in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
              out_feat.template data_ptr<scalar_t>(), out_nrows,
              num_nonzero.template data_ptr<scalar_t>(), in_feat.size(1),
              in_out, pooling_mode == PoolingMode::LOCAL_AVG_POOLING,
              byte_allocator, handle, stream);
        });
    return std::make_pair(out_feat, num_nonzero);
  }
}
/*
 * Backward pass of local pooling: scatters grad_out_feat back onto the
 * input rows through the same in->out kernel map as the forward pass.
 *
 * num_nonzero : the auxiliary tensor returned by LocalPoolingForwardGPU —
 *               argmax indices (read as int*) for max pooling, per-row
 *               nonzero counts (floating) for the sum/average path.
 * Returns grad_in_feat with the same shape as in_feat.
 */
template <typename coordinate_type,
          template <typename C> class TemplatedAllocator>
at::Tensor LocalPoolingBackwardGPU(
    at::Tensor const &in_feat,                         //
    at::Tensor const &grad_out_feat,                   //
    at::Tensor const &num_nonzero,                     //
    default_types::stride_type const &kernel_size,     //
    default_types::stride_type const &kernel_stride,   //
    default_types::stride_type const &kernel_dilation, //
    RegionType::Type const region_type,                //
    at::Tensor const &offset,                          //
    PoolingMode::Type pooling_mode,                    //
    CoordinateMapKey *p_in_map_key,                    //
    CoordinateMapKey *p_out_map_key,                   //
    gpu_manager_type<coordinate_type, TemplatedAllocator> *p_map_manager) {
  // Both feature tensors must be contiguous 2-D CUDA tensors of equal dtype.
  // NOTE(review): the "grad_out_feata" typo below is a runtime message and
  // is kept as-is here.
  ASSERT(in_feat.is_contiguous(), "in_feat must be contiguous");
  ASSERT(grad_out_feat.is_contiguous(), "grad_out_feata must be contiguous");
  ASSERT(in_feat.is_cuda(), "in_feat must be on CUDA");
  ASSERT(grad_out_feat.is_cuda(), "in_feat must be on CUDA");
  ASSERT(in_feat.scalar_type() == grad_out_feat.scalar_type(), "type mismatch");
  ASSERT(in_feat.dim() == 2, "in_feat.dim():", in_feat.dim());
  ASSERT(grad_out_feat.dim() == 2, "grad_out_feat.dim():", grad_out_feat.dim());
  // Both coordinate maps must already exist (created by the forward pass).
  coordinate_map_key_type in_key = p_in_map_key->get_key();
  ASSERT(p_map_manager->exists(in_key), ERROR_MAP_NOT_FOUND);
  coordinate_map_key_type out_key = p_out_map_key->get_key();
  ASSERT(p_map_manager->exists(out_key), ERROR_MAP_NOT_FOUND);
  // Same kernel-map arguments as the forward call, so the same in->out map
  // is retrieved from the manager.
  auto const &in_out = p_map_manager->kernel_map(
      p_in_map_key,    //
      p_out_map_key,   //
      kernel_size,     //
      kernel_stride,   //
      kernel_dilation, //
      region_type,     //
      offset, false /* is_transpose */, true /* is_pool */);
  // Gradient buffer starts at zero; kernels accumulate into it.
  at::Tensor grad_in_feat =
      torch::zeros({in_feat.size(0), in_feat.size(1)}, in_feat.options());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();
  if (pooling_mode == PoolingMode::LOCAL_MAX_POOLING) {
    AT_DISPATCH_FLOATING_TYPES(
        in_feat.scalar_type(), "local_pooling_backward_gpu", [&] {
          // num_nonzero carries the forward pass's int argmax indices here.
          MaxPoolingBackwardKernelGPU<scalar_t>(
              grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
              grad_out_feat.template data_ptr<scalar_t>(),
              grad_out_feat.size(0), num_nonzero.data_ptr<int>(),
              in_feat.size(1), stream);
        });
  } else {
    AT_DISPATCH_FLOATING_TYPES(
        in_feat.scalar_type(), "local_pooling_backward_gpu", [&] {
          NonzeroAvgPoolingBackwardKernelGPU<
              scalar_t, default_types::index_type, TemplatedAllocator<char>>(
              grad_in_feat.template data_ptr<scalar_t>(), in_feat.size(0),
              grad_out_feat.template data_ptr<scalar_t>(),
              grad_out_feat.size(0), num_nonzero.template data_ptr<scalar_t>(),
              in_feat.size(1), in_out,
              pooling_mode == PoolingMode::LOCAL_AVG_POOLING, stream);
        });
  }
  return grad_in_feat;
}
// Forward
template std::pair<at::Tensor, at::Tensor>
LocalPoolingForwardGPU<default_types::dcoordinate_type,
detail::default_allocator>(
at::Tensor const &in_feat,
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator>
*p_map_manager);
template std::pair<at::Tensor, at::Tensor>
LocalPoolingForwardGPU<default_types::dcoordinate_type, detail::c10_allocator>(
at::Tensor const &in_feat,
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator>
*p_map_manager);
// Backward
template at::Tensor LocalPoolingBackwardGPU<default_types::dcoordinate_type,
detail::default_allocator>(
at::Tensor const &in_feat, //
at::Tensor const &grad_out_feat, //
at::Tensor const &num_nonzero, //
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::default_allocator>
*p_map_manager);
template at::Tensor
LocalPoolingBackwardGPU<default_types::dcoordinate_type, detail::c10_allocator>(
at::Tensor const &in_feat, //
at::Tensor const &grad_out_feat, //
at::Tensor const &num_nonzero, //
default_types::stride_type const &kernel_size, //
default_types::stride_type const &kernel_stride, //
default_types::stride_type const &kernel_dilation, //
RegionType::Type const region_type, //
at::Tensor const &offset, //
PoolingMode::Type pooling_mode, //
CoordinateMapKey *p_in_map_key, //
CoordinateMapKey *p_out_map_key, //
gpu_manager_type<default_types::dcoordinate_type, detail::c10_allocator>
*p_map_manager);
} // end namespace minkowski
|
2910b7f13371832e670686d3de07248652968180.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <iostream>
#include "paddle/fluid/operators/center_loss_op.h"
#include "paddle/fluid/platform/assert.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_info.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
// Computes the per-sample difference to its class center:
//   centers_diff[row] = X[row] - centers[ids[row]]
// Sample rows (K of them) are distributed over (blockIdx.x, threadIdx.y)
// and strided by BlockDimY * GridDimX; the D feature columns are strided
// over threadIdx.x.  N is the number of centers, so 0 <= ids[row] < N.
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void ComputeDifferent(T *centers_diff, const T *X, const T *centers,
                                 const int64_t *ids, const int64_t N,
                                 const int64_t K, const int64_t D) {
  int idx = threadIdx.x;                          // column lane within a row
  int idy = blockIdx.x + threadIdx.y * GridDimX;  // first row for this thread
  while (idy < K) {
    int64_t id = ids[idy];
    PADDLE_ASSERT_MSG(id >= 0, "received id:", id);
    PADDLE_ASSERT_MSG(id < N, "received id:", id);
    T *out = centers_diff + idy * D;
    const T *x = X + idy * D;
    const T *cent = centers + id * D;
    for (int i = idx; i < D; i += BlockDimX) {
      out[i] = x[i] - cent[i];
    }
    idy += BlockDimY * GridDimX;                  // next row for this thread
  }
}
// Center-loss update rule: for every sample row idy with label id = ids[idy],
//   centers[id] += alpha[0] * centers_diff[idy] / (1 + m_id)
// where m_id is the number of samples in this batch sharing label id (the
// divisor starts at 1, matching the center-loss paper and guaranteeing it is
// never zero).  Rows are distributed over (blockIdx.x, threadIdx.y); feature
// columns over threadIdx.x.  Rows with equal labels may update the same
// center concurrently, hence the atomic add.
// Fixes vs. previous revision: removed an unused outer `int count;` that was
// shadowed by the loop-local declaration, and repaired the mis-encoded
// `&cent[i]` argument.
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void UpdateCenters(T *centers, T *centers_diff, const int64_t *ids,
                              const int64_t N, const int64_t K, const int64_t D,
                              const T *alpha) {
  int idx = threadIdx.x;
  int idy = blockIdx.x + threadIdx.y * GridDimX;
  while (idy < K) {
    int count = 1;  // "+1" term of the paper's denominator
    int64_t id = ids[idy];
    PADDLE_ASSERT_MSG(id >= 0, "received id:", id);
    PADDLE_ASSERT_MSG(id < N, "received id:", id);
    // O(K) scan counting batch rows that carry the same label.
    for (int i = 0; i < K; i++) {
      if (ids[i] == id) {
        count++;
      }
    }
    const T *diff = centers_diff + idy * D;
    T *cent = centers + id * D;
    for (int i = idx; i < D; i += BlockDimX) {
      paddle::platform::CudaAtomicAdd(&cent[i], alpha[0] * diff[i] / count);
    }
    idy += BlockDimY * GridDimX;
  }
}
// GPU kernel for the center-loss op:
//   diff_i = x_i - c_{y_i};  loss_i = ||diff_i||^2 / 2
// and, when need_update is set, moves each used center towards its samples
// (see UpdateCenters above).
template <typename DeviceContext, typename T>
class CenterLossCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    auto &device_context = ctx.template device_context<DeviceContext>();
    auto stream = device_context.stream();
    auto *X = ctx.Input<Tensor>("X");  // deep feature
    auto *labels = ctx.Input<Tensor>("Label");
    auto *centers = ctx.Input<Tensor>("Centers");
    auto *update_rate = ctx.Input<Tensor>("CenterUpdateRate");
    int cluster_num = ctx.Attr<int>("cluster_num");
    auto *lr_center = update_rate->data<T>();  // device pointer read in-kernel
    // NOTE(review): the bool attribute is round-tripped through T here; the
    // value is unaffected but the cast looks unintended.
    bool need_update = static_cast<T>(ctx.Attr<bool>("need_update"));
    auto x_data = X->data<T>();
    auto label_data = labels->data<int64_t>();
    auto x_dims = X->dims();
    int batch_size = x_dims[0];
    const int deep_feat_dim = x_dims[1];
    auto *centers_diff = ctx.Output<Tensor>("SampleCenterDiff");
    auto centers_diff_data = centers_diff->mutable_data<T>(ctx.GetPlace());
    auto centers_data = centers->data<T>();
    auto centers_dim = centers->dims();
    auto *out_loss = ctx.Output<Tensor>("Loss");
    // mutable_data allocates the output buffer; the pointer itself is unused.
    auto loss_data = out_loss->mutable_data<T>(ctx.GetPlace());
    auto *centers_out = ctx.Output<Tensor>("CentersOut");
    auto *centers_out_data = centers_out->mutable_data<T>(ctx.GetPlace());
    auto ctx_place = ctx.GetPlace();
    // Centers are updated out-of-place unless input and output alias.
    if (centers != centers_out) {
      framework::TensorCopy(
          *static_cast<const framework::Tensor *>(centers), ctx_place,
          *platform::DeviceContextPool::Instance().Get(ctx_place),
          static_cast<framework::Tensor *>(centers_out));
    }
    int64_t numel = X->numel();
    size_t N = centers->dims()[0];  // number of centers
    size_t D = centers->dims()[1];  // feature dimension
    size_t K = labels->numel();     // batch size
    // Launch shape must match the <T, 128, 8, 8> template arguments below.
    dim3 threads(128, 8);
    dim3 grids(8, 1);
    hipLaunchKernelGGL(( ComputeDifferent<T, 128, 8, 8>), dim3(grids), dim3(threads), 0, stream,
        centers_diff_data, x_data, centers_data, label_data, N, K, D);
    // loss_i = sum_j diff_{ij}^2 / 2, evaluated with Eigen on the device.
    auto &place = *ctx.template device_context<DeviceContext>().eigen_device();
    auto sub_result = EigenMatrix<T>::From(*centers_diff);
    auto sub_res_pow2 = (sub_result * sub_result) / T(2.0);
    auto z = EigenVector<T>::Flatten(*out_loss);
    z.device(place) = sub_res_pow2.sum(Eigen::array<int, 1>({{1}}));
    if (need_update) {
      hipLaunchKernelGGL(( UpdateCenters<T, 128, 8, 8>), dim3(grids), dim3(threads), 0, stream,
          centers_out_data, centers_diff_data, label_data, N, K, D, lr_center);
    }
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using GPUCtx = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(center_loss, ops::CenterLossCUDAKernel<GPUCtx, float>,
ops::CenterLossCUDAKernel<GPUCtx, double>);
REGISTER_OP_CUDA_KERNEL(center_loss_grad,
ops::CenterLossGradKernel<GPUCtx, float>,
ops::CenterLossGradKernel<GPUCtx, double>);
| 2910b7f13371832e670686d3de07248652968180.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <iostream>
#include "paddle/fluid/operators/center_loss_op.h"
#include "paddle/fluid/platform/assert.h"
#include "paddle/fluid/platform/cuda_primitives.h"
#include "paddle/fluid/platform/gpu_info.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
// Writes the per-sample deviation from its class center:
//   centers_diff[row] = X[row] - centers[ids[row]]
// Rows are partitioned over (blockIdx.x, threadIdx.y) with stride
// BlockDimY * GridDimX; the D feature columns are strided over threadIdx.x.
// Every label must lie in [0, N).
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void ComputeDifferent(T *centers_diff, const T *X, const T *centers,
                                 const int64_t *ids, const int64_t N,
                                 const int64_t K, const int64_t D) {
  const int lane = threadIdx.x;
  const int row_stride = BlockDimY * GridDimX;
  for (int row = blockIdx.x + threadIdx.y * GridDimX; row < K;
       row += row_stride) {
    const int64_t label = ids[row];
    PADDLE_ASSERT_MSG(label >= 0, "received id:", label);
    PADDLE_ASSERT_MSG(label < N, "received id:", label);
    const T *sample = X + row * D;
    const T *center_row = centers + label * D;
    T *diff_row = centers_diff + row * D;
    for (int col = lane; col < D; col += BlockDimX) {
      diff_row[col] = sample[col] - center_row[col];
    }
  }
}
// Center-loss update rule: for every sample row idy with label id = ids[idy],
//   centers[id] += alpha[0] * centers_diff[idy] / (1 + m_id)
// where m_id is the number of samples in this batch sharing label id (the
// divisor starts at 1, matching the center-loss paper and guaranteeing it is
// never zero).  Rows are distributed over (blockIdx.x, threadIdx.y); feature
// columns over threadIdx.x.  Rows with equal labels may update the same
// center concurrently, hence the atomic add.
// Fixes vs. previous revision: removed an unused outer `int count;` that was
// shadowed by the loop-local declaration, and repaired the mis-encoded
// `&cent[i]` argument.
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void UpdateCenters(T *centers, T *centers_diff, const int64_t *ids,
                              const int64_t N, const int64_t K, const int64_t D,
                              const T *alpha) {
  int idx = threadIdx.x;
  int idy = blockIdx.x + threadIdx.y * GridDimX;
  while (idy < K) {
    int count = 1;  // "+1" term of the paper's denominator
    int64_t id = ids[idy];
    PADDLE_ASSERT_MSG(id >= 0, "received id:", id);
    PADDLE_ASSERT_MSG(id < N, "received id:", id);
    // O(K) scan counting batch rows that carry the same label.
    for (int i = 0; i < K; i++) {
      if (ids[i] == id) {
        count++;
      }
    }
    const T *diff = centers_diff + idy * D;
    T *cent = centers + id * D;
    for (int i = idx; i < D; i += BlockDimX) {
      paddle::platform::CudaAtomicAdd(&cent[i], alpha[0] * diff[i] / count);
    }
    idy += BlockDimY * GridDimX;
  }
}
// GPU kernel for the center-loss op:
//   diff_i = x_i - c_{y_i};  loss_i = ||diff_i||^2 / 2
// and, when need_update is set, moves each used center towards its samples
// (see UpdateCenters above).
template <typename DeviceContext, typename T>
class CenterLossCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    auto &device_context = ctx.template device_context<DeviceContext>();
    auto stream = device_context.stream();
    auto *X = ctx.Input<Tensor>("X");  // deep feature
    auto *labels = ctx.Input<Tensor>("Label");
    auto *centers = ctx.Input<Tensor>("Centers");
    auto *update_rate = ctx.Input<Tensor>("CenterUpdateRate");
    int cluster_num = ctx.Attr<int>("cluster_num");
    auto *lr_center = update_rate->data<T>();  // device pointer read in-kernel
    // NOTE(review): the bool attribute is round-tripped through T here; the
    // value is unaffected but the cast looks unintended.
    bool need_update = static_cast<T>(ctx.Attr<bool>("need_update"));
    auto x_data = X->data<T>();
    auto label_data = labels->data<int64_t>();
    auto x_dims = X->dims();
    int batch_size = x_dims[0];
    const int deep_feat_dim = x_dims[1];
    auto *centers_diff = ctx.Output<Tensor>("SampleCenterDiff");
    auto centers_diff_data = centers_diff->mutable_data<T>(ctx.GetPlace());
    auto centers_data = centers->data<T>();
    auto centers_dim = centers->dims();
    auto *out_loss = ctx.Output<Tensor>("Loss");
    // mutable_data allocates the output buffer; the pointer itself is unused.
    auto loss_data = out_loss->mutable_data<T>(ctx.GetPlace());
    auto *centers_out = ctx.Output<Tensor>("CentersOut");
    auto *centers_out_data = centers_out->mutable_data<T>(ctx.GetPlace());
    auto ctx_place = ctx.GetPlace();
    // Centers are updated out-of-place unless input and output alias.
    if (centers != centers_out) {
      framework::TensorCopy(
          *static_cast<const framework::Tensor *>(centers), ctx_place,
          *platform::DeviceContextPool::Instance().Get(ctx_place),
          static_cast<framework::Tensor *>(centers_out));
    }
    int64_t numel = X->numel();
    size_t N = centers->dims()[0];  // number of centers
    size_t D = centers->dims()[1];  // feature dimension
    size_t K = labels->numel();     // batch size
    // Launch shape must match the <T, 128, 8, 8> template arguments below.
    dim3 threads(128, 8);
    dim3 grids(8, 1);
    ComputeDifferent<T, 128, 8, 8><<<grids, threads, 0, stream>>>(
        centers_diff_data, x_data, centers_data, label_data, N, K, D);
    // loss_i = sum_j diff_{ij}^2 / 2, evaluated with Eigen on the device.
    auto &place = *ctx.template device_context<DeviceContext>().eigen_device();
    auto sub_result = EigenMatrix<T>::From(*centers_diff);
    auto sub_res_pow2 = (sub_result * sub_result) / T(2.0);
    auto z = EigenVector<T>::Flatten(*out_loss);
    z.device(place) = sub_res_pow2.sum(Eigen::array<int, 1>({{1}}));
    if (need_update) {
      UpdateCenters<T, 128, 8, 8><<<grids, threads, 0, stream>>>(
          centers_out_data, centers_diff_data, label_data, N, K, D, lr_center);
    }
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
using GPUCtx = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(center_loss, ops::CenterLossCUDAKernel<GPUCtx, float>,
ops::CenterLossCUDAKernel<GPUCtx, double>);
REGISTER_OP_CUDA_KERNEL(center_loss_grad,
ops::CenterLossGradKernel<GPUCtx, float>,
ops::CenterLossGradKernel<GPUCtx, double>);
|
134a55569848e01c0faede3b3524c8cee7972473.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/opencv.hpp>
#include <vector>
// 3x3 mean (box) blur over an interleaved 3-channel 8-bit image.
// One thread per interior pixel; the 1-pixel border of `out` is left
// unwritten.  `data`/`out` are rows*cols*3 bytes, row-major, channel-interleaved.
__global__ void blur ( unsigned char * data, unsigned char * out, std::size_t cols, std::size_t rows) {
    auto i = blockIdx.x * blockDim.x + threadIdx.x;  // column
    auto j = blockIdx.y * blockDim.y + threadIdx.y;  // row
    if ( i > 0 && i < (cols - 1) && j > 0 && j < (rows - 1)) {
        for (auto c = 0; c < 3; ++c){
            // Sum of the full 3x3 neighbourhood (8 neighbours + centre).
            auto gu = data[((j - 1) * cols + i - 1) * 3 + c] + data[((j - 1) * cols + i + 1) * 3 + c]
                + data[( j * cols + i - 1) * 3 + c] + data[( j * cols + i + 1) * 3 + c]
                + data[((j + 1) * cols + i - 1) * 3 + c] + data[((j + 1) * cols + i + 1) * 3 + c]
                + data[(( j - 1) * cols + i) * 3 + c] + data[( j * cols + i) * 3 + c]
                + data[(( j + 1) * cols + i) * 3 + c];
            out[(j * cols + i) * 3 + c] = (gu / 9);  // integer mean of 9 samples
        }
    }
}
// Loads "in.jpg", runs the 3x3 box-blur kernel on the GPU, times the
// kernel + device-to-host copy with HIP events, and writes "outBlur.jpg".
int main()
{
    cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
    auto rgb = m_in.data;
    auto rows = m_in.rows;
    auto cols = m_in.cols;

    // Host output buffer, wrapped by a cv::Mat header (no extra copy).
    std::vector< unsigned char > g( 3 * rows * cols );
    cv::Mat m_out( rows, cols, CV_8UC3, g.data() );

    unsigned char * rgb_d;
    unsigned char * out;
    hipMalloc( &rgb_d, 3 * rows * cols );
    hipMalloc( &out, 3 * rows * cols );
    hipMemcpy( rgb_d, rgb, 3 * rows * cols, hipMemcpyHostToDevice );

    // One thread per pixel; ceil-divide so the whole image is covered.
    dim3 t( 32, 32 );
    dim3 be( ( cols - 1 ) / t.x + 1, ( rows - 1 ) / t.y + 1 );

    hipEvent_t start, stop;
    hipEventCreate( &start );
    hipEventCreate( &stop );

    hipEventRecord( start );
    hipLaunchKernelGGL( blur, be, t, 0, 0, rgb_d, out, cols, rows );
    // The blocking memcpy also waits for the kernel to finish, so no extra
    // hipDeviceSynchronize is needed.
    hipMemcpy( g.data(), out, 3 * rows * cols, hipMemcpyDeviceToHost );
    // Record `stop` right after the copy so the measured interval covers the
    // kernel + transfer only (previously it was recorded after the std::cout
    // calls below, inflating the reported time with console I/O).
    hipEventRecord( stop );
    hipEventSynchronize( stop );

    // NOTE: this variable was previously named `hipError_t`, shadowing the
    // HIP error *type* of the same name.
    auto err = hipGetLastError();
    if (err != hipSuccess){
        std::cout << hipGetErrorName(err) << std::endl;
        std::cout << hipGetErrorString(err) << std::endl;
    }
    else {
        std::cout << "Aucune erreur" << std::endl;
    }

    float duration = 0.0f;
    hipEventElapsedTime( &duration, start, stop );
    std::cout << "Total: " << duration << "ms\n";

    cv::imwrite( "outBlur.jpg", m_out );

    // Release GPU resources (events were previously leaked).
    hipEventDestroy( start );
    hipEventDestroy( stop );
    hipFree( rgb_d );
    hipFree( out );
    return 0;
}
| 134a55569848e01c0faede3b3524c8cee7972473.cu | #include <opencv2/opencv.hpp>
#include <vector>
// 3x3 mean (box) blur over an interleaved 3-channel 8-bit image.
// One thread per interior pixel; the 1-pixel border of `out` is left
// unwritten.  Buffers are rows*cols*3 bytes, row-major, channel-interleaved.
__global__ void blur ( unsigned char * data, unsigned char * out, std::size_t cols, std::size_t rows) {
    auto col = blockIdx.x * blockDim.x + threadIdx.x;
    auto row = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard clause: skip the border and any thread past the image.
    if (col == 0 || col >= cols - 1 || row == 0 || row >= rows - 1)
        return;
    for (auto c = 0; c < 3; ++c) {
        // Accumulate the full 3x3 neighbourhood (centre included).
        unsigned sum = 0;
        for (int dy = -1; dy <= 1; ++dy) {
            for (int dx = -1; dx <= 1; ++dx) {
                sum += data[((row + dy) * cols + (col + dx)) * 3 + c];
            }
        }
        out[(row * cols + col) * 3 + c] = sum / 9;  // integer mean
    }
}
// Loads "in.jpg", runs the 3x3 box-blur kernel on the GPU, times the
// kernel + device-to-host copy with CUDA events, and writes "outBlur.jpg".
int main()
{
    cv::Mat m_in = cv::imread("in.jpg", cv::IMREAD_UNCHANGED );
    auto rgb = m_in.data;
    auto rows = m_in.rows;
    auto cols = m_in.cols;

    // Host output buffer, wrapped by a cv::Mat header (no extra copy).
    std::vector< unsigned char > g( 3 * rows * cols );
    cv::Mat m_out( rows, cols, CV_8UC3, g.data() );

    unsigned char * rgb_d;
    unsigned char * out;
    cudaMalloc( &rgb_d, 3 * rows * cols );
    cudaMalloc( &out, 3 * rows * cols );
    cudaMemcpy( rgb_d, rgb, 3 * rows * cols, cudaMemcpyHostToDevice );

    // One thread per pixel; ceil-divide so the whole image is covered.
    dim3 t( 32, 32 );
    dim3 be( ( cols - 1 ) / t.x + 1, ( rows - 1 ) / t.y + 1 );

    cudaEvent_t start, stop;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );

    cudaEventRecord( start );
    blur<<< be, t >>>( rgb_d, out, cols, rows );
    // The blocking memcpy also waits for the kernel to finish, so no extra
    // cudaDeviceSynchronize is needed.
    cudaMemcpy( g.data(), out, 3 * rows * cols, cudaMemcpyDeviceToHost );
    // Record `stop` right after the copy so the measured interval covers the
    // kernel + transfer only (previously it was recorded after the std::cout
    // calls below, inflating the reported time with console I/O).
    cudaEventRecord( stop );
    cudaEventSynchronize( stop );

    // NOTE: this variable was previously named `cudaError`, shadowing the
    // CUDA error *type* of the same name.
    auto err = cudaGetLastError();
    if (err != cudaSuccess){
        std::cout << cudaGetErrorName(err) << std::endl;
        std::cout << cudaGetErrorString(err) << std::endl;
    }
    else {
        std::cout << "Aucune erreur" << std::endl;
    }

    float duration = 0.0f;
    cudaEventElapsedTime( &duration, start, stop );
    std::cout << "Total: " << duration << "ms\n";

    cv::imwrite( "outBlur.jpg", m_out );

    // Release GPU resources (events were previously leaked).
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    cudaFree( rgb_d );
    cudaFree( out );
    return 0;
}
|
d05d205db2494fc8e6930677311de2cf2d27ffe9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Matrix addition: C = A + B.
* Matrix subtraction: C = A - B.
* Matrixr transpose: B = A^t
* Matrix Multiplication: C = A * B.
* Matrix Copy: B = A.
*/
#ifndef _MATRIX_KERNEL_H_
#define _MATRIX_KERNEL_H_
// Device code
// Element-wise matrix addition C = A + B for an h x w matrix stored
// row-major as a flat array.  One thread per element; launch 1-D with at
// least h*w threads.
__global__ void MatAdd(float* C, const float* A, const float* B, int h, int w)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < h * w)  // guard the grid tail
        C[i] = A[i] + B[i];
}
// Element-wise matrix subtraction C = A - B (flat row-major h x w layout).
// One thread per element; launch 1-D with at least h*w threads.
__global__ void MatSub(float* C, const float* A, const float* B, int h, int w)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < h * w)  // guard the grid tail
        C[i] = A[i] - B[i];
}
// Out-of-place transpose B = A^T.  A is h x w row-major; element i of A
// (row i/w, column i%w) lands at B[(i%w) * h + (i/w)] in the w x h output.
// Note: writes to B are strided (uncoalesced); fine for small matrices.
__global__ void MatTranspose(float* B, const float* A, int h, int w)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < h * w)
        B[(i % w) * h + (i / w)] = A[i];
}
//Matrix Multipication
// Tiled matrix multiplication C = A * B using 16x16 shared-memory tiles.
// A is (rows-of-C) x Aw, B is Aw x Bw, all row-major.  Launch with
// blockDim = (16, 16) and one block per 16x16 tile of C; Aw, Bw and the row
// count must be multiples of 16 (no edge handling).
__global__ void MatMult(float* C,const float* A, const float* B, int Aw, int Bw)
{
    // Block/thread coordinates within the C tile grid.
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Tile edge.  BUGFIX: must equal the shared tile dimension (16) and the
    // 16x16 block dimensions; the previous value of 32 read past the end of
    // the 16x16 shared arrays in the inner product (s_A[ty][k] with k up to
    // 31) and skipped every other tile of A and B.
    constexpr int BLOCK_SIZE = 16;
    // Shared-memory tiles of A and B.
    __shared__ float s_A[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float s_B[BLOCK_SIZE][BLOCK_SIZE];
    int A_start = Aw * BLOCK_SIZE * by;   // first element of A's tile row
    int A_stop = A_start + Aw - 1;        // last element of that row
    int A_step = BLOCK_SIZE;              // advance A one tile right
    int B_start = BLOCK_SIZE * bx;        // first element of B's tile column
    int B_step = BLOCK_SIZE * Bw;         // advance B one tile down
    float s_C = 0;
    for (int a = A_start, b = B_start; a <= A_stop; a += A_step, b += B_step)
    {
        // Each thread loads one element of each tile.
        s_A[ty][tx] = A[a + Aw * ty + tx];
        s_B[tx][ty] = B[b + Bw * tx + ty];
        __syncthreads();                  // tiles fully loaded before use
        for (int k = 0; k < BLOCK_SIZE; ++k)
            s_C += s_A[ty][k] * s_B[k][tx];
        __syncthreads();                  // done reading before next load
    }
    int c = Bw * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + Bw * ty + tx] = s_C;
}
//MATCOPY
// Element-wise copy B = A (flat row-major h x w layout).
// One thread per element; launch 1-D with at least h*w threads.
__global__ void MatCopy(float* B, const float* A, int h, int w)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < h * w)  // guard the grid tail
        B[i] = A[i];
}
#endif // _MATRIX_KERNEL_H_
| d05d205db2494fc8e6930677311de2cf2d27ffe9.cu | /* Matrix addition: C = A + B.
* Matrix subtraction: C = A - B.
* Matrixr transpose: B = A^t
* Matrix Multiplication: C = A * B.
* Matrix Copy: B = A.
*/
#ifndef _MATRIX_KERNEL_H_
#define _MATRIX_KERNEL_H_
// Device code
// Element-wise matrix addition C = A + B for an h x w matrix stored
// row-major as a flat array; one thread per element, launched 1-D.
__global__ void MatAdd(float* C, const float* A, const float* B, int h, int w)
{
    const int total = h * w;
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= total)   // guard the grid tail
        return;
    C[idx] = A[idx] + B[idx];
}
// Element-wise matrix subtraction C = A - B (flat row-major h x w layout);
// one thread per element, launched 1-D.
__global__ void MatSub(float* C, const float* A, const float* B, int h, int w)
{
    const int total = h * w;
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= total)   // guard the grid tail
        return;
    C[idx] = A[idx] - B[idx];
}
// Out-of-place transpose B = A^T for a row-major h x w input; the output is
// w x h.  One thread per input element, launched 1-D.
__global__ void MatTranspose(float* B, const float* A, int h, int w)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= h * w)
        return;
    const int row = idx / w;
    const int col = idx % w;
    B[col * h + row] = A[idx];
}
//Matrix Multipication
// Tiled matrix multiplication C = A * B using 16x16 shared-memory tiles.
// A is (rows-of-C) x Aw, B is Aw x Bw, all row-major.  Launch with
// blockDim = (16, 16) and one block per 16x16 tile of C; Aw, Bw and the row
// count must be multiples of 16 (no edge handling).
__global__ void MatMult(float* C,const float* A, const float* B, int Aw, int Bw)
{
    // Block/thread coordinates within the C tile grid.
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Tile edge.  BUGFIX: must equal the shared tile dimension (16) and the
    // 16x16 block dimensions; the previous value of 32 read past the end of
    // the 16x16 shared arrays in the inner product (s_A[ty][k] with k up to
    // 31) and skipped every other tile of A and B.
    constexpr int BLOCK_SIZE = 16;
    // Shared-memory tiles of A and B.
    __shared__ float s_A[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float s_B[BLOCK_SIZE][BLOCK_SIZE];
    int A_start = Aw * BLOCK_SIZE * by;   // first element of A's tile row
    int A_stop = A_start + Aw - 1;        // last element of that row
    int A_step = BLOCK_SIZE;              // advance A one tile right
    int B_start = BLOCK_SIZE * bx;        // first element of B's tile column
    int B_step = BLOCK_SIZE * Bw;         // advance B one tile down
    float s_C = 0;
    for (int a = A_start, b = B_start; a <= A_stop; a += A_step, b += B_step)
    {
        // Each thread loads one element of each tile.
        s_A[ty][tx] = A[a + Aw * ty + tx];
        s_B[tx][ty] = B[b + Bw * tx + ty];
        __syncthreads();                  // tiles fully loaded before use
        for (int k = 0; k < BLOCK_SIZE; ++k)
            s_C += s_A[ty][k] * s_B[k][tx];
        __syncthreads();                  // done reading before next load
    }
    int c = Bw * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + Bw * ty + tx] = s_C;
}
//MATCOPY
// Element-wise copy B = A (flat row-major h x w layout); one thread per
// element, launched 1-D.
__global__ void MatCopy(float* B, const float* A, int h, int w)
{
    const int total = h * w;
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < total)    // guard the grid tail
        B[idx] = A[idx];
}
#endif // _MATRIX_KERNEL_H_
|
2945bb91cbeac43f6120db51b7aa6bd171a01db7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from dgemm_tesla_T_T_special.cu normal d -> s, Tue Sep 2 12:38:17 2014
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
* saxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
// c += alpha * b for 16-element vectors.  The fixed-trip loop with
// #pragma unroll generates the same fully-unrolled sequence as the original
// sixteen explicit statements.
static __device__ void saxpy(
    float alpha,
    const float* __restrict__ b,
    float* __restrict__ c )
{
    #pragma unroll
    for (int i = 0; i < 16; ++i) {
        c[i] += alpha * b[i];
    }
}
/**
Purpose:
--------
This routine computes
C = alpha * A^T*B^T + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This kernel is for matrices divisible by the corresponding
blocking sizes.
@ingroup magma_sblas3
********************************************************************/
__global__ void
sgemm_kernel_T_T_64_16_16_16_4_special(
    float* __restrict__ C,
    const float* __restrict__ A,
    const float* __restrict__ B,
    int m, int n, int k,
    int lda, int ldb, int ldc,
    float alpha, float beta )
{
    // B tile staged in shared memory; the +1 column pad avoids shared-memory
    // bank conflicts on the row-wise reads inside saxpy.
    __shared__ float Bb[16][17];
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    // Block-cyclic choice of the 16-wide column panel: rotating by blockIdx.x
    // staggers which panel each block row starts on.
    int iby = ((blockIdx.y + blockIdx.x) % (n/16))*16;
    const int idt = ty * 16 + tx;     // flat thread id within the 16x4 block
    int ibx = blockIdx.x * 64+idt;    // this thread's row of the 64-row panel
    //int iby = blockIdx.y * 16;
    A += ibx;                         // A stepped with stride lda below
    B += tx + __mul24(iby+ty, ldb);
    C += __mul24(ibx, ldc) + iby;     // this thread's 16-column slice of C
    const float *Bend = B + k;        // loop sentinel; assumes k % 16 == 0
    // Per-thread accumulator: one 16-element slice of the C tile.
    float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    do {
        // Prefetch 4 elements of A into registers while B is staged.
        float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
        // The 16x4 threads cooperatively load a 16x16 tile of B.
        Bb[tx][ty+0 ] = B[0*ldb];
        Bb[tx][ty+4 ] = B[4*ldb];
        Bb[tx][ty+8 ] = B[8*ldb];
        Bb[tx][ty+12] = B[12*ldb];
        __syncthreads();              // tile complete before any thread reads
        A += 4 * lda;
        // Rank-1 updates, software-pipelined: consume Ab while reloading it
        // with the next 4 elements of A.
        saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda];
        saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda];
        saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda];
        saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda];
        A += 4 * lda;
        saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda];
        saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda];
        saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda];
        saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda];
        A += 4 * lda;
        saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda];
        saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda];
        saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda];
        saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda];
        A += 4 * lda;
        // Last 4 rank-1 updates of this tile; nothing left to prefetch.
        saxpy( Ab[0], &Bb[12][0], Cb );
        saxpy( Ab[1], &Bb[13][0], Cb );
        saxpy( Ab[2], &Bb[14][0], Cb );
        saxpy( Ab[3], &Bb[15][0], Cb );
        B += 16;                      // next 16-deep slice of B
        __syncthreads();              // done reading before the next store
    } while (B < Bend);
    // Epilogue: C = alpha*A^T*B^T + beta*C for this thread's 16 columns.
    #pragma unroll 16
    for(int i = 0; i < 16; i++) {
        C[i] = alpha * Cb[i] + beta * C[i];
    }
}
// Host launcher for the special-case T/T SGEMM kernel on magma_stream.
// Preconditions (unchecked): m % 64 == 0 and n % 16 == 0, since the grid
// has no remainder blocks; k % 16 == 0 is assumed by the kernel loop.
extern "C" void
magmablas_sgemm_T_T_64_16_16_16_4_special(
float *C, const float *A, const float *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
float alpha, float beta )
{
// 16x4 = 64 threads per block; each block computes a 64x16 tile of C.
dim3 threads( 16, 4 );
dim3 grid( m/64, n/16 );
hipLaunchKernelGGL(( sgemm_kernel_T_T_64_16_16_16_4_special), dim3(grid), dim3(threads), 0, magma_stream ,
C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
| 2945bb91cbeac43f6120db51b7aa6bd171a01db7.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from dgemm_tesla_T_T_special.cu normal d -> s, Tue Sep 2 12:38:17 2014
*/
#include "common_magma.h"
#include "commonblas_s.h"
/*
* saxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
// c += alpha * b for fixed 16-element vectors b and c.
// Behaviorally identical to a fully hand-unrolled body: the unroll
// pragma on a constant-trip loop produces the same straight-line FMAs.
static __device__ void saxpy(
    float alpha,
    const float* __restrict__ b,
    float* __restrict__ c )
{
    #pragma unroll
    for (int i = 0; i < 16; ++i) {
        c[i] += alpha * b[i];
    }
}
/**
Purpose:
--------
This routine computes
C = alpha * A^T*B^T + beta * C
B is put into shared memory
Parameters Used:
blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4
This kernel is for matrices divisible by the corresponding
blocking sizes.
@ingroup magma_sblas3
********************************************************************/
// Computes C = alpha * A^T * B^T + beta * C for the "special" case where
// m % 64 == 0, n % 16 == 0 and k % 16 == 0 (no remainder handling).
// Launch: grid(m/64, n/16), block(16, 4) -> 64 threads, each owning one
// row of a 64x16 tile of C (16 accumulators in registers).
__global__ void
sgemm_kernel_T_T_64_16_16_16_4_special(
float* __restrict__ C,
const float* __restrict__ A,
const float* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
float alpha, float beta )
{
// 16x16 tile of B staged in shared memory; the extra column (+1) pads
// the inner dimension so column accesses avoid bank conflicts.
__shared__ float Bb[16][17];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
// Column panel of C for this block. The (bidy+bidx) mod (n/16) rotation
// staggers the starting panel per block row -- presumably to spread
// memory traffic across partitions; TODO confirm original intent.
int iby = ((blockIdx.y + blockIdx.x) % (n/16))*16;
const int idt = ty * 16 + tx;
int ibx = blockIdx.x * 64+idt;
//int iby = blockIdx.y * 16;
// Rebase pointers: A to this thread's row, B to this thread's staging
// element, C to this thread's output row segment.
A += ibx;
B += tx + __mul24(iby+ty, ldb);
C += __mul24(ibx, ldc) + iby;
const float *Bend = B + k;
// Per-thread accumulators: one 16-wide row segment of the C tile.
float Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
do {
// Prefetch 4 A values; cooperatively stage a 16x16 B tile (each of the
// 4 ty-rows stores 4 of the 16 tile rows).
float Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]};
Bb[tx][ty+0 ] = B[0*ldb];
Bb[tx][ty+4 ] = B[4*ldb];
Bb[tx][ty+8 ] = B[8*ldb];
Bb[tx][ty+12] = B[12*ldb];
__syncthreads();
A += 4 * lda;
// Sixteen rank-1 updates, software-pipelined: each saxpy folds one
// k-slice into the accumulators while the next A values are loaded.
saxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda];
saxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda];
saxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda];
saxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda];
A += 4 * lda;
saxpy( Ab[0], &Bb[12][0], Cb );
saxpy( Ab[1], &Bb[13][0], Cb );
saxpy( Ab[2], &Bb[14][0], Cb );
saxpy( Ab[3], &Bb[15][0], Cb );
B += 16;
// Barrier before the next iteration overwrites Bb.
__syncthreads();
} while (B < Bend);
// Final scaled write-back of the accumulated tile row.
#pragma unroll 16
for(int i = 0; i < 16; i++) {
C[i] = alpha * Cb[i] + beta * C[i];
}
}
// Host launcher for the special-case T/T SGEMM kernel on magma_stream.
// Preconditions (unchecked): m % 64 == 0 and n % 16 == 0, since the grid
// has no remainder blocks; k % 16 == 0 is assumed by the kernel loop.
extern "C" void
magmablas_sgemm_T_T_64_16_16_16_4_special(
float *C, const float *A, const float *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
float alpha, float beta )
{
// 16x4 = 64 threads per block; each block computes a 64x16 tile of C.
dim3 threads( 16, 4 );
dim3 grid( m/64, n/16 );
sgemm_kernel_T_T_64_16_16_16_4_special<<< grid, threads, 0, magma_stream >>>
( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
|
10ec7ab566152e8e5777c3c9e63418d204c560a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "VeloUT.cuh"
// VeloUT tracking kernel: extends consolidated Velo tracks into the UT
// detector. One block per event (blockIdx.x == event index); threads of
// the block iterate over the event's Velo tracks in a strided loop.
__global__ void veloUT(
uint* dev_ut_hits,
uint* dev_ut_hit_offsets,
int* dev_atomics_storage,
uint* dev_velo_track_hit_number,
uint* dev_velo_track_hits,
uint* dev_velo_states,
VeloUTTracking::TrackUT* dev_veloUT_tracks,
int* dev_atomics_veloUT,
PrUTMagnetTool* dev_ut_magnet_tool,
float* dev_ut_dxDy,
const uint* dev_unique_x_sector_layer_offsets,
const uint* dev_unique_x_sector_offsets,
const float* dev_unique_sector_xs
) {
const uint number_of_events = gridDim.x;
const uint event_number = blockIdx.x;
// Entry [4] holds the total sector-group count across all 4 UT layers.
const uint number_of_unique_x_sectors = dev_unique_x_sector_layer_offsets[4];
const uint total_number_of_hits = dev_ut_hit_offsets[number_of_events * number_of_unique_x_sectors];
// Velo consolidated types
const Velo::Consolidated::Tracks velo_tracks {(uint*) dev_atomics_storage, dev_velo_track_hit_number, event_number, number_of_events};
const Velo::Consolidated::States velo_states {dev_velo_states, velo_tracks.total_number_of_tracks};
const uint number_of_tracks_event = velo_tracks.number_of_tracks(event_number);
const uint event_tracks_offset = velo_tracks.tracks_offset(event_number);
UTHitOffsets ut_hit_offsets {dev_ut_hit_offsets, event_number, number_of_unique_x_sectors, dev_unique_x_sector_layer_offsets};
UTHits ut_hits;
ut_hits.typecast_sorted(dev_ut_hits, total_number_of_hits);
/* dev_atomics_veloUT contains in an SoA:
1. # of veloUT tracks
2. # velo tracks in UT acceptance
*/
int* n_veloUT_tracks_event = dev_atomics_veloUT + event_number;
VeloUTTracking::TrackUT* veloUT_tracks_event = dev_veloUT_tracks + event_number * VeloUTTracking::max_num_tracks;
int* n_velo_tracks_in_UT_event = dev_atomics_veloUT + number_of_events + event_number;
// initialize atomic veloUT tracks counter (single thread, then barrier
// so all threads see the zeroed counters before using them)
if ( threadIdx.x == 0 ) {
*n_veloUT_tracks_event = 0;
*n_velo_tracks_in_UT_event = 0;
}
__syncthreads();
// if (threadIdx.x == 0) {
// for (int i=0; i<4; ++i) {
// printf("Layer %i hits:\n", i);
// for (int s=dev_unique_x_sector_layer_offsets[i]; s<dev_unique_x_sector_layer_offsets[i+1]; ++s) {
// printf(" Sector group %i, x %f:\n", s, dev_unique_sector_xs[s]);
// uint group_offset = ut_hit_offsets.sector_group_offset(s);
// uint n_hits_group = ut_hit_offsets.sector_group_number_of_hits(s);
// for (int j=0; j<n_hits_group; ++j) {
// const auto hit_index = group_offset + j;
// printf(" yBegin = %f, yEnd = %f, zAtYEq0 = %f, xAtYEq0 = %f, weight = %f, highThreshold = %u \n",
// ut_hits.yBegin[hit_index],
// ut_hits.yEnd[hit_index],
// ut_hits.zAtYEq0[hit_index],
// ut_hits.xAtYEq0[hit_index],
// ut_hits.weight[hit_index],
// ut_hits.highThreshold[hit_index]);
// }
// }
// }
// }
// Magnet-tool lookup tables used by the hit search and output fit.
const float* fudgeFactors = &(dev_ut_magnet_tool->dxLayTable[0]);
const float* bdlTable = &(dev_ut_magnet_tool->bdlTable[0]);
// array to store indices of selected hits in layers
// -> can then access the hit information in the HitsSoA
int hitCandidatesInLayers[VeloUTTracking::n_layers][VeloUTTracking::max_hit_candidates_per_layer];
int n_hitCandidatesInLayers[VeloUTTracking::n_layers];
// Strided loop: thread t of iteration i handles track i*blockDim.x + t.
for ( int i = 0; i < (number_of_tracks_event + blockDim.x - 1) / blockDim.x; ++i) {
const int i_track = i * blockDim.x + threadIdx.x;
const uint velo_states_index = event_tracks_offset + i_track;
if (i_track >= number_of_tracks_event) continue;
// Backward (away from UT) tracks cannot be extended; skip them.
if (velo_states.backward[velo_states_index]) continue;
// Mini State with only x, y, tx, ty and z
MiniState velo_state {velo_states, velo_states_index};
if(!veloTrackInUTAcceptance(velo_state)) continue;
atomicAdd(n_velo_tracks_in_UT_event, 1);
// for storing calculated x position of hits for this track
float x_pos_layers[VeloUTTracking::n_layers][VeloUTTracking::max_hit_candidates_per_layer];
for ( int i_layer = 0; i_layer < VeloUTTracking::n_layers; ++i_layer ) {
n_hitCandidatesInLayers[i_layer] = 0;
}
// Collect candidate UT hits per layer; a track with no candidates
// anywhere is dropped here.
if( !getHits(
hitCandidatesInLayers,
n_hitCandidatesInLayers,
x_pos_layers,
ut_hits,
ut_hit_offsets,
fudgeFactors,
velo_state,
dev_ut_dxDy,
dev_unique_sector_xs,
dev_unique_x_sector_layer_offsets)
) continue;
TrackHelper helper {velo_state};
// indices within hitCandidatesInLayers for selected hits belonging to best track
int hitCandidateIndices[VeloUTTracking::n_layers];
// go through UT layers in forward direction
if(!formClusters(
hitCandidatesInLayers,
n_hitCandidatesInLayers,
x_pos_layers,
hitCandidateIndices,
ut_hits,
ut_hit_offsets,
helper,
velo_state,
dev_ut_dxDy,
true)) {
// go through UT layers in backward direction
formClusters(
hitCandidatesInLayers,
n_hitCandidatesInLayers,
x_pos_layers,
hitCandidateIndices,
ut_hits,
ut_hit_offsets,
helper,
velo_state,
dev_ut_dxDy,
false);
}
// If any UT hits were attached, fit and emit the extended track.
if ( helper.n_hits > 0 ) {
const uint velo_track_hit_number = velo_tracks.number_of_hits(i_track);
const Velo::Consolidated::Hits velo_track_hits = velo_tracks.get_hits(dev_velo_track_hits, i_track);
prepareOutputTrack(
velo_track_hits,
velo_track_hit_number,
helper,
velo_state,
hitCandidatesInLayers,
n_hitCandidatesInLayers,
ut_hits,
ut_hit_offsets,
x_pos_layers,
hitCandidateIndices,
veloUT_tracks_event,
n_veloUT_tracks_event,
bdlTable);
}
} // velo tracks
}
| 10ec7ab566152e8e5777c3c9e63418d204c560a3.cu | #include "VeloUT.cuh"
__global__ void veloUT(
uint* dev_ut_hits,
uint* dev_ut_hit_offsets,
int* dev_atomics_storage,
uint* dev_velo_track_hit_number,
uint* dev_velo_track_hits,
uint* dev_velo_states,
VeloUTTracking::TrackUT* dev_veloUT_tracks,
int* dev_atomics_veloUT,
PrUTMagnetTool* dev_ut_magnet_tool,
float* dev_ut_dxDy,
const uint* dev_unique_x_sector_layer_offsets,
const uint* dev_unique_x_sector_offsets,
const float* dev_unique_sector_xs
) {
const uint number_of_events = gridDim.x;
const uint event_number = blockIdx.x;
const uint number_of_unique_x_sectors = dev_unique_x_sector_layer_offsets[4];
const uint total_number_of_hits = dev_ut_hit_offsets[number_of_events * number_of_unique_x_sectors];
// Velo consolidated types
const Velo::Consolidated::Tracks velo_tracks {(uint*) dev_atomics_storage, dev_velo_track_hit_number, event_number, number_of_events};
const Velo::Consolidated::States velo_states {dev_velo_states, velo_tracks.total_number_of_tracks};
const uint number_of_tracks_event = velo_tracks.number_of_tracks(event_number);
const uint event_tracks_offset = velo_tracks.tracks_offset(event_number);
UTHitOffsets ut_hit_offsets {dev_ut_hit_offsets, event_number, number_of_unique_x_sectors, dev_unique_x_sector_layer_offsets};
UTHits ut_hits;
ut_hits.typecast_sorted(dev_ut_hits, total_number_of_hits);
/* dev_atomics_veloUT contains in an SoA:
1. # of veloUT tracks
2. # velo tracks in UT acceptance
*/
int* n_veloUT_tracks_event = dev_atomics_veloUT + event_number;
VeloUTTracking::TrackUT* veloUT_tracks_event = dev_veloUT_tracks + event_number * VeloUTTracking::max_num_tracks;
int* n_velo_tracks_in_UT_event = dev_atomics_veloUT + number_of_events + event_number;
// initialize atomic veloUT tracks counter
if ( threadIdx.x == 0 ) {
*n_veloUT_tracks_event = 0;
*n_velo_tracks_in_UT_event = 0;
}
__syncthreads();
// if (threadIdx.x == 0) {
// for (int i=0; i<4; ++i) {
// printf("Layer %i hits:\n", i);
// for (int s=dev_unique_x_sector_layer_offsets[i]; s<dev_unique_x_sector_layer_offsets[i+1]; ++s) {
// printf(" Sector group %i, x %f:\n", s, dev_unique_sector_xs[s]);
// uint group_offset = ut_hit_offsets.sector_group_offset(s);
// uint n_hits_group = ut_hit_offsets.sector_group_number_of_hits(s);
// for (int j=0; j<n_hits_group; ++j) {
// const auto hit_index = group_offset + j;
// printf(" yBegin = %f, yEnd = %f, zAtYEq0 = %f, xAtYEq0 = %f, weight = %f, highThreshold = %u \n",
// ut_hits.yBegin[hit_index],
// ut_hits.yEnd[hit_index],
// ut_hits.zAtYEq0[hit_index],
// ut_hits.xAtYEq0[hit_index],
// ut_hits.weight[hit_index],
// ut_hits.highThreshold[hit_index]);
// }
// }
// }
// }
const float* fudgeFactors = &(dev_ut_magnet_tool->dxLayTable[0]);
const float* bdlTable = &(dev_ut_magnet_tool->bdlTable[0]);
// array to store indices of selected hits in layers
// -> can then access the hit information in the HitsSoA
int hitCandidatesInLayers[VeloUTTracking::n_layers][VeloUTTracking::max_hit_candidates_per_layer];
int n_hitCandidatesInLayers[VeloUTTracking::n_layers];
for ( int i = 0; i < (number_of_tracks_event + blockDim.x - 1) / blockDim.x; ++i) {
const int i_track = i * blockDim.x + threadIdx.x;
const uint velo_states_index = event_tracks_offset + i_track;
if (i_track >= number_of_tracks_event) continue;
if (velo_states.backward[velo_states_index]) continue;
// Mini State with only x, y, tx, ty and z
MiniState velo_state {velo_states, velo_states_index};
if(!veloTrackInUTAcceptance(velo_state)) continue;
atomicAdd(n_velo_tracks_in_UT_event, 1);
// for storing calculated x position of hits for this track
float x_pos_layers[VeloUTTracking::n_layers][VeloUTTracking::max_hit_candidates_per_layer];
for ( int i_layer = 0; i_layer < VeloUTTracking::n_layers; ++i_layer ) {
n_hitCandidatesInLayers[i_layer] = 0;
}
if( !getHits(
hitCandidatesInLayers,
n_hitCandidatesInLayers,
x_pos_layers,
ut_hits,
ut_hit_offsets,
fudgeFactors,
velo_state,
dev_ut_dxDy,
dev_unique_sector_xs,
dev_unique_x_sector_layer_offsets)
) continue;
TrackHelper helper {velo_state};
// indices within hitCandidatesInLayers for selected hits belonging to best track
int hitCandidateIndices[VeloUTTracking::n_layers];
// go through UT layers in forward direction
if(!formClusters(
hitCandidatesInLayers,
n_hitCandidatesInLayers,
x_pos_layers,
hitCandidateIndices,
ut_hits,
ut_hit_offsets,
helper,
velo_state,
dev_ut_dxDy,
true)) {
// go through UT layers in backward direction
formClusters(
hitCandidatesInLayers,
n_hitCandidatesInLayers,
x_pos_layers,
hitCandidateIndices,
ut_hits,
ut_hit_offsets,
helper,
velo_state,
dev_ut_dxDy,
false);
}
if ( helper.n_hits > 0 ) {
const uint velo_track_hit_number = velo_tracks.number_of_hits(i_track);
const Velo::Consolidated::Hits velo_track_hits = velo_tracks.get_hits(dev_velo_track_hits, i_track);
prepareOutputTrack(
velo_track_hits,
velo_track_hit_number,
helper,
velo_state,
hitCandidatesInLayers,
n_hitCandidatesInLayers,
ut_hits,
ut_hit_offsets,
x_pos_layers,
hitCandidateIndices,
veloUT_tracks_event,
n_veloUT_tracks_event,
bdlTable);
}
} // velo tracks
}
|
2ba0773444b2b273bf0c6eca5a3cc83285692c6a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define COALESCED_NUM 16
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimX 65536
#define globalDimY 1
// Machine-generated (gCompiler) sum reduction over an array of float2
// complex values: each value contributes real + imag to the running sum.
// Each thread privately accumulates 4 interleaved segments, combines
// them pairwise, then the block cooperates through a shared-memory tree
// reduction, writing one partial per surviving thread into B.
// NOTE(review): the tree stages are guarded by nidx (a global, swizzled
// index) rather than tidx, and have no __syncthreads() between the last
// stages -- presumably correct for the generated launch configuration
// (globalDimX 65536, blockDimX 512); verify before reusing elsewhere.
__global__ void reduction_complex(float * A, float * B, int size, int segSize)
{
#pragma gCompiler gValue segSize 262144
int k;
float sum;
int nidx;
__shared__ float shared_0[512];
// Swizzled global index: groups of 16 lanes mapped across 2048-wide
// strides (generator-chosen layout for coalesced float2 loads).
nidx=((((tidx/16)*2048)+(idx&15))+((idx/512)*16));
float tmp_4;
float tmp_5;
float tmp_2;
float tmp_3;
// Segment 1: accumulate real+imag at nidx, stepping by 262144.
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[(nidx+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_2=sum;
__syncthreads();
// Segment 2: same accumulation offset by 131072.
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[((nidx+131072)+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_3=sum;
__syncthreads();
// Combine segments 1+2.
float a;
float b;
float c;
a=tmp_2;
b=tmp_3;
c=(a+b);
tmp_4=c;
// Segment 3: offset by 65536.
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[((nidx+65536)+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_2=sum;
__syncthreads();
// Segment 4: offset by 65536+131072.
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[(((nidx+65536)+131072)+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_3=sum;
__syncthreads();
// Combine segments 3+4, then fold into the 1+2 partial and publish the
// per-thread total to shared memory for the tree reduction below.
a=tmp_2;
b=tmp_3;
c=(a+b);
tmp_5=c;
a=tmp_4;
b=tmp_5;
c=(a+b);
shared_0[(tidx+0)]=c;
__syncthreads();
// Shared-memory tree reduction: halve active threads each stage
// (strides 256, 128, 64, 32, 16), guarded by the global nidx bound.
if ((nidx<32768))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+256)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<16384))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+128)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<8192))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+64)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<4096))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+32)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
// Final stage writes the per-thread partial result to global output B.
if ((nidx<2048))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+16)];
c=(a+b);
{
B[nidx]=c;
}
}
}
| 2ba0773444b2b273bf0c6eca5a3cc83285692c6a.cu | #define COALESCED_NUM 16
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimX 65536
#define globalDimY 1
__global__ void reduction_complex(float * A, float * B, int size, int segSize)
{
#pragma gCompiler gValue segSize 262144
int k;
float sum;
int nidx;
__shared__ float shared_0[512];
nidx=((((tidx/16)*2048)+(idx&15))+((idx/512)*16));
float tmp_4;
float tmp_5;
float tmp_2;
float tmp_3;
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[(nidx+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_2=sum;
__syncthreads();
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[((nidx+131072)+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_3=sum;
__syncthreads();
float a;
float b;
float c;
a=tmp_2;
b=tmp_3;
c=(a+b);
tmp_4=c;
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[((nidx+65536)+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_2=sum;
__syncthreads();
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[(((nidx+65536)+131072)+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_3=sum;
__syncthreads();
a=tmp_2;
b=tmp_3;
c=(a+b);
tmp_5=c;
a=tmp_4;
b=tmp_5;
c=(a+b);
shared_0[(tidx+0)]=c;
__syncthreads();
if ((nidx<32768))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+256)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<16384))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+128)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<8192))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+64)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<4096))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+32)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<2048))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+16)];
c=(a+b);
{
B[nidx]=c;
}
}
}
|
61f10f7db14263c10450ec86235bdabe2421cb59.hip | // !!! This is a file automatically generated by hipify!!!
// Utilities and system includes
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#define DATA_TYPE 1 // 0-SP, 1-INT, 2-DP
#define SIZE 60000000
#define TILE_DIM 1024
#define INNER_REPS 4
// Microbenchmark kernel: loads two values per thread, performs a short
// chain of dependent multiplies (INNER_REPS iterations over 4 registers)
// and writes all four results so the compiler cannot dead-code them.
// NOTE(review): when xIndex == 0, A[SIZE-xIndex] reads A[SIZE], one past
// the end of the allocation -- out-of-bounds read; confirm and fix at
// the source (e.g. SIZE-1-xIndex) since changing it alters outputs.
template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
T ra, rb, rc, rd;
if (xIndex < SIZE) {
ra=A[xIndex];
rb=A[SIZE-xIndex];
rc=A[xIndex];
rd=A[SIZE-xIndex];
// rb=A[xIndex];
// Dependent multiply chain; unrolled to expose ILP to the scheduler.
#pragma unroll 4
for (int i=0;i<INNER_REPS;i++) {
ra=ra*rb;
rb=rb*rc;
rc=rc*rd;
rd=rd*ra;
}
C1[xIndex]=ra;
C2[xIndex]=rb;
C3[xIndex]=rc;
C4[xIndex]=rd;
}
}
// Host driver for the simpleKernel bandwidth microbenchmark.
// Usage: prog [outer_reps]  -- times outer_reps back-to-back launches
// with events and reports effective bandwidth.
// NOTE(review): no return codes are checked on the hipMalloc/hipMemcpy/
// launch calls, so allocation failures would surface only as garbage
// results; consider wrapping them in an error-check macro.
int main(int argc, char **argv) {
int outer_reps, vector_size, tile_dim;
vector_size = SIZE;
tile_dim = TILE_DIM;
if (argc>1){
outer_reps = atoi(argv[1]);
}else{
outer_reps = 1;
}
// execution configuration parameters
dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1);
// CUDA events
hipEvent_t start, stop;
size_t mem_size = static_cast<size_t>(sizeof(float) * vector_size);
// allocate host memory
float *h_iA = (float *) malloc(mem_size);
float *h_oC1 = (float *) malloc(mem_size);
float *h_oC2 = (float *) malloc(mem_size);
float *h_oC3 = (float *) malloc(mem_size);
float *h_oC4 = (float *) malloc(mem_size);
// initalize host data
for (int i = 0; i < vector_size; ++i)
{
h_iA[i] = (float) i+3;
// h_iB[i] = (float) i+3;
}
// allocate device memory
float *d_iA, *d_iB, *d_oC1, *d_oC2, *d_oC3, *d_oC4;
hipMalloc((void **) &d_iA, mem_size);
// hipMalloc((void **) &d_iB, mem_size);
hipMalloc((void **) &d_oC1, mem_size);
hipMalloc((void **) &d_oC2, mem_size);
hipMalloc((void **) &d_oC3, mem_size);
hipMalloc((void **) &d_oC4, mem_size);
// copy host data to device
hipMemcpy(d_iA, h_iA, mem_size, hipMemcpyHostToDevice);
// hipMemcpy(d_iB, h_iB, mem_size, hipMemcpyHostToDevice);
// print out common data for all kernels
printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x);
// initialize events
hipEventCreate(&start);
hipEventCreate(&stop);
// take measurements for loop over kernel launches
hipEventRecord(start, 0);
for (int i=0; i < outer_reps; i++)
{
hipLaunchKernelGGL(( simpleKernel<float>), dim3(grid), dim3(threads), 0, 0, d_iA, d_oC1, d_oC2, d_oC3, d_oC4);
}
hipEventRecord(stop, 0);
// Block until the stop event lands so the elapsed time is valid.
hipEventSynchronize(stop);
float kernelTime;
hipEventElapsedTime(&kernelTime, start, stop);
// take measurements for loop inside kernel
hipMemcpy(h_oC1, d_oC1, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_oC2, d_oC2, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_oC3, d_oC3, mem_size, hipMemcpyDeviceToHost);
hipMemcpy(h_oC4, d_oC4, mem_size, hipMemcpyDeviceToHost);
printf("teste: %f\n", h_oC1[0]);
// report effective bandwidths (2x: one read + one write per element)
float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/outer_reps);
printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelBandwidth,
kernelTime/outer_reps,
vector_size, 1, tile_dim * 1);
free(h_iA);
// free(h_iB);
free(h_oC1);
free(h_oC2);
free(h_oC3);
free(h_oC4);
hipFree(d_iA);
// hipFree(d_iB);
hipFree(d_oC1);
hipFree(d_oC2);
hipFree(d_oC3);
hipFree(d_oC4);
hipEventDestroy(start);
hipEventDestroy(stop);
hipDeviceReset();
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
| 61f10f7db14263c10450ec86235bdabe2421cb59.cu | // Utilities and system includes
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_profiler_api.h>
#define DATA_TYPE 1 // 0-SP, 1-INT, 2-DP
#define SIZE 60000000
#define TILE_DIM 1024
#define INNER_REPS 4
// Microbenchmark kernel: loads two values per thread, performs a short
// chain of dependent multiplies (INNER_REPS iterations over 4 registers)
// and writes all four results so the compiler cannot dead-code them.
// NOTE(review): when xIndex == 0, A[SIZE-xIndex] reads A[SIZE], one past
// the end of the allocation -- out-of-bounds read; confirm and fix at
// the source (e.g. SIZE-1-xIndex) since changing it alters outputs.
template <class T> __global__ void simpleKernel(T *A, T *C1, T *C2, T *C3, T *C4)
{
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
T ra, rb, rc, rd;
if (xIndex < SIZE) {
ra=A[xIndex];
rb=A[SIZE-xIndex];
rc=A[xIndex];
rd=A[SIZE-xIndex];
// rb=A[xIndex];
// Dependent multiply chain; unrolled to expose ILP to the scheduler.
#pragma unroll 4
for (int i=0;i<INNER_REPS;i++) {
ra=ra*rb;
rb=rb*rc;
rc=rc*rd;
rd=rd*ra;
}
C1[xIndex]=ra;
C2[xIndex]=rb;
C3[xIndex]=rc;
C4[xIndex]=rd;
}
}
int main(int argc, char **argv) {
int outer_reps, vector_size, tile_dim;
vector_size = SIZE;
tile_dim = TILE_DIM;
if (argc>1){
outer_reps = atoi(argv[1]);
}else{
outer_reps = 1;
}
// execution configuration parameters
dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1);
// CUDA events
cudaEvent_t start, stop;
size_t mem_size = static_cast<size_t>(sizeof(float) * vector_size);
// allocate host memory
float *h_iA = (float *) malloc(mem_size);
float *h_oC1 = (float *) malloc(mem_size);
float *h_oC2 = (float *) malloc(mem_size);
float *h_oC3 = (float *) malloc(mem_size);
float *h_oC4 = (float *) malloc(mem_size);
// initalize host data
for (int i = 0; i < vector_size; ++i)
{
h_iA[i] = (float) i+3;
// h_iB[i] = (float) i+3;
}
// allocate device memory
float *d_iA, *d_iB, *d_oC1, *d_oC2, *d_oC3, *d_oC4;
cudaMalloc((void **) &d_iA, mem_size);
// cudaMalloc((void **) &d_iB, mem_size);
cudaMalloc((void **) &d_oC1, mem_size);
cudaMalloc((void **) &d_oC2, mem_size);
cudaMalloc((void **) &d_oC3, mem_size);
cudaMalloc((void **) &d_oC4, mem_size);
// copy host data to device
cudaMemcpy(d_iA, h_iA, mem_size, cudaMemcpyHostToDevice);
// cudaMemcpy(d_iB, h_iB, mem_size, cudaMemcpyHostToDevice);
// print out common data for all kernels
printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x);
// initialize events
cudaEventCreate(&start);
cudaEventCreate(&stop);
// take measurements for loop over kernel launches
cudaEventRecord(start, 0);
for (int i=0; i < outer_reps; i++)
{
simpleKernel<float><<<grid, threads>>>(d_iA, d_oC1, d_oC2, d_oC3, d_oC4);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float kernelTime;
cudaEventElapsedTime(&kernelTime, start, stop);
// take measurements for loop inside kernel
cudaMemcpy(h_oC1, d_oC1, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_oC2, d_oC2, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_oC3, d_oC3, mem_size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_oC4, d_oC4, mem_size, cudaMemcpyDeviceToHost);
printf("teste: %f\n", h_oC1[0]);
// report effective bandwidths
float kernelBandwidth = 2.0f * 1000.0f * mem_size/(1024*1024*1024)/(kernelTime/outer_reps);
printf("simpleKernel, Throughput = %.4f GB/s, Time = %.5f ms, Size = %u fp32 elements, NumDevsUsed = %u, Workgroup = %u\n",
kernelBandwidth,
kernelTime/outer_reps,
vector_size, 1, tile_dim * 1);
free(h_iA);
// free(h_iB);
free(h_oC1);
free(h_oC2);
free(h_oC3);
free(h_oC4);
cudaFree(d_iA);
// cudaFree(d_iB);
cudaFree(d_oC1);
cudaFree(d_oC2);
cudaFree(d_oC3);
cudaFree(d_oC4);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaDeviceReset();
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
|
4a1807d14fd19abea0298077a99a3a567df7c5bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Writes target[i] = 1.0f when mat[i] >= val, else 0.0f, for all i < len.
// Grid-stride loop: each thread advances by the total thread count, so
// the kernel is correct for any grid/block configuration.
__global__ void kGreaterThanEqScalar(float* mat, float val, float* target, unsigned int len) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < len; i += stride)
        target[i] = (mat[i] >= val) ? 1.0f : 0.0f;
} | 4a1807d14fd19abea0298077a99a3a567df7c5bc.cu | #include "includes.h"
__global__ void kGreaterThanEqScalar(float* mat, float val, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = mat[i] >= val;
} |
e900b32da270640f9d5bc857de98f06ee7476507.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "CudaMemPtr.h"
#include <stdio.h>
hipError_t trainWithCuda(CudaMemPtr* cudaMem, const double* weights,
double w0, double* results);
void cudaAllocationAndCpy(CudaMemPtr* cudaMem, const double* coords, int numOfPoints, int dimensions);
void checkErrors(hipError_t cudaStatus, CudaMemPtr* cudaMem, const char* errorMessage);
void freeCudaMem(CudaMemPtr* cudaMem);
__global__ void trainKernel(double *coords, const double * weights, double w0,
double *results, int dimensions, int numOfPoints, int pointsPerBlock);
/*
 * Computes, for each data point, the linear score
 *   results[id] = w0 + dot(coords[id], weights)
 * over `dimensions` components. One thread per point: thread t of block
 * b handles point id = b * pointsPerBlock + t (pointsPerBlock is the
 * launch's blockDim.x, passed in by the host wrapper).
 */
__global__ void trainKernel(double *coords, const double * weights, double w0,
double *results, int dimensions, int numOfPoints, int pointsPerBlock)
{
int id = blockIdx.x * pointsPerBlock + threadIdx.x, i;
// Guard the ragged tail by index. The previous guard
// (numOfPoints % blockDim.x <= threadIdx.x on the last block) wrongly
// disabled the ENTIRE last block whenever numOfPoints was an exact
// multiple of blockDim.x (remainder 0 <= every threadIdx.x), leaving
// the final blockDim.x results uncomputed.
if (id >= numOfPoints)
return;
results[id] = w0;
for (i = 0; i < dimensions; i++)
{
results[id] += coords[id*dimensions + i] * weights[i];
}
}
// Allocates the device buffers tracked by cudaMem (coords, weights,
// results), uploads the point coordinates, and records the dataset
// dimensions for later kernel launches. On any failure, checkErrors
// prints a message and frees the buffers (but execution continues --
// NOTE(review): subsequent calls after a failure operate on freed or
// null pointers; consider aborting in checkErrors instead).
void cudaAllocationAndCpy(CudaMemPtr* cudaMem, const double* coords, int numOfPoints, int dimensions)
{
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
checkErrors(cudaStatus, cudaMem,"hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
cudaStatus = hipMalloc((void**)&cudaMem->cudaCoords, numOfPoints * dimensions * sizeof(double));
checkErrors(cudaStatus, cudaMem, "hipMalloc failed! cudaCoords");
cudaStatus = hipMalloc((void**)&cudaMem->cudaWeights, dimensions * sizeof(double));
checkErrors(cudaStatus, cudaMem,"hipMalloc failed! cudaWeights");
cudaStatus = hipMalloc((void**)&cudaMem->cudaResults, numOfPoints * sizeof(double));
checkErrors(cudaStatus, cudaMem, "hipMalloc failed! cudaWeights");
cudaStatus = hipMemcpy(cudaMem->cudaCoords, coords, numOfPoints * dimensions * sizeof(double), hipMemcpyHostToDevice);
checkErrors(cudaStatus, cudaMem, "hipMemcpy failed! cudaCoords");
cudaMem->numOfPoints = numOfPoints;
cudaMem->dimensions = dimensions;
}
// Uploads the current weight vector, launches trainKernel over all
// points (one thread per point, blockDim = device max threads/block),
// and copies the per-point scores back into `results`.
// Returns the status of the last CUDA call performed.
hipError_t trainWithCuda(CudaMemPtr* cudaMem, const double* weights,
double w0, double* results)
{
double* cudaCoords = cudaMem->cudaCoords;
double* cudaWeights = cudaMem->cudaWeights;
double* cudaResults = cudaMem->cudaResults;
int numOfPoints = cudaMem->numOfPoints;
int dimensions = cudaMem->dimensions;
hipError_t cudaStatus;
int numOfBlocks, remainBlock, pointsPerBlock;
hipDeviceProp_t prop;
cudaMem->w0 = w0;
hipGetDeviceProperties(&prop, 0);
cudaStatus = hipMemcpy(cudaMem->cudaWeights, weights, dimensions * sizeof(double), hipMemcpyHostToDevice);
checkErrors(cudaStatus, cudaMem,
"hipMemcpy failed! cudaWeights");
// One thread per point; add a partial block only when numOfPoints is
// not an exact multiple of the block size.
pointsPerBlock = prop.maxThreadsPerBlock;
numOfBlocks = numOfPoints / prop.maxThreadsPerBlock;
remainBlock = numOfPoints % prop.maxThreadsPerBlock != 0;
hipLaunchKernelGGL(( trainKernel), dim3(numOfBlocks + remainBlock), dim3(pointsPerBlock) , 0, 0, cudaMem->cudaCoords, cudaWeights, w0, cudaResults, dimensions, numOfPoints, pointsPerBlock);
// Launch-configuration errors surface via hipGetLastError ...
cudaStatus = hipGetLastError();
checkErrors(cudaStatus, cudaMem,
"classifyKernel failed!");
// ... and asynchronous execution errors at the synchronize.
cudaStatus = hipDeviceSynchronize();
checkErrors(cudaStatus, cudaMem,
"hipDeviceSynchronize failed after classifyKernel!");
cudaStatus = hipMemcpy(results, cudaResults, numOfPoints * sizeof(double), hipMemcpyDeviceToHost);
checkErrors(cudaStatus, cudaMem,
"hipMemcpy failed! results");
return cudaStatus;
}
// Releases the three device buffers tracked by cudaMem.
// NOTE(review): pointers are not nulled after freeing, so a later
// checkErrors failure path may double-free them -- verify call order.
void freeCudaMem(CudaMemPtr* cudaMem)
{
hipFree(cudaMem->cudaCoords);
hipFree(cudaMem->cudaWeights);
hipFree(cudaMem->cudaResults);
}
// Reports a CUDA failure and releases the device buffers tracked in
// cudaMem; a success status is a no-op. Note this does NOT abort:
// callers continue after a failure, so later CUDA calls will keep
// failing against freed buffers.
void checkErrors(hipError_t cudaStatus,
CudaMemPtr* cudaMem, const char* message)
{
if (cudaStatus != hipSuccess)
{
// Print the message as data, not as the printf format string:
// passing caller-supplied text as the format is undefined behavior
// the moment it contains a '%' conversion specifier.
fprintf(stderr, "%s\n", message);
hipFree(cudaMem->cudaCoords);
hipFree(cudaMem->cudaWeights);
hipFree(cudaMem->cudaResults);
}
}
| e900b32da270640f9d5bc857de98f06ee7476507.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "CudaMemPtr.h"
#include <stdio.h>
cudaError_t trainWithCuda(CudaMemPtr* cudaMem, const double* weights,
double w0, double* results);
void cudaAllocationAndCpy(CudaMemPtr* cudaMem, const double* coords, int numOfPoints, int dimensions);
void checkErrors(cudaError_t cudaStatus, CudaMemPtr* cudaMem, const char* errorMessage);
void freeCudaMem(CudaMemPtr* cudaMem);
__global__ void trainKernel(double *coords, const double * weights, double w0,
double *results, int dimensions, int numOfPoints, int pointsPerBlock);
__global__ void trainKernel(double *coords, const double * weights, double w0,
	double *results, int dimensions, int numOfPoints, int pointsPerBlock)
{
	// One thread per point: results[id] = w0 + dot(coords[id], weights).
	int id = blockIdx.x * pointsPerBlock + threadIdx.x, i;
	// Guard the grid tail with a direct bounds check. The previous guard,
	//   (blockIdx.x == gridDim.x - 1 && numOfPoints % blockDim.x <= threadIdx.x)
	// made the ENTIRE last block return when numOfPoints was an exact
	// multiple of blockDim.x (remainder 0 <= every threadIdx.x), leaving
	// those points uncomputed. id >= numOfPoints is equivalent in the
	// remainder case and correct in the exact-multiple case.
	if (id >= numOfPoints)
		return;
	results[id] = w0;
	for (i = 0; i < dimensions; i++)
	{
		results[id] += coords[id*dimensions + i] * weights[i];
	}
}
void cudaAllocationAndCpy(CudaMemPtr* cudaMem, const double* coords, int numOfPoints, int dimensions)
{
	// Allocates the three device buffers (coords: numOfPoints*dimensions,
	// weights: dimensions, results: numOfPoints), copies the point
	// coordinates host-to-device, and records the sizes in the wrapper.
	cudaError_t cudaStatus;
	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = cudaSetDevice(0);
	checkErrors(cudaStatus, cudaMem,"cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
	cudaStatus = cudaMalloc((void**)&cudaMem->cudaCoords, numOfPoints * dimensions * sizeof(double));
	checkErrors(cudaStatus, cudaMem, "cudaMalloc failed! cudaCoords");
	cudaStatus = cudaMalloc((void**)&cudaMem->cudaWeights, dimensions * sizeof(double));
	checkErrors(cudaStatus, cudaMem,"cudaMalloc failed! cudaWeights");
	cudaStatus = cudaMalloc((void**)&cudaMem->cudaResults, numOfPoints * sizeof(double));
	// Message fixed: this allocation is cudaResults, not cudaWeights.
	checkErrors(cudaStatus, cudaMem, "cudaMalloc failed! cudaResults");
	cudaStatus = cudaMemcpy(cudaMem->cudaCoords, coords, numOfPoints * dimensions * sizeof(double), cudaMemcpyHostToDevice);
	checkErrors(cudaStatus, cudaMem, "cudaMemcpy failed! cudaCoords");
	cudaMem->numOfPoints = numOfPoints;
	cudaMem->dimensions = dimensions;
}
cudaError_t trainWithCuda(CudaMemPtr* cudaMem, const double* weights,
	double w0, double* results)
{
	// Runs one evaluation pass on the GPU: copies the current weight vector
	// to the device, launches trainKernel with one thread per point, and
	// copies the per-point results back to the host.
	//
	// Returns the last cudaError_t observed. On failure checkErrors() has
	// already printed a message and released the device buffers.
	double* cudaWeights = cudaMem->cudaWeights;
	double* cudaResults = cudaMem->cudaResults;
	int numOfPoints = cudaMem->numOfPoints;
	int dimensions = cudaMem->dimensions;
	cudaError_t cudaStatus;
	int numOfBlocks, remainBlock, pointsPerBlock;
	cudaDeviceProp prop;
	cudaMem->w0 = w0;
	// Check the device-properties query like every other runtime call
	// instead of silently ignoring its status.
	cudaStatus = cudaGetDeviceProperties(&prop, 0);
	checkErrors(cudaStatus, cudaMem,
		"cudaGetDeviceProperties failed!");
	cudaStatus = cudaMemcpy(cudaMem->cudaWeights, weights, dimensions * sizeof(double), cudaMemcpyHostToDevice);
	checkErrors(cudaStatus, cudaMem,
		"cudaMemcpy failed! cudaWeights");
	// One thread per point; add one extra block to cover the remainder tail.
	pointsPerBlock = prop.maxThreadsPerBlock;
	numOfBlocks = numOfPoints / prop.maxThreadsPerBlock;
	remainBlock = numOfPoints % prop.maxThreadsPerBlock != 0;
	trainKernel<<<numOfBlocks + remainBlock, pointsPerBlock >>>(cudaMem->cudaCoords, cudaWeights, w0, cudaResults, dimensions, numOfPoints, pointsPerBlock);
	// cudaGetLastError catches launch-configuration errors; the synchronize
	// below surfaces asynchronous execution errors from the kernel itself.
	// (Messages previously said "classifyKernel"; the kernel launched here
	// is trainKernel.)
	cudaStatus = cudaGetLastError();
	checkErrors(cudaStatus, cudaMem,
		"trainKernel failed!");
	cudaStatus = cudaDeviceSynchronize();
	checkErrors(cudaStatus, cudaMem,
		"cudaDeviceSynchronize failed after trainKernel!");
	cudaStatus = cudaMemcpy(results, cudaResults, numOfPoints * sizeof(double), cudaMemcpyDeviceToHost);
	checkErrors(cudaStatus, cudaMem,
		"cudaMemcpy failed! results");
	return cudaStatus;
}
void freeCudaMem(CudaMemPtr* cudaMem)
{
	// Release every device allocation owned by this wrapper.
	double* devPtrs[] = { cudaMem->cudaCoords, cudaMem->cudaWeights, cudaMem->cudaResults };
	for (int n = 0; n < 3; ++n)
		cudaFree(devPtrs[n]);
}
void checkErrors(cudaError_t cudaStatus,
	CudaMemPtr* cudaMem, const char* message)
{
	// On any failed CUDA call: print the supplied message to stderr and free
	// all device buffers.
	// NOTE(review): execution continues in the caller after a failure —
	// presumably intentional best-effort cleanup; confirm.
	if (cudaStatus != cudaSuccess)
	{
		// Pass message through "%s": using a runtime string directly as the
		// printf format string is undefined behavior if it ever contains '%'.
		fprintf(stderr, "%s", message);
		cudaFree(cudaMem->cudaCoords);
		cudaFree(cudaMem->cudaWeights);
		cudaFree(cudaMem->cudaResults);
	}
}
|
ab423e3f8dfba2dc7d693a0e1c6a14c43ea4e5a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void kernel_add_proj(float *d_a, float *d_b)
{
	// In-place element-wise accumulation over the 2D grid: d_a += d_b.
	// Flatten (blockIdx.y, blockIdx.x, threadIdx.x) into one linear offset.
	const int rowBase = blockDim.x * gridDim.x * blockIdx.y;
	const int colOffset = blockDim.x * blockIdx.x + threadIdx.x;
	const int linear = rowBase + colOffset;
	d_a[linear] += d_b[linear];
}
__global__ void update(float *d_f, float *d_f_weightedLenSum , float *d_f_LenSum, float beta)
{
	// SART-style additive image update: f += beta * weightedLenSum / LenSum,
	// skipped wherever the accumulated ray length is (near) zero so we never
	// divide by a vanishing denominator.
	// Voxel addressing: x from threadIdx.x, y from blockIdx.x, z from blockIdx.y.
	const int vx = threadIdx.x;
	const int vy = blockIdx.x;
	const int vz = blockIdx.y;
	const int voxel = vz*M*N + vy*M + vx;
	const float lenSum = d_f_LenSum[voxel];
	if (lenSum > volumn_x*1e-3f)
		d_f[voxel] += beta * d_f_weightedLenSum[voxel] / lenSum;
}
__global__ void forward_ray_driven_3d_kernel_correction_multiGPU(float *d_f , float *d_proj_correction, float *d_proj_data, float sin_theta, float cos_theta, int subPrjIdx, int command)
{
// d_f: 3D object array; d_f[i,j,k] = d_f [k*M*N+j*M+i];
// d_proj_data: 2D projection acquired at the angle of t_theta (only a portion of the whole projection view)
// d_proj_correction: 2D projection correction, (output of this function. i.e. c(i) in the paper)
// subPrjIdx: sub projection portion index
// Siddon-style forward projector: traces one ray per thread from the source
// (X2) to a detector element (X1), accumulating line-integral sums through
// the voxel grid. command==0 writes the forward projection; command==1
// writes the SART correction term.
// NOTE(review): launch layout inferred from the indexing below —
// x over threads/blocks.x, detector row from blockIdx.y, source index from
// blockIdx.z; confirm against the host-side launch configuration.
int proj_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int proj_z_idx = blockIdx.y;
int proj_src_idx = blockIdx.z;
int proj_pixel_index = R*Z_prj/Number_of_Devices* proj_src_idx + R * proj_z_idx + proj_x_idx;
// X2 point coordinate in (x,y,z) system . the source position
float vertex_x2_x, vertex_x2_y, vertex_x2_z;
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Source_z_min + proj_src_idx * Source_interval;
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x,vertex_x1_y,vertex_x1_z;
vertex_x1_x = DOD * cos_theta - (Detector_Ymin + proj_x_idx * Detector_pixel_x) * sin_theta;
vertex_x1_y = DOD * sin_theta + (Detector_Ymin + proj_x_idx * Detector_pixel_x) * cos_theta;
vertex_x1_z = Detector_Zmin + (Z_prj/Number_of_Devices*subPrjIdx+proj_z_idx) * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float one_ray_sum = 0.0f;
float one_ray_length = 0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Notice: it is still unsure here which one is the parametric value of the first intersection point of the ray with the x-plane
// It depends on whether source or detector lies on the left side of the reconstruction region at this time
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
// Note: when (vertex_x2_z == vertex_x1_z), alpha_min = -inf, alpha_max = inf.
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
// NOTE(review): the nested fmax/fmin below repeats the alpha_y term, but the
// result is still the exact three-way max/min, so behavior is correct.
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
one_ray_length = 0.0f ;
else
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x >= vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the most left x-plane, i_max the most right x-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //f (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y >= vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the most bottom y-plane, j_max the most top y-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Z direction
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6f )
{
k_min = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
k_max = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
// Note: this condition can be combined into either of the two branches.
}
else if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else //if (vertex_x1_z > vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,alpha_y_1,alpha_z_1)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x,alpha_y,alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (fabs(vertex_x1_x-vertex_x2_x)<volumn_x*1e-6f )
{
alpha_x = MAX_infi;
i = i_min-1;
}
else if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (fabs(vertex_x1_y-vertex_x2_y)<volumn_y*1e-6f )
{
alpha_y = MAX_infi;
j = j_min-1;
}
else if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y >= vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6f )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z <= vertex_x2_z)
{
alpha_z = (volumn_z * k_min + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If i or j or k should not be an integer, then its predecessor (along the ray)
// NOTE(review): 1e-16f is far below float epsilon, so this effectively loops
// until alpha_c reaches (or passes) alpha_max exactly; the matching
// back-projector uses 1e-6f — confirm the difference is intentional.
while (alpha_max - alpha_c > 1e-16f)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
// Six mutually exclusive cases: advance across the nearest plane(s).
// Ties (x==y, x==z, y==z, x==y==z) cross two or three planes at once.
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
one_ray_length += d_x1_x2 * (alpha_z - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_z - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
}
}// end tracing the ray
}//end if the ray interacts with the volume
// Rays that never crossed the volume (length ~ 0) write 0; otherwise write
// either the raw line integral (command 0) or the SART correction (command 1).
if (one_ray_length < volumn_z*1e-6f)
d_proj_correction[proj_pixel_index] = 0.0f;
else
{
if (command == 0)
d_proj_correction[proj_pixel_index] = one_ray_sum; // forward operator
else if (command == 1)
d_proj_correction[proj_pixel_index] = (d_proj_data[proj_pixel_index] - one_ray_sum)/one_ray_length; // projection correction (for SART)
}
// __syncthreads();
}
__global__ void backprj_ray_driven_3d_kernel_correction_multiGPU(float *d_f_weightedLenSum , float *d_f_LenSum , float *d_proj_correction, float sin_theta, float cos_theta, int subPrjIdx)
{
// d_f_weightedLenSum: 3D object array;
// d_f_LenSum: 3D object array;
// d_proj_data: 2D projection acquired at the angle of t_theta
// d_proj_correction: 2D projection correction, (output of this function. i.e. c(i) in the paper)
/* Note:
* dim3 dimGrid2(1,Z_prj,N_source);
* dim3 dimBlock2(R/1,1);
*/
int proj_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int proj_z_idx = blockIdx.y;
int proj_src_idx = blockIdx.z;
int proj_pixel_index = R*Z_prj/Number_of_Devices* proj_src_idx + R * proj_z_idx + proj_x_idx;
float proj_val = d_proj_correction[proj_pixel_index];
// X2 point coordinate in (x,y,z) system . the source position
float vertex_x2_x, vertex_x2_y, vertex_x2_z;
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Source_z_min + proj_src_idx * Source_interval;
//X1 point coordinate in (x,y,z) system --- detector pixel positions
float vertex_x1_x, vertex_x1_y, vertex_x1_z;
vertex_x1_x = DOD * cos_theta - (Detector_Ymin + proj_x_idx * Detector_pixel_x) * sin_theta;
vertex_x1_y = DOD * sin_theta + (Detector_Ymin + proj_x_idx * Detector_pixel_x) * cos_theta;
vertex_x1_z = Detector_Zmin + (Z_prj/Number_of_Devices*subPrjIdx+proj_z_idx) * Detector_pixel_x;
// Notice: vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
/***************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
/**** Step 1 :find out alpha_min, alpha_max ********/
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Notice: it is still unsure here which one is the parametric value of the first intersection point of the ray with the x-plane
// It depends on whether source or detector lies on the left side of the reconstruction region at this time
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
// Note: when (vertex_x2_z == vertex_x1_z), alpha_min = -inf, alpha_max = inf.
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
alpha_min = (fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min))); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = (fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max))); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
{
}
else // if ( (alpha_max > alpha_min) && (alpha_min > 0.0f) )
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x )*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x )*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else// if (vertex_x1_x >= vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x )*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x )*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the most left x-plane, i_max the most right x-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else if (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
}
else// if (vertex_x1_y >= vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the most bottom y-plane, j_max the most top y-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Z direction
if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else// if (vertex_x1_z >= vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,y,z)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x_1,alpha_y_1,alpha_z_1) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (fabs(vertex_x1_x-vertex_x2_x)<volumn_x*1e-6 )
{
alpha_x = MAX_infi;
i = i_min-1;
}
else if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * (float)i_min +boundary_voxel_x - vertex_x1_x )*inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * (float)i_max +boundary_voxel_x - vertex_x1_x )*inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (fabs(vertex_x1_y-vertex_x2_y)<volumn_y*1e-6 )
{
alpha_y = MAX_infi;
j = j_min-1;
}
else if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * (float)j_min +boundary_voxel_y - vertex_x1_y )*inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y > vertex_x2_y)
{
alpha_y = (volumn_y * (float)j_max +boundary_voxel_y - vertex_x1_y )*inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z < vertex_x2_z)
{
alpha_z = (volumn_z * (float)k_min +boundary_voxel_z - vertex_x1_z )*inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * (float)k_max +boundary_voxel_z - vertex_x1_z )*inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If i or j or k should not be an integer, then its predecessor (along the ray)
while (alpha_max - alpha_c > 1e-6f)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c) * proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
alpha_x = (volumn_x * (i+1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
alpha_x = (volumn_x * (i-1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c));
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
alpha_y = (volumn_y * (j+1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
alpha_y = (volumn_y * (j-1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_z - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_z - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_z - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_z - alpha_c));
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
alpha_z = (volumn_z * (k+1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
alpha_z = (volumn_z * (k-1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
alpha_x = (volumn_x * (i+1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
alpha_x = (volumn_x * (i-1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
alpha_y = (volumn_y * (j+1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
alpha_y = (volumn_y * (j-1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
alpha_x = (volumn_x * (i+1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
alpha_x = (volumn_x * (i-1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
alpha_z = (volumn_z * (k+1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
alpha_z = (volumn_z * (k-1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c));
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
alpha_y = (volumn_y * (j+1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
alpha_y = (volumn_y * (j-1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
alpha_z = (volumn_z * (k+1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
alpha_z = (volumn_z * (k-1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c) * proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
alpha_x = (volumn_x * (i+1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
alpha_x = (volumn_x * (i-1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
alpha_y = (volumn_y * (j+1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
alpha_y = (volumn_y * (j-1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
alpha_z = (volumn_z * (k+1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
alpha_z = (volumn_z * (k-1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
}
}
}// end tracing the ray
}//end if the ray interacts with the volume
// __syncthreads();
}
| ab423e3f8dfba2dc7d693a0e1c6a14c43ea4e5a3.cu | __global__ void kernel_add_proj(float *d_a, float *d_b)
{
int idx = blockDim.x * gridDim.x * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;
d_a[idx]=d_a[idx]+d_b[idx];
}
__global__ void update(float *d_f, float *d_f_weightedLenSum , float *d_f_LenSum, float beta)
{
    // Per-voxel SART image update: f += beta * weightedLenSum / LenSum.
    // Launch layout implied by the indexing: threadIdx.x -> x (M wide),
    // blockIdx.x -> y (N wide), blockIdx.y -> z (ZETA deep).
    const int vx = threadIdx.x;
    const int vy = blockIdx.x;
    const int vz = blockIdx.y;
    const int idx = (vz * N + vy) * M + vx;

    // Only update voxels that accumulated a meaningful intersection length;
    // this also guards the division against a near-zero denominator.
    const float len = d_f_LenSum[idx];
    if (len > volumn_x * 1e-3f)
        d_f[idx] += beta * d_f_weightedLenSum[idx] / len;
}
__global__ void forward_ray_driven_3d_kernel_correction_multiGPU(float *d_f , float *d_proj_correction, float *d_proj_data, float sin_theta, float cos_theta, int subPrjIdx, int command)
{
// d_f: 3D object array; d_f[i,j,k] = d_f [k*M*N+j*M+i];
// d_proj_data: 2D projection acquired at the angle of t_theta (only a portion of the whole projection view)
// d_proj_correction: 2D projection correction, (output of this function. i.e. c(i) in the paper)
// subPrjIdx: sub projection portion index
// sin_theta / cos_theta: sine/cosine of the gantry rotation angle for this view.
// command: 0 -> write the raw forward-projection ray sum;
//          1 -> write the SART correction (d_proj_data - ray_sum) / ray_length.
// One thread traces one source->detector ray through the voxel grid using a
// Siddon-style parametric traversal; the "(nn)" tags below refer to equation
// numbers in the underlying paper.
// Launch layout implied by the indexing: threadIdx.x/blockIdx.x tile the R
// detector columns, blockIdx.y is the detector row within this device's
// sub-projection (Z_prj/Number_of_Devices rows), blockIdx.z is the source index.
int proj_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int proj_z_idx = blockIdx.y;
int proj_src_idx = blockIdx.z;
int proj_pixel_index = R*Z_prj/Number_of_Devices* proj_src_idx + R * proj_z_idx + proj_x_idx;
// X2 point coordinate in (x,y,z) system . the source position
float vertex_x2_x, vertex_x2_y, vertex_x2_z;
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Source_z_min + proj_src_idx * Source_interval;
// Detector element center positions (X1): Coordinate in (x,y,z) system ---
float vertex_x1_x,vertex_x1_y,vertex_x1_z;
vertex_x1_x = DOD * cos_theta - (Detector_Ymin + proj_x_idx * Detector_pixel_x) * sin_theta;
vertex_x1_y = DOD * sin_theta + (Detector_Ymin + proj_x_idx * Detector_pixel_x) * cos_theta;
vertex_x1_z = Detector_Zmin + (Z_prj/Number_of_Devices*subPrjIdx+proj_z_idx) * Detector_pixel_x;
// Notice: in this system, vertex_x1_x < 0 < vertex_x2_x
// Reciprocals of the ray direction components; a zero component yields +/-inf,
// which the fmin/fmax logic below tolerates (see the vertex_x2_z note further down).
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
/*****************************************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float one_ray_sum = 0.0f;
float one_ray_length = 0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
int next_alpha_index;
/**** Step 1 :find out alpha_min, alpha_max ********/
// alpha parametrizes the ray as P(alpha) = X1 + alpha*(X2 - X1); alpha_min/alpha_max
// bound the segment that lies inside the reconstruction volume.
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Notice: it is still unsure here which one is the parametric value of the first intersection point of the ray with the x-plane
// It depends on whether source or detector lies on the left side of the reconstruction region at this time
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
// Note: when (vertex_x2_z == vertex_x1_z), alpha_min = -inf, alpha_max = inf.
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
// alpha_min / alpha_max reused
alpha_min = fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min)); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max)); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
one_ray_length = 0.0f ;
else
{
// X direction
// The == comparisons against alpha_x_min/alpha_x_max (and the y/z analogues
// below) detect whether the entry/exit point lies exactly on that axis' outer
// plane; they rely on alpha_min/alpha_max being copies of the very same
// computed float values, so exact equality is intentional here.
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else //if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else //if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else //if (vertex_x1_x >= vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else //if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else //if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x)*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the most left x-plane, i_max the most right x-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else //f (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else //if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y)*inv_volumn_y) ;
}
else //if (vertex_x1_y >= vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else //if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else //if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the most bottom y-plane, j_max the most top y-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Z direction
// Special-case a ray (near-)parallel to the z-planes: k_min > k_max makes the
// z contribution to N_total_sec zero and alpha_z is set to MAX_infi below.
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6f )
{
k_min = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
k_max = floor(( vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
// Note: this condition can be combined into either of the two branches.
}
else if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else //if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else //if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else //if (vertex_x1_z > vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else //if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else //if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,alpha_y_1,alpha_z_1)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x,alpha_y,alpha_z) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (fabs(vertex_x1_x-vertex_x2_x)<volumn_x*1e-6f )
{
alpha_x = MAX_infi;
i = i_min-1;
}
else if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * i_min + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * i_max + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (fabs(vertex_x1_y-vertex_x2_y)<volumn_y*1e-6f )
{
alpha_y = MAX_infi;
j = j_min-1;
}
else if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * j_min + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y >= vertex_x2_y)
{
alpha_y = (volumn_y * j_max + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6f )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z <= vertex_x2_z)
{
alpha_z = (volumn_z * k_min + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * k_max + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
// Note: (alpha_x, alpha_y, alpha_z) the intersection where the path hit the next (i.e. 1st here ) x/y/z plane inside the recon
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If i or j or k should not be an integer, then its predecessor (along the ray)
// NOTE(review): tolerance 1e-16f is below single-precision resolution (~1e-7),
// so in practice the loop runs until alpha_c >= alpha_max or the ray exits the
// grid; the sibling backprojection kernel uses 1e-6f here -- confirm whether
// the asymmetry is intended.
while (alpha_max - alpha_c > 1e-16f)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
// Step to whichever axis plane the ray crosses next; accumulate the chord
// length through the current voxel (weighted by the voxel value for the
// ray sum). Exact-equality tie cases further below handle rays crossing a
// voxel edge (two alphas equal) or corner (all three equal) in one step.
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
//(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i++;
voxel_i++;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i--; //(29)
voxel_i--;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j++;
voxel_j++;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j--;
voxel_j--;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
one_ray_length += d_x1_x2 * (alpha_z - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_z - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k++;
voxel_k++;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k--;
voxel_k--;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
one_ray_length += d_x1_x2 * (alpha_y - alpha_c);
one_ray_sum += d_x1_x2 * (alpha_y - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i];
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
one_ray_length += d_x1_x2 * (alpha_x - alpha_c); //(30)
one_ray_sum += d_x1_x2 * (alpha_x - alpha_c) * d_f[voxel_k*M*N + voxel_j*M + voxel_i]; //(31)
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
next_alpha_index = i+1;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
next_alpha_index = i-1;
}
alpha_x = (volumn_x * next_alpha_index + boundary_voxel_x - vertex_x1_x )* inv_x_diff;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
next_alpha_index = j+1;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
next_alpha_index = j-1;
}
alpha_y = (volumn_y * next_alpha_index + boundary_voxel_y - vertex_x1_y )* inv_y_diff;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
next_alpha_index = k+1;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
next_alpha_index = k-1;
}
alpha_z = (volumn_z * next_alpha_index + boundary_voxel_z - vertex_x1_z )* inv_z_diff;
}
}
}// end tracing the ray
}//end if the ray interacts with the volume
// Rays that missed (or merely grazed) the volume write zero output.
if (one_ray_length < volumn_z*1e-6f)
d_proj_correction[proj_pixel_index] = 0.0f;
else
{
if (command == 0)
d_proj_correction[proj_pixel_index] = one_ray_sum; // forward operator
else if (command == 1)
d_proj_correction[proj_pixel_index] = (d_proj_data[proj_pixel_index] - one_ray_sum)/one_ray_length; // projection correction (for SART)
}
// __syncthreads();
}
__global__ void backprj_ray_driven_3d_kernel_correction_multiGPU(float *d_f_weightedLenSum , float *d_f_LenSum , float *d_proj_correction, float sin_theta, float cos_theta, int subPrjIdx)
{
// d_f_weightedLenSum: 3D object array;
// d_f_LenSum: 3D object array;
// d_proj_data: 2D projection acquired at the angle of t_theta
// d_proj_correction: 2D projection correction, (output of this function. i.e. c(i) in the paper)
/* Note:
* dim3 dimGrid2(1,Z_prj,N_source);
* dim3 dimBlock2(R/1,1);
*/
int proj_x_idx = threadIdx.x + blockDim.x * blockIdx.x;
int proj_z_idx = blockIdx.y;
int proj_src_idx = blockIdx.z;
int proj_pixel_index = R*Z_prj/Number_of_Devices* proj_src_idx + R * proj_z_idx + proj_x_idx;
float proj_val = d_proj_correction[proj_pixel_index];
// X2 point coordinate in (x,y,z) system . the source position
float vertex_x2_x, vertex_x2_y, vertex_x2_z;
vertex_x2_x = Source_x * cos_theta - Source_y * sin_theta;
vertex_x2_y = Source_x * sin_theta + Source_y * cos_theta;
vertex_x2_z = Source_z_min + proj_src_idx * Source_interval;
//X1 point coordinate in (x,y,z) system --- detector pixel positions
float vertex_x1_x, vertex_x1_y, vertex_x1_z;
vertex_x1_x = DOD * cos_theta - (Detector_Ymin + proj_x_idx * Detector_pixel_x) * sin_theta;
vertex_x1_y = DOD * sin_theta + (Detector_Ymin + proj_x_idx * Detector_pixel_x) * cos_theta;
vertex_x1_z = Detector_Zmin + (Z_prj/Number_of_Devices*subPrjIdx+proj_z_idx) * Detector_pixel_x;
// Notice: vertex_x1_x < 0 < vertex_x2_x
float inv_x_diff = 1.0f / (vertex_x2_x - vertex_x1_x);
float inv_y_diff = 1.0f / (vertex_x2_y - vertex_x1_y);
float inv_z_diff = 1.0f / (vertex_x2_z - vertex_x1_z);
/***************/
float alpha_x_min= 0.0f, alpha_y_min= 0.0f, alpha_z_min= 0.0f;
float alpha_x_max= 0.0f, alpha_y_max= 0.0f, alpha_z_max= 0.0f;
float alpha_min= 0.0f, alpha_max= 0.0f;
int i_min=0, j_min=0, k_min=0;
int i_max=0, j_max=0, k_max=0;
int i=0, j=0, k=0;
int voxel_i=0, voxel_j=0, voxel_k=0;
float alpha_x=0.0f, alpha_y=0.0f, alpha_z=0.0f;
float alpha_c= 0.0f;
float d_x1_x2= 0.0f;
int N_total_sec=0;
/**** Step 1 :find out alpha_min, alpha_max ********/
alpha_min = (boundary_voxel_x + volumn_x*0 - vertex_x1_x )* inv_x_diff; //(9)
alpha_max = (boundary_voxel_x + volumn_x*M - vertex_x1_x )* inv_x_diff;
// Notice: it is still unsure here which one is the parametric value of the first intersection point of the ray with the x-plane
// It depends on whether source or detector lies on the left side of the reconstruction region at this time
alpha_x_min = fmin(alpha_min, alpha_max); //(5)
alpha_x_max = fmax(alpha_min, alpha_max ); //(6)
alpha_min = (boundary_voxel_y + volumn_y*0 - vertex_x1_y )* inv_y_diff;
alpha_max = (boundary_voxel_y + volumn_y*N - vertex_x1_y )* inv_y_diff;
alpha_y_min = fmin(alpha_min, alpha_max); //(7)
alpha_y_max = fmax(alpha_min, alpha_max ); //(8)
alpha_min = (boundary_voxel_z + volumn_z*0 - vertex_x1_z )* inv_z_diff;
alpha_max = (boundary_voxel_z + volumn_z*ZETA - vertex_x1_z )* inv_z_diff;
// Note: when (vertex_x2_z == vertex_x1_z), alpha_min = -inf, alpha_max = inf.
alpha_z_min = fmin(alpha_min, alpha_max);
alpha_z_max = fmax(alpha_min, alpha_max );
alpha_min = (fmax(fmax(alpha_x_min, alpha_y_min), fmax(alpha_y_min, alpha_z_min))); //(3)
// i.e. alpha_min = fmax(alpha_x_min,alpha_y_min,alpha_z_min)
// it indicates the point where the path interacts with the near boundary of reconstruction region
alpha_max = (fmin(fmin(alpha_x_max, alpha_y_max), fmin(alpha_y_max, alpha_z_max))); //(4)
// i.e. alpha_max = fmin(alpha_x_max,alpha_y_max,alpha_z_max)
// it indicates the point where the path last interacts with the far boundary of reconstruction region
/********Step 2,3: Find i_max, i_min***************/
if (alpha_max <= alpha_min) // It means no interaction of the ray and the volume
{
}
else // if ( (alpha_max > alpha_min) && (alpha_min > 0.0f) )
{
// X direction
if (vertex_x1_x < vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_min = 1; //(11)
else if (alpha_min != alpha_x_min)
i_min = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x )*inv_volumn_x) + 1 ;
//(12)
/* Note: i_min is the index of the 1st x plane where the path interacts inside the reconstruction region
* It is not the index of alpha_x_min
*/
if (alpha_max == alpha_x_max)
i_max = M; //(13)
else if (alpha_max != alpha_x_max)
i_max = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x )*inv_volumn_x) ;
//(14)
// Note: i_max is the index of the last x plane where the path interacts with the reconstruction region (inside or boundary)
}
else// if (vertex_x1_x >= vertex_x2_x)
{
if (alpha_min == alpha_x_min)
i_max = M-1; //(15)
else if (alpha_min != alpha_x_min)
i_max = floor(( alpha_min*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x )*inv_volumn_x) ;
//(16)
if (alpha_max == alpha_x_max)
i_min = 0; //(17)
else if (alpha_max != alpha_x_max)
i_min = floor(( alpha_max*(vertex_x2_x - vertex_x1_x) + vertex_x1_x - boundary_voxel_x )*inv_volumn_x) + 1 ;
//(18)
}
// Note: overall, i_min is the most left x-plane, i_max the most right x-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Y direction
if (vertex_x1_y < vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_min = 1;
else if (alpha_min != alpha_y_min)
j_min = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
if (alpha_max == alpha_y_max)
j_max = N;
else if (alpha_max != alpha_y_max)
j_max = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
}
else// if (vertex_x1_y >= vertex_x2_y)
{
if (alpha_min == alpha_y_min)
j_max = N-1;
else if (alpha_min != alpha_y_min)
j_max = floor(( alpha_min*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) ;
if (alpha_max == alpha_y_max)
j_min = 0;
else if (alpha_max != alpha_y_max)
j_min = floor(( alpha_max*(vertex_x2_y - vertex_x1_y) + vertex_x1_y - boundary_voxel_y )*inv_volumn_y) + 1 ;
}
// Note: overall, j_min is the most bottom y-plane, j_max the most top y-plane,
// and the initial point (the first interacted position on the boundary) NOT included.
//Z direction
if (vertex_x1_z < vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_min = 1;
else if (alpha_min != alpha_z_min)
k_min = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) + 1 ;
if (alpha_max == alpha_z_max)
k_max = ZETA;
else if (alpha_max != alpha_z_max)
k_max = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
}
else// if (vertex_x1_z >= vertex_x2_z)
{
if (alpha_min == alpha_z_min)
k_max = ZETA-1;
else if (alpha_min != alpha_z_min)
k_max = floor(( alpha_min*(vertex_x2_z - vertex_x1_z) + vertex_x1_z - boundary_voxel_z )*inv_volumn_z) ;
if (alpha_max == alpha_z_max)
k_min = 0;
else if (alpha_max != alpha_z_max)
k_min = floor(( alpha_max*(vertex_x2_z - vertex_x1_z) + vertex_x1_z -boundary_voxel_z )*inv_volumn_z) + 1 ;
}
/************ initialization (i,j,k) (alpha_x_1,y,z)**************************/
// Note: (i,j,k) is the current x,y,z plane index (@ the initial point at the boundary)
// Note: (alpha_x_1,alpha_y_1,alpha_z_1) is the next x,y,z plane to go.
N_total_sec = i_max - i_min + 1 + j_max - j_min +1 + k_max-k_min +1;
// i.e. N_p (25)
if (fabs(vertex_x1_x-vertex_x2_x)<volumn_x*1e-6 )
{
alpha_x = MAX_infi;
i = i_min-1;
}
else if (vertex_x1_x < vertex_x2_x)
{
alpha_x = (volumn_x * (float)i_min +boundary_voxel_x - vertex_x1_x )*inv_x_diff;
i = i_min - 1;
}
else if (vertex_x1_x > vertex_x2_x)
{
alpha_x = (volumn_x * (float)i_max +boundary_voxel_x - vertex_x1_x )*inv_x_diff;
i = i_max + 1;
}
// Note: alpha_x_1 is the intersection where the path hit the 1st x plane inside the recon region
if (fabs(vertex_x1_y-vertex_x2_y)<volumn_y*1e-6 )
{
alpha_y = MAX_infi;
j = j_min-1;
}
else if (vertex_x1_y < vertex_x2_y)
{
alpha_y = (volumn_y * (float)j_min +boundary_voxel_y - vertex_x1_y )*inv_y_diff;
j = j_min - 1;
}
else if (vertex_x1_y > vertex_x2_y)
{
alpha_y = (volumn_y * (float)j_max +boundary_voxel_y - vertex_x1_y )*inv_y_diff;
j = j_max + 1;
}
// Note: alpha_y_1 is the intersection where the path hit the 1st y plane inside the recon region
if (fabs(vertex_x1_z-vertex_x2_z)<volumn_z*1e-6 )
{
alpha_z = MAX_infi;
k = k_min-1;
}
else if (vertex_x1_z < vertex_x2_z)
{
alpha_z = (volumn_z * (float)k_min +boundary_voxel_z - vertex_x1_z )*inv_z_diff;
k = k_min - 1;
}
else if (vertex_x1_z > vertex_x2_z)
{
alpha_z = (volumn_z * (float)k_max +boundary_voxel_z - vertex_x1_z )*inv_z_diff;
k = k_max + 1;
}
/************ initialization (voxel_i,voxel_j,voxel_k) **************************/
// Note: (voxel_i,voxel_j,voxel_k) is the current x,y,z voxel index (@ the initial point at the boundary)
if (vertex_x1_x < vertex_x2_x)
voxel_i = i_min-1;
else
voxel_i = i_max;
if (vertex_x1_y < vertex_x2_y)
voxel_j = j_min-1;
else
voxel_j = j_max;
if (vertex_x1_z < vertex_x2_z)
voxel_k = k_min-1;
else
voxel_k = k_max;
/***************** Updating alpha_x, alpha_y, alpha_z, ************************/
d_x1_x2 = sqrt((vertex_x2_x-vertex_x1_x)*(vertex_x2_x-vertex_x1_x) + (vertex_x2_y-vertex_x1_y)*(vertex_x2_y - vertex_x1_y) + (vertex_x2_z-vertex_x1_z)*(vertex_x2_z-vertex_x1_z) );
alpha_c = alpha_min; // intersection where the path hit the 1st plane at the boundary of recon region
// Note : (i,j,k) is the (x,y,z) plane index of the current intersection (with a certain plane)
// If i or j or k should not be an integer, then its predecessor (along the ray)
while (alpha_max - alpha_c > 1e-6f)
{
if ((voxel_i > M-1)||(voxel_i <0) || (voxel_j > N-1)||(voxel_j <0) || (voxel_k > ZETA-1)||(voxel_k <0))
{
alpha_c = alpha_max +1; // to terminate the loop
}
else
{
if ( (alpha_x < alpha_y) && (alpha_x < alpha_z))
// alpha_x is the nearest, so update alpha_x
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c) * proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 1;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
alpha_x = (volumn_x * (i+1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
alpha_x = (volumn_x * (i-1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
}
else if ( (alpha_y < alpha_x) && (alpha_y < alpha_z) )
// alpha_y is the nearest, so update alpha_y
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c));
alpha_c = alpha_y;
N_total_sec = N_total_sec -1;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
alpha_y = (volumn_y * (j+1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
alpha_y = (volumn_y * (j-1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
}
else if ( (alpha_z < alpha_x) && (alpha_z < alpha_y) )
// alpha_z is the nearest, so update alpha_z
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_z - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_z - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_z - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_z - alpha_c));
alpha_c = alpha_z;
N_total_sec = N_total_sec -1;
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
alpha_z = (volumn_z * (k+1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
alpha_z = (volumn_z * (k-1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
}
else if ( (alpha_x == alpha_y) && (alpha_x < alpha_z) )
//x = y < z
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
alpha_x = (volumn_x * (i+1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
alpha_x = (volumn_x * (i-1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
alpha_y = (volumn_y * (j+1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
alpha_y = (volumn_y * (j-1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
}
else if ( (alpha_x == alpha_z) && (alpha_x < alpha_y))// && (sphere_range<=1.0f) )
// x = z < y;
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 2;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
alpha_x = (volumn_x * (i+1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
alpha_x = (volumn_x * (i-1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
alpha_z = (volumn_z * (k+1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
alpha_z = (volumn_z * (k-1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
}
else if ( (alpha_y == alpha_z) && (alpha_y < alpha_x))// && (sphere_range<=1.0f) )
// y = z < x
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c)* proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_y - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_y - alpha_c));
alpha_c = alpha_y;
N_total_sec = N_total_sec -2;
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
alpha_y = (volumn_y * (j+1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
alpha_y = (volumn_y * (j-1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
alpha_z = (volumn_z * (k+1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
alpha_z = (volumn_z * (k-1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
}
else if ( (alpha_x == alpha_z) && (alpha_x == alpha_y))// && (sphere_range<=1.0f) )
// x=y=z
{
// d_f_weightedLenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c) * proj_val;
// d_f_LenSum[voxel_k*M*N + voxel_j*M + voxel_i]+=d_x1_x2 * (alpha_x - alpha_c);
atomicAdd(d_f_weightedLenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c) * proj_val);
atomicAdd(d_f_LenSum + voxel_k*M*N + voxel_j*M + voxel_i, d_x1_x2 * (alpha_x - alpha_c));
alpha_c = alpha_x; //(33) Update the current location
N_total_sec = N_total_sec - 3;
if (vertex_x1_x < vertex_x2_x)
{
i = i + 1;
voxel_i = voxel_i +1;
alpha_x = (volumn_x * (i+1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
if (vertex_x1_x > vertex_x2_x)
{
i = i - 1; //(29)
voxel_i = voxel_i-1;
alpha_x = (volumn_x * (i-1) + boundary_voxel_x - vertex_x1_x )*inv_x_diff;
}
if (vertex_x1_y < vertex_x2_y)
{
j = j + 1;
voxel_j = voxel_j+1;
alpha_y = (volumn_y * (j+1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
else if (vertex_x1_y > vertex_x2_y)
{
j = j - 1;
voxel_j = voxel_j-1;
alpha_y = (volumn_y * (j-1) + boundary_voxel_y - vertex_x1_y )*inv_y_diff;
}
if (vertex_x1_z < vertex_x2_z)
{
k = k + 1;
voxel_k = voxel_k+1;
alpha_z = (volumn_z * (k+1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
else if (vertex_x1_z > vertex_x2_z)
{
k = k - 1;
voxel_k = voxel_k-1;
alpha_z = (volumn_z * (k-1) + boundary_voxel_z - vertex_x1_z )*inv_z_diff;
}
}
}
}// end tracing the ray
}//end if the ray interacts with the volume
// __syncthreads();
}
|
65e839a59717f9a2a0a2c338c8b0ae797d234b0f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define THREADS _THREADS_
// Deposit one "hit" at the grid cell nearest to cell (i, j)'s mass point.
//
// massx/massy hold cell k's centre-of-mass offset in cell units; the offset
// position is rounded back onto the grid and the matching counter in `hits`
// is incremented atomically (several source cells may land on the same
// target cell, hence atomicAdd).
//
// `grid` and `neigh` are unused here but kept so the signature matches the
// call site in agg().
__device__ void _increment_hit_count(
    const unsigned int grid_size,
    const unsigned int i,
    const unsigned int j,
    const bool *grid,
    const float *massx,
    const float *massy,
    const int *neigh,
    int *hits
){
  const float one = 1.0f/(float)grid_size;   // width of one cell in [0,1) coords
  const int k = i*grid_size+j;               // flat index of the source cell
  float x = (float)i*one+massx[k]*one;
  float y = (float)j*one+massy[k]*one;
  // round() can step one cell past the border when the mass offset points
  // outside the grid; clamp so the atomicAdd below never writes out of bounds.
  int ii = (int)round(x*grid_size);
  int jj = (int)round(y*grid_size);
  if (ii < 0) ii = 0;
  if (jj < 0) jj = 0;
  if (ii > (int)grid_size - 1) ii = (int)grid_size - 1;
  if (jj > (int)grid_size - 1) jj = (int)grid_size - 1;
  atomicAdd(&hits[ii*grid_size+jj], 1);
  return;
}
// One thread per grid cell, flat index ij = i*grid_size + j.
// A cell contributes a hit iff it lies inside the data (ij < n), is
// occupied (grid[ij]) and has at least one neighbour (neigh[ij] >= 1).
__global__ void agg(
  const int n,
  const unsigned int grid_size,
  const bool *grid,
  const float *massx,
  const float *massy,
  const int *neigh,
  int *hits
){
  const unsigned int ij = blockIdx.x*THREADS + threadIdx.x;
  // Tail guard first: threads past the data must not touch any array.
  if (ij >= n){
    return;
  }
  if (neigh[ij] >= 1 && grid[ij]){
    _increment_hit_count(grid_size, ij/grid_size, ij%grid_size,
                         grid, massx, massy, neigh, hits);
  }
}
| 65e839a59717f9a2a0a2c338c8b0ae797d234b0f.cu | #define THREADS _THREADS_
// Deposit one "hit" at the grid cell nearest to cell (i, j)'s mass point.
// massx/massy hold cell k's centre-of-mass offset in cell units; the offset
// position is rounded back onto the grid and the matching counter in `hits`
// is bumped atomically (several source cells may land on the same target).
// NOTE(review): if the offset can push the rounded index past the last
// row/column, the atomicAdd writes out of bounds — confirm massx/massy are
// bounded by the caller.
// `grid` and `neigh` are unused; kept so the signature mirrors agg().
__device__ void _increment_hit_count(
  const unsigned int grid_size,
  const unsigned int i,
  const unsigned int j,
  const bool *grid,
  const float *massx,
  const float *massy,
  const int *neigh,
  int *hits
){
  const float one = 1.0f/(float)grid_size;
  const int k = i*grid_size+j;
  float x = (float)i*one+massx[k]*one;
  float y = (float)j*one+massy[k]*one;
  unsigned int ii = (unsigned int)round(x*grid_size);
  unsigned int jj = (unsigned int)round(y*grid_size);
  atomicAdd(&hits[ii*grid_size+jj], 1);
  return;
}
// One thread per grid cell (flat index ij = i*grid_size + j).
// A cell contributes a hit iff it is inside the data (ij < n), occupied
// (grid[ij]) and has at least one neighbour (neigh[ij] >= 1).
__global__ void agg(
  const int n,
  const unsigned int grid_size,
  const bool *grid,
  const float *massx,
  const float *massy,
  const int *neigh,
  int *hits
){
  const unsigned int ij = blockIdx.x*THREADS + threadIdx.x;
  const unsigned int i = ij/grid_size;
  const unsigned int j = ij%grid_size;
  // Tail guard: the grid may overshoot n.
  if (ij>=n){
    return;
  }
  if (neigh[ij]<1){
    return;
  }
  if (!grid[ij]){
    return;
  }
  _increment_hit_count(grid_size, i, j, grid, massx, massy, neigh, hits);
}
|
d65130b1ca57ec254da1900b0f1d99766496b70f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include<stdlib.h>
// SAXPY kernel: y[idx] = a * x[idx] + y[idx], one element per thread.
__global__
void saxpy(int n, float a, float *x, float *y)
{
  const int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= n) return;          // tail guard: grid may overshoot n
  y[idx] = a * x[idx] + y[idx];
}
// Host driver: run SAXPY (y = 2*x + y) on 2^20 + 1 elements and report the
// maximum deviation from the expected value 4.0.
int main()
{
  // Deliberately not a multiple of the block size so the kernel's bounds
  // guard is exercised on the last block.
  int N = 1<<20;
  N++;
  float *x, *y, *d_x, *d_y;
  x = (float*)malloc(N*sizeof(float));
  y = (float*)malloc(N*sizeof(float));
  if (x == NULL || y == NULL) {
    printf("host allocation failed\n");
    return 1;
  }
  // Every HIP call returns a status; check it instead of silently ignoring it.
  if (hipMalloc(&d_x, N*sizeof(float)) != hipSuccess ||
      hipMalloc(&d_y, N*sizeof(float)) != hipSuccess) {
    printf("device allocation failed\n");
    return 1;
  }

  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);

  // Perform SAXPY on 1M elements
  hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
  hipError_t err = hipGetLastError();   // catches bad launch configurations
  if (err != hipSuccess) {
    printf("kernel launch failed: %s\n", hipGetErrorString(err));
    return 1;
  }

  hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);

  float maxError = 0.0f;
  for (int i = 0; i < N; i++) {
    // Explicit float comparison: the original used abs()/max(), and with
    // only stdio/stdlib included abs() can resolve to the *integer*
    // overload, truncating any error below 1.0 to zero.
    float diff = y[i] - 4.0f;
    if (diff < 0.0f) diff = -diff;
    if (diff > maxError) maxError = diff;
  }
  printf("Max error: %f\n", maxError);

  hipFree(d_x);
  hipFree(d_y);
  free(x);
  free(y);
}
| d65130b1ca57ec254da1900b0f1d99766496b70f.cu | #include <stdio.h>
#include<stdlib.h>
// SAXPY kernel: y[i] = a*x[i] + y[i], one element per thread.
__global__
void saxpy(int n, float a, float *x, float *y)
{
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  if (i < n) y[i] = a*x[i] + y[i];   // tail guard: grid may overshoot n
}
// Host driver: run SAXPY (y = 2*x + y) on 2^20 + 1 elements and report the
// maximum deviation from the expected value 4.0.
int main()
{
  // Deliberately not a multiple of the block size so the kernel's bounds
  // guard is exercised on the last block.
  int N = 1<<20;
  N++;
  float *x, *y, *d_x, *d_y;
  x = (float*)malloc(N*sizeof(float));
  y = (float*)malloc(N*sizeof(float));
  if (x == NULL || y == NULL) {
    printf("host allocation failed\n");
    return 1;
  }
  // Every CUDA call returns a status; check it instead of silently ignoring it.
  if (cudaMalloc(&d_x, N*sizeof(float)) != cudaSuccess ||
      cudaMalloc(&d_y, N*sizeof(float)) != cudaSuccess) {
    printf("device allocation failed\n");
    return 1;
  }

  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);

  // Perform SAXPY on 1M elements
  saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
  cudaError_t err = cudaGetLastError();   // catches bad launch configurations
  if (err != cudaSuccess) {
    printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    return 1;
  }

  cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);

  float maxError = 0.0f;
  for (int i = 0; i < N; i++) {
    // Explicit float comparison: the original used abs()/max(), and with
    // only stdio/stdlib included abs() can resolve to the *integer*
    // overload, truncating any error below 1.0 to zero.
    float diff = y[i] - 4.0f;
    if (diff < 0.0f) diff = -diff;
    if (diff > maxError) maxError = diff;
  }
  printf("Max error: %f\n", maxError);

  cudaFree(d_x);
  cudaFree(d_y);
  free(x);
  free(y);
}
|
83835b76d98609a9fac1c0a4c23cce8d82008e1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
typedef unsigned long ulint;
typedef unsigned long long ulint64;
int banyakdata = 335544320;
int dimensigrid = 327680;
int dimensiblok = 1024;
// Modular exponentiation by right-to-left binary square-and-multiply:
//   *res = a^b mod c
// Intermediates are widened to 64 bits, so the products cannot overflow
// as long as c fits in 32 bits.
__host__ __device__ void modexp(ulint a, ulint b, ulint c, ulint* res) {
    ulint64 base = a;
    ulint64 acc = 1;
    for (; b != 0; b >>= 1) {
        if (b & 1) {
            acc = acc * base % c;    // fold the current bit into the result
        }
        if ((b >> 1) != 0) {
            base = base * base % c;  // square only while higher bits remain
        }
    }
    *res = acc;
}
// ElGamal encryption of message m with ephemeral key k:
//   res[0] = g^k mod p (ephemeral part), res[1] = m * y^k mod p (masked message).
// NOTE(review): the final product relies on ulint being 64 bits (unsigned
// long on LP64); on LLP64 platforms it overflows — confirm target platform.
__device__ void enkripsi(ulint g, ulint k, ulint p, ulint m, ulint y, ulint *res) {
	modexp(g, k, p, res);
	modexp(y, k, p, res + 1);
	*(res + 1) = *(res + 1) * m % p;
}
// ElGamal decryption step: *res = a^e * b mod p.
// NOTE(review): e is presumably p-1-x so that a^e = a^(-x) mod p by Fermat —
// confirm against the (not shown) caller that computes e.
__device__ void dekripsi(ulint a, ulint b, ulint p, ulint e, ulint *res) {
	modexp(a, e, p, res);
	*res = *res * b % p;
}
// One thread per message: encrypt m[i] with ephemeral key k[i] into the
// ciphertext pair res[2i], res[2i+1].
// NOTE(review): no bounds guard — the launch (dimensigrid * dimensiblok)
// must cover the data exactly; re-check if either constant changes.
__global__ void kernelenk(ulint *m, ulint *k, ulint g, ulint p, ulint y, ulint *res) {
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	enkripsi(g, k[i], p, m[i], y, res + 2 * i);
}
// One thread per ciphertext pair: decrypt (c[2i], c[2i+1]) into res[i].
// NOTE(review): no bounds guard — launch dimensions must cover the data exactly.
__global__ void kerneldek(ulint *c, ulint p, ulint e, ulint *res) {
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	dekripsi(c[2*i], c[2*i+1], p, e, res + i);
}
// Encrypt `banyakdata` (m, k) pairs on the GPU with ElGamal.
//   m   : plaintexts, k : per-message ephemeral keys (banyakdata elements each)
//   g,p : group generator and prime modulus; y : public key (g^x mod p)
//   res : output ciphertext pairs (2 * banyakdata elements)
void enkripsiCUDA(ulint *m, ulint *k, ulint g, ulint p, ulint y, ulint *res) {
	//=====================BAGIAN M[] K[] DAN RES[] ====================================//
	ulint *devm = NULL, *devk = NULL, *devres = NULL;
	// With the default banyakdata these three buffers total well over 10 GB,
	// so the allocations can genuinely fail — check every call instead of
	// silently ignoring the status.
	if (hipMalloc((void**)&devm, banyakdata * sizeof(ulint)) != hipSuccess ||
	    hipMalloc((void**)&devk, banyakdata * sizeof(ulint)) != hipSuccess ||
	    hipMalloc((void**)&devres, banyakdata * 2 * sizeof(ulint)) != hipSuccess) {
		printf("enkripsiCUDA: device allocation failed\n");
		hipFree(devm); hipFree(devk); hipFree(devres);  // hipFree(NULL) is a no-op
		return;
	}

	hipMemcpy(devm, m, (sizeof(ulint) * banyakdata), hipMemcpyHostToDevice);
	hipMemcpy(devk, k, (sizeof(ulint) * banyakdata), hipMemcpyHostToDevice);

	hipLaunchKernelGGL(kernelenk, dim3(dimensigrid), dim3(dimensiblok), 0, 0,
	                   devm, devk, g, p, y, devres);
	hipError_t err = hipGetLastError();   // launch-configuration errors
	if (err != hipSuccess)
		printf("enkripsiCUDA: kernel launch failed: %s\n", hipGetErrorString(err));
	hipDeviceSynchronize();

	// COPY FROM DEVICE TO HOST HERE
	hipMemcpy(res, devres, (sizeof(ulint) * 2 * banyakdata), hipMemcpyDeviceToHost);

	hipFree(devm);
	hipFree(devk);
	hipFree(devres);
}
// Fill the plaintext and ephemeral-key arrays with pseudo-random values
// below p-1 (p = 3999999979).
// NOTE(review): on platforms where RAND_MAX < 3999999977 the modulo cannot
// reach the full range — confirm this is acceptable for the benchmark.
void initenkripsi(ulint *m, ulint *k){
	for (int idx = 0; idx < banyakdata; ++idx) {
		m[idx] = rand() % 3999999978;
		k[idx] = rand() % 3999999978;
	}
}
// Driver: build an ElGamal key pair and encrypt banyakdata random messages
// on the GPU.
//   p : fixed 32-bit prime modulus
//   g : random group element, x : random secret key
//   y = g^x mod p : public key
// NOTE(review): the three malloc()s total roughly 10 GB with the default
// banyakdata and are not checked — confirm the host has enough memory.
int main(){
	ulint *m, *k, *res, g, p, y, x;
	
	m = (ulint*)malloc(banyakdata * sizeof(ulint));
	k = (ulint*)malloc(banyakdata * sizeof(ulint));
	res = (ulint*)malloc(banyakdata * 2 * sizeof(ulint));
	
	srand(2018);  // fixed seed: runs are reproducible
	g = rand() % 3999999978;
	p = 3999999979;
	x = rand() % 3999999978;
	modexp(g,x,p,&y);
	
	initenkripsi(m, k);
	
	enkripsiCUDA(m,k,g,p,y,res);
	
	// printf("<<<<<<<<<<<<<<Hasil Enkripsi>>>>>>>>>>>>>>>\n");
	// for (int i = 0; i < 4; i++) {
	// 	printf("c[%d] = %lu  c[%d] = %lu\n", 2*i, res[2*i], 2*i+1, res[2*i+1]);
	// }
	// printf("c ...\n");
	// printf("c[%d] = %lu  c[%d] = %lu\n", banyakdata * 2-2, res[banyakdata * 2-2], banyakdata *2-1,res[banyakdata*2-1]);
	// printf("<<<<<<<<<<<<<<Hasil Dekripsi>>>>>>>>>>>>>>>\n");
	// for (int i = 0; i < 4; i++) {
	// 	printf("m[%d] = %lu\n", i, res2[i]);
	// }
	// printf("m[...]\n");
	// printf("m[%d] = %lu\n", banyakdata-1, res2[banyakdata-1]);
	
	free(m);
	free(k);
	free(res);
	return 0;
}
| 83835b76d98609a9fac1c0a4c23cce8d82008e1e.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
typedef unsigned long ulint;
typedef unsigned long long ulint64;
int banyakdata = 335544320;
int dimensigrid = 327680;
int dimensiblok = 1024;
// Modular exponentiation by repeated squaring: *res = a^b mod c.
// 64-bit intermediates keep ans*s and s*s from overflowing while c < 2^32.
__host__ __device__ void modexp(ulint a, ulint b, ulint c, ulint* res) {
	ulint64 s = a;
	ulint64 ans = 1;
	while (b != 0) {
		if (b % 2 == 1) {  // current bit set: multiply into the result
			ans = ans * s % c;
			b--;
		}
		b /= 2;
		if (b != 0) {      // square only while bits remain
			s = s * s %c;
		}
	}
	*res = ans;
}
// ElGamal encryption of message m with ephemeral key k:
//   res[0] = g^k mod p (ephemeral part), res[1] = m * y^k mod p (masked message).
// NOTE(review): the final product relies on ulint being 64 bits (unsigned
// long on LP64); on LLP64 platforms it overflows — confirm target platform.
__device__ void enkripsi(ulint g, ulint k, ulint p, ulint m, ulint y, ulint *res) {
	modexp(g, k, p, res);
	modexp(y, k, p, res + 1);
	*(res + 1) = *(res + 1) * m % p;
}
// ElGamal decryption step: *res = a^e * b mod p.
// NOTE(review): e is presumably p-1-x so that a^e = a^(-x) mod p by Fermat —
// confirm against the (not shown) caller that computes e.
__device__ void dekripsi(ulint a, ulint b, ulint p, ulint e, ulint *res) {
	modexp(a, e, p, res);
	*res = *res * b % p;
}
// One thread per message: encrypt m[i] with ephemeral key k[i] into the
// ciphertext pair res[2i], res[2i+1].
// NOTE(review): no bounds guard — launch dimensions must cover the data exactly.
__global__ void kernelenk(ulint *m, ulint *k, ulint g, ulint p, ulint y, ulint *res) {
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	enkripsi(g, k[i], p, m[i], y, res + 2 * i);
}
// One thread per ciphertext pair: decrypt (c[2i], c[2i+1]) into res[i].
// NOTE(review): no bounds guard — launch dimensions must cover the data exactly.
__global__ void kerneldek(ulint *c, ulint p, ulint e, ulint *res) {
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	dekripsi(c[2*i], c[2*i+1], p, e, res + i);
}
// Encrypt `banyakdata` (m, k) pairs on the GPU with ElGamal; ciphertext
// pairs land in res (2 * banyakdata elements).
// NOTE(review): every CUDA call here is unchecked, and the three device
// buffers total well over 10 GB with the default banyakdata — allocation
// failure silently produces garbage output. Consider adding status checks.
void enkripsiCUDA(ulint *m, ulint *k, ulint g, ulint p, ulint y, ulint *res) {
	//=====================BAGIAN M[] K[] DAN RES[] ====================================//
	ulint *devm, *devk, *devres;
	cudaMalloc((void**)&devm, banyakdata * sizeof(ulint));
	cudaMalloc((void**)&devk, banyakdata * sizeof(ulint));
	cudaMalloc((void**)&devres, banyakdata * 2 * sizeof(ulint));
	
	cudaMemcpy((devm), m, (sizeof(ulint) * banyakdata), cudaMemcpyHostToDevice);
	cudaMemcpy((devk), k, (sizeof(ulint) * banyakdata), cudaMemcpyHostToDevice);
	
	kernelenk << <dimensigrid, dimensiblok>> >(devm,devk,g,p,y,devres);
	cudaDeviceSynchronize();
	
	// COPY FROM DEVICE TO HOST HERE
	cudaMemcpy(res, devres, (sizeof(ulint) * 2 * banyakdata), cudaMemcpyDeviceToHost);
	
	cudaFree(devm);
	cudaFree(devk);
	cudaFree(devres);
}
// Fill the plaintext and ephemeral-key arrays with pseudo-random values
// below p-1 (p = 3999999979).
// NOTE(review): on platforms where RAND_MAX < 3999999977 the modulo cannot
// reach the full range — confirm this is acceptable for the benchmark.
void initenkripsi(ulint *m, ulint *k){
	for (int i = 0; i < banyakdata; i++) {
		m[i] = rand() % 3999999978;
		k[i] = rand() % 3999999978;
	}
}
// Driver: build an ElGamal key pair and encrypt banyakdata random messages
// on the GPU.
//   p : fixed 32-bit prime modulus
//   g : random group element, x : random secret key
//   y = g^x mod p : public key
// NOTE(review): the three malloc()s total roughly 10 GB with the default
// banyakdata and are not checked — confirm the host has enough memory.
int main(){
	ulint *m, *k, *res, g, p, y, x;
	
	m = (ulint*)malloc(banyakdata * sizeof(ulint));
	k = (ulint*)malloc(banyakdata * sizeof(ulint));
	res = (ulint*)malloc(banyakdata * 2 * sizeof(ulint));
	
	srand(2018);  // fixed seed: runs are reproducible
	g = rand() % 3999999978;
	p = 3999999979;
	x = rand() % 3999999978;
	modexp(g,x,p,&y);
	
	initenkripsi(m, k);
	
	enkripsiCUDA(m,k,g,p,y,res);
	
	// printf("<<<<<<<<<<<<<<Hasil Enkripsi>>>>>>>>>>>>>>>\n");
	// for (int i = 0; i < 4; i++) {
	// 	printf("c[%d] = %lu  c[%d] = %lu\n", 2*i, res[2*i], 2*i+1, res[2*i+1]);
	// }
	// printf("c ...\n");
	// printf("c[%d] = %lu  c[%d] = %lu\n", banyakdata * 2-2, res[banyakdata * 2-2], banyakdata *2-1,res[banyakdata*2-1]);
	// printf("<<<<<<<<<<<<<<Hasil Dekripsi>>>>>>>>>>>>>>>\n");
	// for (int i = 0; i < 4; i++) {
	// 	printf("m[%d] = %lu\n", i, res2[i]);
	// }
	// printf("m[...]\n");
	// printf("m[%d] = %lu\n", banyakdata-1, res2[banyakdata-1]);
	
	free(m);
	free(k);
	free(res);
	return 0;
}
|
e7a167467a88ea49ee94242fa89fc2b11ce3f57c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include "matrixmul_kernel.hip"
// Predefining TILE_WIDTH
#define TILE_WIDTH 16
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
bool CompareMatrices(Matrix A, Matrix B);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
int ReadParamsFile(int* params, char* file_name, int num_params);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Driver: multiply M * N on the GPU, verify against a CPU reference, and
// optionally read inputs from / write the result to disk.
//   no args (or wrong count): random matrix sizes and contents
//   argv = {params, M-file, N-file[, out-file]}: read matrices from disk
int main(int argc, char** argv) {
	Matrix  M;
	Matrix  N;
	Matrix  P;
	int errorM = 0, errorN = 0;
	
	srand(52);
	
	if(argc != 5 && argc != 4) 
	{
		// Allocate and initialize the matrices
		M  = AllocateMatrix(rand() % 1024, rand() % 1024, 1);
		N  = AllocateMatrix(M.width, rand() % 1024, 1);
		P  = AllocateMatrix(M.height, N.width, 0);
	}
	else
	{
		// Allocate and read in matrices from disk
		int* params = (int*)malloc(3 * sizeof(int));
		unsigned data_read = ReadParamsFile(params, argv[1], 3);
		if(data_read != 3){
			printf("Error reading parameter file\n");
			free(params);   // was leaked on this path
			return 1;
		}
		
		M  = AllocateMatrix(params[0], params[1], 0);
		N  = AllocateMatrix(params[1], params[2], 0);		
		P  = AllocateMatrix(params[0], params[2], 0);
		free(params);       // dimensions consumed; was leaked
		unsigned sizeM = ReadFile(&M, argv[2]);
		unsigned sizeN = ReadFile(&N, argv[3]);
		if( (sizeM != M.height * M.width) || (sizeN != N.height * N.width) )
		{
			printf("Error reading input files %d, %d\n", errorM, errorN);
			return 1;
		}
	}

	// M * N on the device
	MatrixMulOnDevice(M, N, P);
	
	printf("GPU computation complete\n");
	// compute the matrix multiplication on the CPU for comparison
	Matrix reference = AllocateMatrix(P.height, P.width, 0);
	computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
	
	printf("CPU computation complete\n");
	// check if the device result is equivalent to the expected solution
	bool res = CompareMatrices(reference, P);
	printf("Test %s\n", res ? "PASSED" : "FAILED");
	
	if(argc == 5)
	{
		WriteFile(P, argv[4]);
	}
	else if(argc == 2)
	{
		WriteFile(P, argv[1]);
	}   

	// Free matrices (`reference` was previously leaked)
	FreeMatrix(&reference);
	FreeMatrix(&M);
	FreeMatrix(&N);
	FreeMatrix(&P);
	return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Compute P = M * N on the device: copy inputs over, launch the tiled
// kernel (one TILE_WIDTH x TILE_WIDTH block per output tile), copy back.
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
	// Load M and N to the device
	Matrix Md = AllocateDeviceMatrix(M);
	CopyToDeviceMatrix(Md, M);
	Matrix Nd = AllocateDeviceMatrix(N);
	CopyToDeviceMatrix(Nd, N);
	
	// Allocate P on the device
	Matrix Pd = AllocateDeviceMatrix(P);
	CopyToDeviceMatrix(Pd, P); // Clear memory
	
	// Round the grid up so a partial tile at the right/bottom edge of P is
	// still covered.
	int blocks_w = N.width/TILE_WIDTH ;
	int blocks_h = M.height /TILE_WIDTH;
	if(N.width % TILE_WIDTH)   // BUG FIX: was M.width (the *inner* dimension),
	{                          // which left the last column tile of P
		blocks_w ++;           // uncomputed whenever only N.width had a
	}                          // remainder.
	if(M.height % TILE_WIDTH)
	{
		blocks_h ++;
	}
	dim3 dimGrid(blocks_w, blocks_h, 1);
	dim3 dimBlock(TILE_WIDTH, TILE_WIDTH,1);
	
	// Launch the device computation threads!
	hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, Md, Nd, Pd);
	
	// Read P from the device
	CopyFromDeviceMatrix(P, Pd); 
	
	// Free device matrices
	FreeDeviceMatrix(&Md);
	FreeDeviceMatrix(&Nd);
	FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M.
// NOTE(review): the hipMalloc result is unchecked — on failure the returned
// struct carries an unusable pointer; confirm callers can tolerate that.
Matrix AllocateDeviceMatrix(const Matrix M)
{
	Matrix Mdevice = M;
	int size = M.width * M.height * sizeof(float);
	hipMalloc((void**)&Mdevice.elements, size);
	return Mdevice;
}
// Allocate a host matrix of dimensions height*width.
//	If init == 0, initialize to all zeroes.  
//	If init == 1, perform random initialization (values in [0, 3)).
//  If init == 2, initialize matrix parameters, but do not allocate memory 
Matrix AllocateMatrix(int height, int width, int init)
{
	Matrix M;
	M.width = M.pitch = width;
	M.height = height;
	int size = M.width * M.height;
	M.elements = NULL;
	
	// don't allocate memory on option 2
	if(init == 2)
		return M;
		
	M.elements = (float*) malloc(size*sizeof(float));

	for(unsigned int i = 0; i < M.height * M.width; i++)
	{
		// 3.0f * rand(): the original computed rand()*3 in int, which
		// overflows (UB) on platforms where RAND_MAX is large (e.g. glibc's
		// 2^31-1), yielding negative entries instead of values in [0, 3).
		M.elements[i] = (init == 0) ? (0.0f) : (3.0f * rand() / (float)RAND_MAX);
	}
	return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
	int size = Mhost.width * Mhost.height * sizeof(float);
	Mdevice.height = Mhost.height;   // note: Mdevice is passed by value, so
	Mdevice.width = Mhost.width;     // these field updates are local to this
	Mdevice.pitch = Mhost.pitch;     // call and never reach the caller
	hipMemcpy(Mdevice.elements, Mhost.elements, size, 
					hipMemcpyHostToDevice);
}
// Copy a device matrix back to a host matrix (element data only; the
// hipMemcpy blocks until the preceding kernel has finished).
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
	int size = Mdevice.width * Mdevice.height * sizeof(float);
	hipMemcpy(Mhost.elements, Mdevice.elements, size, 
					hipMemcpyDeviceToHost);
}
// Free a device matrix and null the pointer so a double free is harmless.
void FreeDeviceMatrix(Matrix* M)
{
	hipFree(M->elements);
	M->elements = NULL;
}
// Free a host Matrix and null the pointer so a double free is harmless.
void FreeMatrix(Matrix* M)
{
	free(M->elements);
	M->elements = NULL;
}
// Read M->height * M->width floats (whitespace-separated text) from
// file_name into M->elements.  Returns the number of elements actually
// read — the caller compares against height*width, so 0 (unopenable file)
// or a short count signals failure.
int ReadFile(Matrix* M, char* file_name)
{
	unsigned int data_read = M->width * M->height;
	FILE* input = fopen(file_name, "r");
	if (input == NULL)   // was dereferenced unchecked -> crash on a bad path
		return 0;
	unsigned i = 0;
	for (; i < data_read; i++) 
		if (fscanf(input, "%f", &(M->elements[i])) != 1)
			break;       // short or garbled file: report how far we got
	fclose(input);       // FILE* was previously leaked
	return i;
}
// Read `num_params` integers (matrix dimensions) from a text file.
// Returns num_params on success; 0 if the file cannot be opened, or the
// short count if it runs out early — so the caller's `!= 3` check actually
// fires on failure.
int ReadParamsFile(int* params, char* file_name, int num_params)
{
	FILE* input = fopen(file_name, "r");
	if (input == NULL)   // was dereferenced unchecked
		return 0;
	for (unsigned i = 0; i < num_params; i++) 
		if (fscanf(input, "%d", &(params[i])) != 1) {
			fclose(input);
			return i;
		}
	fclose(input);       // FILE* was previously leaked
	return num_params;
}
// Write matrix M to file_name as space-separated floats.
void WriteFile(Matrix M, char* file_name)
{
	unsigned int size = M.width * M.height;
	FILE* output = fopen(file_name, "w");
	if (output == NULL) {   // was dereferenced unchecked
		printf("Error opening output file\n");
		return;
	}
	for (unsigned i = 0; i < size; i++) {
		fprintf(output, "%f ", M.elements[i]);
	}
	fclose(output);         // was leaked; close also flushes buffered output
}
// returns true iff A and B have the same shape and all elements agree
// within an absolute tolerance of 1e-4
bool CompareMatrices(Matrix A, Matrix B) {
	unsigned int size = A.width * A.height;

	if ( (A.width != B.width) || (A.height != B.height) )
		return false;

	for (unsigned i = 0; i < size; i++)
		// fabsf instead of abs: depending on which headers supply overloads,
		// abs can resolve to the integer version, truncating the difference
		// to 0 and passing matrices that differ by anything under 1.0.
		if (fabsf(A.elements[i] - B.elements[i]) > 0.0001f)
			return false;
	return true;
}
| e7a167467a88ea49ee94242fa89fc2b11ce3f57c.cu | /* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, kernels
#include "matrixmul_kernel.cu"
// Predefining TILE_WIDTH
#define TILE_WIDTH 16
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
bool CompareMatrices(Matrix A, Matrix B);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
int ReadParamsFile(int* params, char* file_name, int num_params);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Driver: multiply M * N on the GPU, verify against a CPU reference, and
// optionally read inputs from / write the result to disk.
//   no args (or wrong count): random matrix sizes and contents
//   argv = {params, M-file, N-file[, out-file]}: read matrices from disk
int main(int argc, char** argv) {
	Matrix  M;
	Matrix  N;
	Matrix  P;
	int errorM = 0, errorN = 0;
	
	srand(52);
	
	if(argc != 5 && argc != 4) 
	{
		// Allocate and initialize the matrices
		M  = AllocateMatrix(rand() % 1024, rand() % 1024, 1);
		N  = AllocateMatrix(M.width, rand() % 1024, 1);
		P  = AllocateMatrix(M.height, N.width, 0);
	}
	else
	{
		// Allocate and read in matrices from disk
		int* params = (int*)malloc(3 * sizeof(int));
		unsigned data_read = ReadParamsFile(params, argv[1], 3);
		if(data_read != 3){
			printf("Error reading parameter file\n");
			free(params);   // was leaked on this path
			return 1;
		}
		
		M  = AllocateMatrix(params[0], params[1], 0);
		N  = AllocateMatrix(params[1], params[2], 0);		
		P  = AllocateMatrix(params[0], params[2], 0);
		free(params);       // dimensions consumed; was leaked
		unsigned sizeM = ReadFile(&M, argv[2]);
		unsigned sizeN = ReadFile(&N, argv[3]);
		if( (sizeM != M.height * M.width) || (sizeN != N.height * N.width) )
		{
			printf("Error reading input files %d, %d\n", errorM, errorN);
			return 1;
		}
	}

	// M * N on the device
	MatrixMulOnDevice(M, N, P);
	
	printf("GPU computation complete\n");
	// compute the matrix multiplication on the CPU for comparison
	Matrix reference = AllocateMatrix(P.height, P.width, 0);
	computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
	
	printf("CPU computation complete\n");
	// check if the device result is equivalent to the expected solution
	bool res = CompareMatrices(reference, P);
	printf("Test %s\n", res ? "PASSED" : "FAILED");
	
	if(argc == 5)
	{
		WriteFile(P, argv[4]);
	}
	else if(argc == 2)
	{
		WriteFile(P, argv[1]);
	}   

	// Free matrices (`reference` was previously leaked)
	FreeMatrix(&reference);
	FreeMatrix(&M);
	FreeMatrix(&N);
	FreeMatrix(&P);
	return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
// Compute P = M * N on the device: copy inputs over, launch the tiled
// kernel (one TILE_WIDTH x TILE_WIDTH block per output tile), copy back.
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
	// Load M and N to the device
	Matrix Md = AllocateDeviceMatrix(M);
	CopyToDeviceMatrix(Md, M);
	Matrix Nd = AllocateDeviceMatrix(N);
	CopyToDeviceMatrix(Nd, N);
	
	// Allocate P on the device
	Matrix Pd = AllocateDeviceMatrix(P);
	CopyToDeviceMatrix(Pd, P); // Clear memory
	
	// Round the grid up so a partial tile at the right/bottom edge of P is
	// still covered.
	int blocks_w = N.width/TILE_WIDTH ;
	int blocks_h = M.height /TILE_WIDTH;
	if(N.width % TILE_WIDTH)   // BUG FIX: was M.width (the *inner* dimension),
	{                          // which left the last column tile of P
		blocks_w ++;           // uncomputed whenever only N.width had a
	}                          // remainder.
	if(M.height % TILE_WIDTH)
	{
		blocks_h ++;
	}
	dim3 dimGrid(blocks_w, blocks_h, 1);
	dim3 dimBlock(TILE_WIDTH, TILE_WIDTH,1);
	
	// Launch the device computation threads!
	MatrixMulKernel<<<dimGrid,dimBlock>>>(Md, Nd, Pd);
	
	// Read P from the device
	CopyFromDeviceMatrix(P, Pd); 
	
	// Free device matrices
	FreeDeviceMatrix(&Md);
	FreeDeviceMatrix(&Nd);
	FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M.
// NOTE(review): the cudaMalloc result is unchecked — on failure the returned
// struct carries an unusable pointer; confirm callers can tolerate that.
Matrix AllocateDeviceMatrix(const Matrix M)
{
	Matrix Mdevice = M;
	int size = M.width * M.height * sizeof(float);
	cudaMalloc((void**)&Mdevice.elements, size);
	return Mdevice;
}
// Allocate a host matrix of dimensions height*width.
//	If init == 0, initialize to all zeroes.  
//	If init == 1, perform random initialization (values in [0, 3)).
//  If init == 2, initialize matrix parameters, but do not allocate memory 
Matrix AllocateMatrix(int height, int width, int init)
{
	Matrix M;
	M.width = M.pitch = width;
	M.height = height;
	int size = M.width * M.height;
	M.elements = NULL;
	
	// don't allocate memory on option 2
	if(init == 2)
		return M;
		
	M.elements = (float*) malloc(size*sizeof(float));

	for(unsigned int i = 0; i < M.height * M.width; i++)
	{
		// 3.0f * rand(): the original computed rand()*3 in int, which
		// overflows (UB) on platforms where RAND_MAX is large (e.g. glibc's
		// 2^31-1), yielding negative entries instead of values in [0, 3).
		M.elements[i] = (init == 0) ? (0.0f) : (3.0f * rand() / (float)RAND_MAX);
	}
	return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
	int size = Mhost.width * Mhost.height * sizeof(float);
	Mdevice.height = Mhost.height;   // note: Mdevice is passed by value, so
	Mdevice.width = Mhost.width;     // these field updates are local to this
	Mdevice.pitch = Mhost.pitch;     // call and never reach the caller
	cudaMemcpy(Mdevice.elements, Mhost.elements, size, 
					cudaMemcpyHostToDevice);
}
// Copy a device matrix back to a host matrix (element data only; the
// cudaMemcpy blocks until the preceding kernel has finished).
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
	int size = Mdevice.width * Mdevice.height * sizeof(float);
	cudaMemcpy(Mhost.elements, Mdevice.elements, size, 
					cudaMemcpyDeviceToHost);
}
// Free a device matrix and null the pointer so a double free is harmless.
void FreeDeviceMatrix(Matrix* M)
{
	cudaFree(M->elements);
	M->elements = NULL;
}
// Free a host Matrix and null the pointer so a double free is harmless.
void FreeMatrix(Matrix* M)
{
	free(M->elements);
	M->elements = NULL;
}
// Read M->height * M->width floats (whitespace-separated text) from
// file_name into M->elements.  Returns the number of elements actually
// read — the caller compares against height*width, so 0 (unopenable file)
// or a short count signals failure.
int ReadFile(Matrix* M, char* file_name)
{
	unsigned int data_read = M->width * M->height;
	FILE* input = fopen(file_name, "r");
	if (input == NULL)   // was dereferenced unchecked -> crash on a bad path
		return 0;
	unsigned i = 0;
	for (; i < data_read; i++) 
		if (fscanf(input, "%f", &(M->elements[i])) != 1)
			break;       // short or garbled file: report how far we got
	fclose(input);       // FILE* was previously leaked
	return i;
}
// Read `num_params` integers (matrix dimensions) from a text file.
// Returns num_params on success; 0 if the file cannot be opened, or the
// short count if it runs out early — so the caller's `!= 3` check actually
// fires on failure.
int ReadParamsFile(int* params, char* file_name, int num_params)
{
	FILE* input = fopen(file_name, "r");
	if (input == NULL)   // was dereferenced unchecked
		return 0;
	for (unsigned i = 0; i < num_params; i++) 
		if (fscanf(input, "%d", &(params[i])) != 1) {
			fclose(input);
			return i;
		}
	fclose(input);       // FILE* was previously leaked
	return num_params;
}
// Write matrix M to file_name as space-separated floats.
void WriteFile(Matrix M, char* file_name)
{
	unsigned int size = M.width * M.height;
	FILE* output = fopen(file_name, "w");
	if (output == NULL) {   // was dereferenced unchecked
		printf("Error opening output file\n");
		return;
	}
	for (unsigned i = 0; i < size; i++) {
		fprintf(output, "%f ", M.elements[i]);
	}
	fclose(output);         // was leaked; close also flushes buffered output
}
// returns true iff A and B have the same shape and all elements agree
// within an absolute tolerance of 1e-4
bool CompareMatrices(Matrix A, Matrix B) {
	unsigned int size = A.width * A.height;

	if ( (A.width != B.width) || (A.height != B.height) )
		return false;

	for (unsigned i = 0; i < size; i++)
		// fabsf instead of abs: depending on which headers supply overloads,
		// abs can resolve to the integer version, truncating the difference
		// to 0 and passing matrices that differ by anything under 1.0.
		if (fabsf(A.elements[i] - B.elements[i]) > 0.0001f)
			return false;
	return true;
}
|
03297283c2570dbe719b135539d3c81217b91400.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zgeelltmv.cu normal z -> d, Fri Jul 18 17:34:27 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
// ELL SpMV kernel: y = alpha * A * x + beta * y, one thread per row.
// ELL stores the padded rows column-major, so slot n of row `row` lives at
// [num_rows * n + row] — consecutive threads read consecutive addresses
// (coalesced).  (after Michael Garland)
__global__ void 
dgeelltmv_kernel( int num_rows, 
                 int num_cols,
                 int num_cols_per_row,
                 double alpha, 
                 double *d_val, 
                 magma_index_t *d_colind,
                 double *d_x,
                 double beta, 
                 double *d_y)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x ;
    if(row < num_rows ){
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        for ( int n = 0; n < num_cols_per_row ; n ++){
            int col = d_colind [ num_rows * n + row ];
            double val = d_val [ num_rows * n + row ];
            // padding entries carry val == 0; skip them so their column
            // index (which may be meaningless) is never used
            if( val != 0)
                dot += val * d_x[col ];
        }
        d_y[ row ] = dot * alpha + beta * d_y [ row ];
    }
}
// Shifted ELL SpMV kernel: y = alpha * A * x - lambda * x' + beta * y,
// one thread per row, where x' is taken from x at `offset+row` for the
// first `blocksize` rows and at add_rows[row-blocksize] for the rest.
// NOTE(review): add_rows presumably maps the overflow rows back to their
// source indices in x — confirm against the caller.
// (after Michael Garland)
__global__ void 
dgeelltmv_kernel_shift( int num_rows, 
                        int num_cols,
                        int num_cols_per_row,
                        double alpha, 
                        double lambda, 
                        double *d_val, 
                        magma_index_t *d_colind,
                        double *d_x,
                        double beta, 
                        int offset,
                        int blocksize,
                        magma_index_t *add_rows,
                        double *d_y){
    int row = blockDim.x * blockIdx.x + threadIdx.x ;
    if(row < num_rows ){
        double dot = MAGMA_D_MAKE(0.0, 0.0);
        for ( int n = 0; n < num_cols_per_row ; n ++){
            int col = d_colind [ num_rows * n + row ];
            double val = d_val [ num_rows * n + row ];
            // skip ELL padding entries (val == 0)
            if( val != 0)
                dot += val * d_x[col ];
        }
        if( row<blocksize )
            d_y[ row ] = dot * alpha - lambda 
                    * d_x[ offset+row ] + beta * d_y [ row ];
        else
            d_y[ row ] = dot * alpha - lambda 
                    * d_x[ add_rows[row-blocksize] ] + beta * d_y [ row ];   
    }
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
nnz_per_row magma_int_t
number of elements in the longest row
@param
alpha double
scalar multiplier
@param
d_val double*
array containing values of A in ELL
@param
d_colind magma_int_t*
columnindices of A in ELL
@param
d_x double*
input vector x
@param
beta double
scalar multiplier
@param
d_y double*
input/output vector y
@ingroup magmasparse_d
********************************************************************/
extern "C" magma_int_t
magma_dgeelltmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
double alpha,
double *d_val,
magma_index_t *d_colind,
double *d_x,
double beta,
double *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
hipLaunchKernelGGL(( dgeelltmv_kernel), dim3(grid), dim3(BLOCK_SIZE), 0, magma_stream ,
m, n, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
nnz_per_row magma_int_t
number of elements in the longest row
@param
alpha double
scalar multiplier
@param
lambda double
scalar multiplier
@param
d_val double*
array containing values of A in ELL
@param
d_colind magma_int_t*
columnindices of A in ELL
@param
d_x double*
input vector x
@param
beta double
scalar multiplier
@param
offset magma_int_t
in case not the main diagonal is scaled
@param
blocksize magma_int_t
in case of processing multiple vectors
@param
add_rows magma_int_t*
in case the matrixpowerskernel is used
@param
d_y double*
input/output vector y
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgeelltmv_shift( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
double alpha,
double lambda,
double *d_val,
magma_index_t *d_colind,
double *d_x,
double beta,
int offset,
int blocksize,
magma_index_t *add_rows,
double *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
double tmp_shift;
//magma_dsetvector(1,&lambda,1,&tmp_shift,1);
tmp_shift = lambda;
hipLaunchKernelGGL(( dgeelltmv_kernel_shift), dim3(grid), dim3(BLOCK_SIZE), 0, magma_stream ,
m, n, nnz_per_row, alpha, tmp_shift, d_val, d_colind, d_x,
beta, offset, blocksize, add_rows, d_y );
return MAGMA_SUCCESS;
}
| 03297283c2570dbe719b135539d3c81217b91400.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zgeelltmv.cu normal z -> d, Fri Jul 18 17:34:27 2014
*/
#include "common_magma.h"
#if (GPUSHMEM < 200)
#define BLOCK_SIZE 128
#else
#define BLOCK_SIZE 512
#endif
// ELL SpMV kernel
//Michael Garland
__global__ void
dgeelltmv_kernel( int num_rows,
int num_cols,
int num_cols_per_row,
double alpha,
double *d_val,
magma_index_t *d_colind,
double *d_x,
double beta,
double *d_y)
{
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = d_colind [ num_rows * n + row ];
double val = d_val [ num_rows * n + row ];
if( val != 0)
dot += val * d_x[col ];
}
d_y[ row ] = dot * alpha + beta * d_y [ row ];
}
}
// shifted ELL SpMV kernel
//Michael Garland
__global__ void
dgeelltmv_kernel_shift( int num_rows,
int num_cols,
int num_cols_per_row,
double alpha,
double lambda,
double *d_val,
magma_index_t *d_colind,
double *d_x,
double beta,
int offset,
int blocksize,
magma_index_t *add_rows,
double *d_y){
int row = blockDim.x * blockIdx.x + threadIdx.x ;
if(row < num_rows ){
double dot = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row ; n ++){
int col = d_colind [ num_rows * n + row ];
double val = d_val [ num_rows * n + row ];
if( val != 0)
dot += val * d_x[col ];
}
if( row<blocksize )
d_y[ row ] = dot * alpha - lambda
* d_x[ offset+row ] + beta * d_y [ row ];
else
d_y[ row ] = dot * alpha - lambda
* d_x[ add_rows[row-blocksize] ] + beta * d_y [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
nnz_per_row magma_int_t
number of elements in the longest row
@param
alpha double
scalar multiplier
@param
d_val double*
array containing values of A in ELL
@param
d_colind magma_int_t*
columnindices of A in ELL
@param
d_x double*
input vector x
@param
beta double
scalar multiplier
@param
d_y double*
input/output vector y
@ingroup magmasparse_d
********************************************************************/
extern "C" magma_int_t
magma_dgeelltmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
double alpha,
double *d_val,
magma_index_t *d_colind,
double *d_x,
double beta,
double *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
dgeelltmv_kernel<<< grid, BLOCK_SIZE, 0, magma_stream >>>
( m, n, nnz_per_row, alpha, d_val, d_colind, d_x, beta, d_y );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELL.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
nnz_per_row magma_int_t
number of elements in the longest row
@param
alpha double
scalar multiplier
@param
lambda double
scalar multiplier
@param
d_val double*
array containing values of A in ELL
@param
d_colind magma_int_t*
columnindices of A in ELL
@param
d_x double*
input vector x
@param
beta double
scalar multiplier
@param
offset magma_int_t
in case not the main diagonal is scaled
@param
blocksize magma_int_t
in case of processing multiple vectors
@param
add_rows magma_int_t*
in case the matrixpowerskernel is used
@param
d_y double*
input/output vector y
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dgeelltmv_shift( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
double alpha,
double lambda,
double *d_val,
magma_index_t *d_colind,
double *d_x,
double beta,
int offset,
int blocksize,
magma_index_t *add_rows,
double *d_y ){
dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1);
double tmp_shift;
//magma_dsetvector(1,&lambda,1,&tmp_shift,1);
tmp_shift = lambda;
dgeelltmv_kernel_shift<<< grid, BLOCK_SIZE, 0, magma_stream >>>
( m, n, nnz_per_row, alpha, tmp_shift, d_val, d_colind, d_x,
beta, offset, blocksize, add_rows, d_y );
return MAGMA_SUCCESS;
}
|
4378109374df3e1c7558d5330f104849e82395db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <windows.h>
using namespace std;
hipError_t TransposeNaive(int *Imatrix, int *Nmatrix, int size_x, int size_y, LARGE_INTEGER TimeCPU);
hipError_t TransposeShared(int *Imatrix, int *Smatrix, int size_x, int size_y, LARGE_INTEGER TimeCPU);
hipError_t TransposeSharedNoConflict(int *Imatrix, int *Cmatrix, int size_x, int size_y, LARGE_INTEGER TimeCPU);
hipError_t IsTransposed(int *Omatrix, int *Tmatrix, int size_x, int size_y, bool* good);
int const TILE = 32;
__global__ void IsTransposedGPU(int*Omatrix, int*Tmatrix, int size_x, int size_y, bool *good) {
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size_x*size_y)
if (Omatrix[index] != Tmatrix[index])
good = false;
}
__global__ void TransposeNaiveGPU(int* Imatrix, int* Nmatrix, int size_x, int size_y)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
int x = index % size_x;
int y = index / size_x;
if (x < size_x)
if (y < size_y) {
Nmatrix[x*size_y + y] = Imatrix[y*size_x + x];
}
}
__global__ void TransposeNaiveGPU2D(int* Imatrix, int* Nmatrix, int size_x, int size_y)
{
int x = blockIdx.x*TILE + threadIdx.x;
int y = blockIdx.y*TILE + threadIdx.y;
if (x < size_x)
if (y < size_y)
Nmatrix[x*size_y + y] = Imatrix[y*size_x + x];
}
__global__ void TransposeSharedGPU(int* Imatrix, int* Smatrix, int size_x, int size_y)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
int x = index % size_x;
int y = index / size_x;
if (x < size_x)
if (y < size_y) {
int Tindex = threadIdx.x;
extern __shared__ int temp[];
int Tx = Tindex % TILE;
int Ty = Tindex / TILE;
temp[Ty* TILE + Tx] = Imatrix[y*size_x + x];
__syncthreads();
Smatrix[x*size_y + y] = temp[Ty* TILE + Tx];
}
}
__global__ void TransposeSharedGPU2D(int* Imatrix, int* Smatrix, int size_x, int size_y)
{
int x = blockIdx.x*TILE + threadIdx.x;
int y = blockIdx.y*TILE + threadIdx.y;
if (x < size_x)
if (y < size_y) {
int Tindex = threadIdx.x;
__shared__ int temp[TILE][TILE];
int Tx = threadIdx.x;
int Ty = threadIdx.y;
temp[Tx][Ty] = Imatrix[y*size_x + x];
__syncthreads();
Smatrix[x*size_y + y] = temp[Tx][Ty];
}
}
__global__ void TransposeSharedNoConflictGPU2D(int* Imatrix, int* Smatrix, int size_x, int size_y)
{
int x = blockIdx.x*TILE + threadIdx.x;
int y = blockIdx.y*TILE + threadIdx.y;
if (x < size_x)
if (y < size_y) {
int Tindex = threadIdx.x;
__shared__ int temp[TILE][TILE + 1];
int Tx = threadIdx.x;
int Ty = threadIdx.y;
temp[Ty][Tx] = Imatrix[y*size_x + x];
__syncthreads();
Smatrix[x*size_y + y] = temp[Ty][Tx];
}
}
void TransposeCPU(int**Imatrix, int**Pmatrix, int size_x, int size_y) {
for (int i = 0; i < size_y; i++)
for (int j = 0; j < size_x; j++)
Pmatrix[j][i] = Imatrix[i][j];
}
void ShowMatrix(int** matrix, int size_x, int size_y, bool large) {
if (large) {
size_x = 16;
size_y = 16;
}
for (int i = 0; i < size_y; i++) {
for (int j = 0; j < size_x; j++) {
cout << matrix[i][j] << "\t";
}
cout << endl;
}
}
void Zero(int**Pmatrix, int size_x, int size_y) {
for (int i = 0; i < size_y; i++)
for (int j = 0; j < size_x; j++)
Pmatrix[i][j] = 0;;
}
void IsTransposedCPU(int**Omatrix, int**Tmatrix, int size_y, int size_x, bool*good) {
for (int i = 0; i < size_y; i++)
for (int j = 0; j < size_x; j++)
if (Omatrix[i][j] != Tmatrix[i][j])
*good = false;
}
int main()
{
int size_x = 10000;
int size_y = 10000;
bool large = true;
bool good = true;
hipError_t cudaStatus;
int **Imatrix = new int*[size_y]; Imatrix[0] = new int[size_x*size_y];
int **Omatrix = new int*[size_x]; Omatrix[0] = new int[size_y*size_x];
int **Tmatrix = new int*[size_x]; Tmatrix[0] = new int[size_y*size_x];
for (int i = 1; i < size_y; i++)
Imatrix[i] = Imatrix[i - 1] + size_x; //&Imatrix[0][i*size_x]; //<- ten sam efekt
for (int i = 1; i < size_x; i++) {
Omatrix[i] = Omatrix[i - 1] + size_y;
Tmatrix[i] = Tmatrix[i - 1] + size_y;
}
for (int i = 0; i < size_y; i++)
for (int j = 0; j < size_x; j++)
Imatrix[i][j] = i * size_x + j;
cout << "MACIERZ WEJSCIOWA" << endl;
ShowMatrix(Imatrix, size_x, size_y, large);
int size_x2 = size_y;
int size_y2 = size_x;
for (int i = 0; i < size_y2; i++)
for (int j = 0; j < size_x2; j++)
Omatrix[i][j] = j * size_y2 + i;
cout << endl << endl;
cout << "SPODZIEWANA MACIERZ WYJSCIOWA" << endl;
ShowMatrix(Omatrix, size_x2, size_y2, large);
cout << endl << endl;
Zero(Tmatrix, size_x2, size_y2);
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
// TRANSPOSE CPU
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
LARGE_INTEGER StartCPU, StopCPU, TimeCPU;
LARGE_INTEGER frequency;
QueryPerformanceFrequency(&frequency);
QueryPerformanceCounter(&StartCPU);
TransposeCPU(Imatrix, Tmatrix, size_x, size_y);
QueryPerformanceCounter(&StopCPU);
TimeCPU.QuadPart = StopCPU.QuadPart - StartCPU.QuadPart;
TimeCPU.QuadPart = TimeCPU.QuadPart * 1000 / frequency.QuadPart; //ms
IsTransposedCPU(Omatrix, Tmatrix, size_x, size_y, &good);
if (!good) {
cout << "BLAD TRANSPOZYCJI" << endl;
return -1;
}
cout << "TRANSPOZYCJA CPU'OWA czas: " << TimeCPU.QuadPart << "ms" << endl;
//TimeCPU.QuadPart = TimeCPU.QuadPart * 1000;
ShowMatrix(Omatrix, size_x2, size_y2, large);
cout << endl << endl;
Zero(Tmatrix, size_x2, size_y2);
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
// TRANSPOSE NAIVE
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
///LARGE_INTEGER StartNaive, StopNaive, TimeNaive;
///QueryPerformanceFrequency(&frequency);
///QueryPerformanceCounter(&StartNaive);
cudaStatus = TransposeNaive(Imatrix[0], Tmatrix[0], size_x, size_y, TimeCPU);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "TransposeNaive failed!");
return 1;
}
IsTransposedCPU(Omatrix, Tmatrix, size_x, size_y, &good);
if (!good) {
cout << "BD TRANSPOZYCJI" << endl;
return -1;
}
///QueryPerformanceCounter(&StopNaive);
///TimeNaive.QuadPart = StopNaive.QuadPart - StartNaive.QuadPart;
///TimeNaive.QuadPart = TimeNaive.QuadPart * 1000000 / frequency.QuadPart / 1000; //ms
//cout << "MACIERZ NAIVNA czas: " << TimeNaive.QuadPart << "ms" << endl;
ShowMatrix(Tmatrix, size_x2, size_y2, large);
cout << endl << endl;
Zero(Tmatrix, size_x2, size_y2);
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
// TRANSPOSE SHARED
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
///LARGE_INTEGER StartShared, StopShared, TimeShared;
///QueryPerformanceFrequency(&frequency);
///QueryPerformanceCounter(&StartShared);
cudaStatus = TransposeShared(Imatrix[0], Tmatrix[0], size_x, size_y, TimeCPU);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "TransposeShared failed!");
cout << endl << hipGetErrorString(cudaStatus) << endl;
return 1;
}
///QueryPerformanceCounter(&StopShared);
///TimeShared.QuadPart = StopShared.QuadPart - StartShared.QuadPart;
///TimeShared.QuadPart = TimeShared.QuadPart * 1000000 / frequency.QuadPart / 1000; //ms
IsTransposedCPU(Omatrix, Tmatrix, size_x, size_y, &good);
if (!good) {
cout << "BD TRANSPOZYCJI" << endl;
return -1;
}
//cout << "MACIERZ SHAREDOWA czas: " << TimeShared.QuadPart << "ms" << endl;
ShowMatrix(Tmatrix, size_x2, size_y2, large);
cout << endl << endl;
Zero(Tmatrix, size_x2, size_y2);
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
// TRANSPOSE SHARED NO CONFLICT
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
///LARGE_INTEGER StartSharedNoConflicts, StopSharedNoConflicts, TimeSharedNoConflicts;
///QueryPerformanceFrequency(&frequency);
///QueryPerformanceCounter(&StartSharedNoConflicts);
cudaStatus = TransposeSharedNoConflict(Imatrix[0], Tmatrix[0], size_x, size_y, TimeCPU);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "TransposeShared failed!");
cout << endl << hipGetErrorString(cudaStatus) << endl;
return 1;
}
IsTransposedCPU(Omatrix, Tmatrix, size_x, size_y, &good);
if (!good) {
cout << "BD TRANSPOZYCJI" << endl;
return -1;
}
///QueryPerformanceCounter(&StopSharedNoConflicts);
///TimeSharedNoConflicts.QuadPart = StopSharedNoConflicts.QuadPart - StartSharedNoConflicts.QuadPart;
///TimeSharedNoConflicts.QuadPart = TimeSharedNoConflicts.QuadPart * 1000000 / frequency.QuadPart / 1000; //ms
//cout << "MACIERZ SHAREDOWA BEZ KONFLIKTW czas: " << TimeSharedNoConflicts.QuadPart << "ms" << endl;
ShowMatrix(Tmatrix, size_x2, size_y2, large);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t TransposeNaive(int *Imatrix, int *Nmatrix, int size_x, int size_y, LARGE_INTEGER TimeCPU)
{
int *dev_I = 0;
int *dev_N = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_I, size_x*size_y * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_N, size_x*size_y * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_I, Imatrix, size_x*size_y * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_N, Nmatrix, size_x*size_y * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
dim3 block(TILE, TILE);
int grid_x = (size_x % TILE == 0) ? size_x / TILE : size_x / TILE + 1;
int grid_y = (size_y % TILE == 0) ? size_y / TILE : size_y / TILE + 1;
dim3 grid(grid_x, grid_y);
// Launch a kernel on the GPU with one thread for each element.
//TransposeNaiveGPU << <10000, 1024 >> > (dev_I, dev_N, size_x, size_y);
LARGE_INTEGER StartNaive, StopNaive, TimeNaive;
LARGE_INTEGER frequency;
QueryPerformanceFrequency(&frequency);
QueryPerformanceCounter(&StartNaive);
TransposeNaiveGPU2D << <grid, block >> > (dev_I, dev_N, size_x, size_y);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
QueryPerformanceCounter(&StopNaive);
TimeNaive.QuadPart = StopNaive.QuadPart - StartNaive.QuadPart;
TimeNaive.QuadPart = TimeNaive.QuadPart * 1000 / frequency.QuadPart;
cout << "CZAS NAIVNEJ TRANSPOZYCJI= " << TimeNaive.QuadPart << "ms" << endl;
cout << "PRZYSPIESZENIE GPU WZGLEDEM CPU= " << (double)TimeCPU.QuadPart / TimeNaive.QuadPart << endl;
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(Nmatrix, dev_N, size_x*size_y * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_I);
hipFree(dev_N);
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t TransposeShared(int *Imatrix, int *Smatrix, int size_x, int size_y, LARGE_INTEGER TimeCPU)
{
int *dev_I = 0;
int *dev_S = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_I, size_x*size_y * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_S, size_x*size_y * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_I, Imatrix, size_x*size_y * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_S, Smatrix, size_x*size_y * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
dim3 block(TILE, TILE);
int grid_x = (size_x % TILE == 0) ? size_x / TILE : size_x / TILE + 1;
int grid_y = (size_y % TILE == 0) ? size_y / TILE : size_y / TILE + 1;
dim3 grid(grid_x, grid_y);
// Launch a kernel on the GPU with one thread for each element.
//TransposeSharedGPU << <10000, 1024, TILE*TILE * sizeof(int) >> > (dev_I, dev_S, size_x, size_y, TILE);
LARGE_INTEGER StartShared, StopShared, TimeShared;
LARGE_INTEGER frequency;
QueryPerformanceFrequency(&frequency);
QueryPerformanceCounter(&StartShared);
TransposeSharedGPU2D << <grid, block >> > (dev_I, dev_S, size_x, size_y);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
QueryPerformanceCounter(&StopShared);
TimeShared.QuadPart = StopShared.QuadPart - StartShared.QuadPart;
TimeShared.QuadPart = TimeShared.QuadPart * 1000 / frequency.QuadPart;
cout << "CZAS SHAREDOWEJ TRANSPOZYCJI= " << TimeShared.QuadPart << "ms" << endl;
cout << "PRZYSPIESZENIE GPU WZGLEDEM CPU= " << (double)TimeCPU.QuadPart / TimeShared.QuadPart << endl;
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(Smatrix, dev_S, size_x*size_y * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_I);
hipFree(dev_S);
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t TransposeSharedNoConflict(int *Imatrix, int *Cmatrix, int size_x, int size_y, LARGE_INTEGER TimeCPU)
{
int *dev_I = 0;
int *dev_C = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_I, size_x*size_y * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_C, size_x*size_y * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_I, Imatrix, size_x*size_y * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_C, Cmatrix, size_x*size_y * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
int TILE = 32;
dim3 block(TILE, TILE);
int grid_x = (size_x % TILE == 0) ? size_x / TILE : size_x / TILE + 1;
int grid_y = (size_y % TILE == 0) ? size_y / TILE : size_y / TILE + 1;
dim3 grid(grid_x, grid_y);
// Launch a kernel on the GPU with one thread for each element.
//TransposeSharedGPU << <10000, 1024, TILE*TILE * sizeof(int) >> > (dev_I, dev_S, size_x, size_y, TILE);
LARGE_INTEGER StartSharedNoConflicts, StopSharedNoConflicts, TimeSharedNoConflicts;
LARGE_INTEGER frequency;
QueryPerformanceFrequency(&frequency);
QueryPerformanceCounter(&StartSharedNoConflicts);
TransposeSharedNoConflictGPU2D << <grid, block >> > (dev_I, dev_C, size_x, size_y);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
QueryPerformanceCounter(&StopSharedNoConflicts);
TimeSharedNoConflicts.QuadPart = StopSharedNoConflicts.QuadPart - StartSharedNoConflicts.QuadPart;
TimeSharedNoConflicts.QuadPart = TimeSharedNoConflicts.QuadPart * 1000 / frequency.QuadPart;
cout << "CZAS SHAREDOWEJ TRANSPOZYCJI BEZ KONFLIKTW= " << TimeSharedNoConflicts.QuadPart << "ms" << endl;
cout << "PRZYSPIESZENIE GPU WZGLEDEM CPU= " << (double)TimeCPU.QuadPart / TimeSharedNoConflicts.QuadPart << endl;
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(Cmatrix, dev_C, size_x*size_y * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_I);
hipFree(dev_C);
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t IsTransposed(int *Omatrix, int *Tmatrix, int size_x, int size_y, bool* good)
{
int *dev_O = 0;
int *dev_T = 0;
bool *dev_g = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_O, size_x*size_y * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_T, size_x*size_y * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_g, sizeof(bool));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_O, Omatrix, size_x*size_y * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_T, Tmatrix, size_x*size_y * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_g, &good, sizeof(bool), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
int Blocks = (size_x*size_y % 1024) ? size_x * size_y / 1024 : size_x * size_y % 1024 + 1;
// Launch a kernel on the GPU with one thread for each element.
//TransposeNaiveGPU << <10000, 1024 >> > (dev_I, dev_N, size_x, size_y);
IsTransposedGPU << <Blocks, 1024 >> > (dev_O, dev_T, size_x, size_y, dev_g);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(good, dev_g, sizeof(bool), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_O);
hipFree(dev_T);
hipFree(dev_g);
return cudaStatus;
} | 4378109374df3e1c7558d5330f104849e82395db.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <windows.h>
using namespace std;
cudaError_t TransposeNaive(int *Imatrix, int *Nmatrix, int size_x, int size_y, LARGE_INTEGER TimeCPU);
cudaError_t TransposeShared(int *Imatrix, int *Smatrix, int size_x, int size_y, LARGE_INTEGER TimeCPU);
cudaError_t TransposeSharedNoConflict(int *Imatrix, int *Cmatrix, int size_x, int size_y, LARGE_INTEGER TimeCPU);
cudaError_t IsTransposed(int *Omatrix, int *Tmatrix, int size_x, int size_y, bool* good);
int const TILE = 32;
__global__ void IsTransposedGPU(int*Omatrix, int*Tmatrix, int size_x, int size_y, bool *good) {
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size_x*size_y)
if (Omatrix[index] != Tmatrix[index])
good = false;
}
__global__ void TransposeNaiveGPU(int* Imatrix, int* Nmatrix, int size_x, int size_y)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
int x = index % size_x;
int y = index / size_x;
if (x < size_x)
if (y < size_y) {
Nmatrix[x*size_y + y] = Imatrix[y*size_x + x];
}
}
__global__ void TransposeNaiveGPU2D(int* Imatrix, int* Nmatrix, int size_x, int size_y)
{
int x = blockIdx.x*TILE + threadIdx.x;
int y = blockIdx.y*TILE + threadIdx.y;
if (x < size_x)
if (y < size_y)
Nmatrix[x*size_y + y] = Imatrix[y*size_x + x];
}
__global__ void TransposeSharedGPU(int* Imatrix, int* Smatrix, int size_x, int size_y)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
int x = index % size_x;
int y = index / size_x;
if (x < size_x)
if (y < size_y) {
int Tindex = threadIdx.x;
extern __shared__ int temp[];
int Tx = Tindex % TILE;
int Ty = Tindex / TILE;
temp[Ty* TILE + Tx] = Imatrix[y*size_x + x];
__syncthreads();
Smatrix[x*size_y + y] = temp[Ty* TILE + Tx];
}
}
__global__ void TransposeSharedGPU2D(int* Imatrix, int* Smatrix, int size_x, int size_y)
{
int x = blockIdx.x*TILE + threadIdx.x;
int y = blockIdx.y*TILE + threadIdx.y;
if (x < size_x)
if (y < size_y) {
int Tindex = threadIdx.x;
__shared__ int temp[TILE][TILE];
int Tx = threadIdx.x;
int Ty = threadIdx.y;
temp[Tx][Ty] = Imatrix[y*size_x + x];
__syncthreads();
Smatrix[x*size_y + y] = temp[Tx][Ty];
}
}
__global__ void TransposeSharedNoConflictGPU2D(int* Imatrix, int* Smatrix, int size_x, int size_y)
{
int x = blockIdx.x*TILE + threadIdx.x;
int y = blockIdx.y*TILE + threadIdx.y;
if (x < size_x)
if (y < size_y) {
int Tindex = threadIdx.x;
__shared__ int temp[TILE][TILE + 1];
int Tx = threadIdx.x;
int Ty = threadIdx.y;
temp[Ty][Tx] = Imatrix[y*size_x + x];
__syncthreads();
Smatrix[x*size_y + y] = temp[Ty][Tx];
}
}
void TransposeCPU(int**Imatrix, int**Pmatrix, int size_x, int size_y) {
for (int i = 0; i < size_y; i++)
for (int j = 0; j < size_x; j++)
Pmatrix[j][i] = Imatrix[i][j];
}
void ShowMatrix(int** matrix, int size_x, int size_y, bool large) {
if (large) {
size_x = 16;
size_y = 16;
}
for (int i = 0; i < size_y; i++) {
for (int j = 0; j < size_x; j++) {
cout << matrix[i][j] << "\t";
}
cout << endl;
}
}
void Zero(int**Pmatrix, int size_x, int size_y) {
for (int i = 0; i < size_y; i++)
for (int j = 0; j < size_x; j++)
Pmatrix[i][j] = 0;;
}
void IsTransposedCPU(int**Omatrix, int**Tmatrix, int size_y, int size_x, bool*good) {
for (int i = 0; i < size_y; i++)
for (int j = 0; j < size_x; j++)
if (Omatrix[i][j] != Tmatrix[i][j])
*good = false;
}
// Demo driver: builds a size_x x size_y integer matrix, transposes it on the
// CPU and with three GPU variants (naive / shared / shared without bank
// conflicts), validates each result against the expected transpose, and
// prints timings via the Windows high-resolution counter.
int main()
{
int size_x = 10000;
int size_y = 10000;
bool large = true;  // truncate matrix printouts to a 16x16 corner
bool good = true;
cudaError_t cudaStatus;
// Each matrix is one contiguous allocation plus a row-pointer table, so
// matrix[0] can be handed to the GPU helpers as a flat row-major buffer.
// NOTE(review): these allocations are never delete[]d — benign only because
// the process exits right after.
int **Imatrix = new int*[size_y]; Imatrix[0] = new int[size_x*size_y];
int **Omatrix = new int*[size_x]; Omatrix[0] = new int[size_y*size_x];
int **Tmatrix = new int*[size_x]; Tmatrix[0] = new int[size_y*size_x];
for (int i = 1; i < size_y; i++)
Imatrix[i] = Imatrix[i - 1] + size_x; //&Imatrix[0][i*size_x]; //<- same effect
for (int i = 1; i < size_x; i++) {
Omatrix[i] = Omatrix[i - 1] + size_y;
Tmatrix[i] = Tmatrix[i - 1] + size_y;
}
// Input: row-major ramp 0,1,2,... so the expected transpose is easy to build.
for (int i = 0; i < size_y; i++)
for (int j = 0; j < size_x; j++)
Imatrix[i][j] = i * size_x + j;
cout << "MACIERZ WEJSCIOWA" << endl;
ShowMatrix(Imatrix, size_x, size_y, large);
// Transposed dimensions.
int size_x2 = size_y;
int size_y2 = size_x;
// Omatrix = the analytically-known transpose, used as ground truth below.
for (int i = 0; i < size_y2; i++)
for (int j = 0; j < size_x2; j++)
Omatrix[i][j] = j * size_y2 + i;
cout << endl << endl;
cout << "SPODZIEWANA MACIERZ WYJSCIOWA" << endl;
ShowMatrix(Omatrix, size_x2, size_y2, large);
cout << endl << endl;
Zero(Tmatrix, size_x2, size_y2);
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
// TRANSPOSE CPU
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
LARGE_INTEGER StartCPU, StopCPU, TimeCPU;
LARGE_INTEGER frequency;
QueryPerformanceFrequency(&frequency);
QueryPerformanceCounter(&StartCPU);
TransposeCPU(Imatrix, Tmatrix, size_x, size_y);
QueryPerformanceCounter(&StopCPU);
TimeCPU.QuadPart = StopCPU.QuadPart - StartCPU.QuadPart;
TimeCPU.QuadPart = TimeCPU.QuadPart * 1000 / frequency.QuadPart; //ms
// NOTE(review): IsTransposedCPU's parameters are (.., size_y, size_x, ..) but
// (size_x, size_y) is passed — harmless only because the matrix is square.
IsTransposedCPU(Omatrix, Tmatrix, size_x, size_y, &good);
if (!good) {
cout << "BLAD TRANSPOZYCJI" << endl;
return -1;
}
cout << "TRANSPOZYCJA CPU'OWA czas: " << TimeCPU.QuadPart << "ms" << endl;
//TimeCPU.QuadPart = TimeCPU.QuadPart * 1000;
ShowMatrix(Omatrix, size_x2, size_y2, large);
cout << endl << endl;
Zero(Tmatrix, size_x2, size_y2);
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
// TRANSPOSE NAIVE
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
///LARGE_INTEGER StartNaive, StopNaive, TimeNaive;
///QueryPerformanceFrequency(&frequency);
///QueryPerformanceCounter(&StartNaive);
// TimeCPU is passed so the helper can print the GPU-vs-CPU speedup itself.
cudaStatus = TransposeNaive(Imatrix[0], Tmatrix[0], size_x, size_y, TimeCPU);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "TransposeNaive failed!");
return 1;
}
IsTransposedCPU(Omatrix, Tmatrix, size_x, size_y, &good);
if (!good) {
cout << "BŁĄD TRANSPOZYCJI" << endl;
return -1;
}
///QueryPerformanceCounter(&StopNaive);
///TimeNaive.QuadPart = StopNaive.QuadPart - StartNaive.QuadPart;
///TimeNaive.QuadPart = TimeNaive.QuadPart * 1000000 / frequency.QuadPart / 1000; //ms
//cout << "MACIERZ NAIVNA czas: " << TimeNaive.QuadPart << "ms" << endl;
ShowMatrix(Tmatrix, size_x2, size_y2, large);
cout << endl << endl;
Zero(Tmatrix, size_x2, size_y2);
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
// TRANSPOSE SHARED
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
///LARGE_INTEGER StartShared, StopShared, TimeShared;
///QueryPerformanceFrequency(&frequency);
///QueryPerformanceCounter(&StartShared);
cudaStatus = TransposeShared(Imatrix[0], Tmatrix[0], size_x, size_y, TimeCPU);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "TransposeShared failed!");
cout << endl << cudaGetErrorString(cudaStatus) << endl;
return 1;
}
///QueryPerformanceCounter(&StopShared);
///TimeShared.QuadPart = StopShared.QuadPart - StartShared.QuadPart;
///TimeShared.QuadPart = TimeShared.QuadPart * 1000000 / frequency.QuadPart / 1000; //ms
IsTransposedCPU(Omatrix, Tmatrix, size_x, size_y, &good);
if (!good) {
cout << "BŁĄD TRANSPOZYCJI" << endl;
return -1;
}
//cout << "MACIERZ SHAREDOWA czas: " << TimeShared.QuadPart << "ms" << endl;
ShowMatrix(Tmatrix, size_x2, size_y2, large);
cout << endl << endl;
Zero(Tmatrix, size_x2, size_y2);
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
// TRANSPOSE SHARED NO CONFLICT
// O=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====<>=====O
///LARGE_INTEGER StartSharedNoConflicts, StopSharedNoConflicts, TimeSharedNoConflicts;
///QueryPerformanceFrequency(&frequency);
///QueryPerformanceCounter(&StartSharedNoConflicts);
cudaStatus = TransposeSharedNoConflict(Imatrix[0], Tmatrix[0], size_x, size_y, TimeCPU);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "TransposeShared failed!");
cout << endl << cudaGetErrorString(cudaStatus) << endl;
return 1;
}
IsTransposedCPU(Omatrix, Tmatrix, size_x, size_y, &good);
if (!good) {
cout << "BŁĄD TRANSPOZYCJI" << endl;
return -1;
}
///QueryPerformanceCounter(&StopSharedNoConflicts);
///TimeSharedNoConflicts.QuadPart = StopSharedNoConflicts.QuadPart - StartSharedNoConflicts.QuadPart;
///TimeSharedNoConflicts.QuadPart = TimeSharedNoConflicts.QuadPart * 1000000 / frequency.QuadPart / 1000; //ms
//cout << "MACIERZ SHAREDOWA BEZ KONFLIKTÓW czas: " << TimeSharedNoConflicts.QuadPart << "ms" << endl;
ShowMatrix(Tmatrix, size_x2, size_y2, large);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Host wrapper for the naive (global-memory only) GPU transpose.
// Copies the flat row-major input to the device, launches
// TransposeNaiveGPU2D over a ceil-divided TILE x TILE grid, times the kernel
// with QueryPerformanceCounter, prints the speedup vs. the CPU baseline
// carried in TimeCPU, and copies the result back into Nmatrix.
// All failure paths jump to Error: so both device buffers are freed.
cudaError_t TransposeNaive(int *Imatrix, int *Nmatrix, int size_x, int size_y, LARGE_INTEGER TimeCPU)
{
int *dev_I = 0;
int *dev_N = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_I, size_x*size_y * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_N, size_x*size_y * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_I, Imatrix, size_x*size_y * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// NOTE(review): copying the output buffer host->device looks unnecessary —
// the kernel writes every element — but is kept as-is here.
cudaStatus = cudaMemcpy(dev_N, Nmatrix, size_x*size_y * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Ceil-divide each dimension by TILE so partial edge tiles get a block.
dim3 block(TILE, TILE);
int grid_x = (size_x % TILE == 0) ? size_x / TILE : size_x / TILE + 1;
int grid_y = (size_y % TILE == 0) ? size_y / TILE : size_y / TILE + 1;
dim3 grid(grid_x, grid_y);
// Launch a kernel on the GPU with one thread for each element.
//TransposeNaiveGPU << <10000, 1024 >> > (dev_I, dev_N, size_x, size_y);
LARGE_INTEGER StartNaive, StopNaive, TimeNaive;
LARGE_INTEGER frequency;
QueryPerformanceFrequency(&frequency);
QueryPerformanceCounter(&StartNaive);
TransposeNaiveGPU2D << <grid, block >> > (dev_I, dev_N, size_x, size_y);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Stop the clock only after the synchronize, so the full kernel is timed.
QueryPerformanceCounter(&StopNaive);
TimeNaive.QuadPart = StopNaive.QuadPart - StartNaive.QuadPart;
TimeNaive.QuadPart = TimeNaive.QuadPart * 1000 / frequency.QuadPart;
cout << "CZAS NAIVNEJ TRANSPOZYCJI= " << TimeNaive.QuadPart << "ms" << endl;
cout << "PRZYSPIESZENIE GPU WZGLEDEM CPU= " << (double)TimeCPU.QuadPart / TimeNaive.QuadPart << endl;
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(Nmatrix, dev_N, size_x*size_y * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_I);
cudaFree(dev_N);
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
// Host wrapper for the shared-memory GPU transpose (with bank conflicts).
// Same structure as TransposeNaive: copy in, launch TransposeSharedGPU2D on a
// ceil-divided TILE x TILE grid, time it, print the speedup vs. the CPU
// baseline in TimeCPU, and copy the result back into Smatrix.
// All failure paths jump to Error: so both device buffers are freed.
cudaError_t TransposeShared(int *Imatrix, int *Smatrix, int size_x, int size_y, LARGE_INTEGER TimeCPU)
{
int *dev_I = 0;
int *dev_S = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_I, size_x*size_y * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_S, size_x*size_y * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_I, Imatrix, size_x*size_y * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// NOTE(review): copying the output buffer host->device looks unnecessary —
// the kernel writes every element — but is kept as-is here.
cudaStatus = cudaMemcpy(dev_S, Smatrix, size_x*size_y * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Ceil-divide each dimension by TILE so partial edge tiles get a block.
dim3 block(TILE, TILE);
int grid_x = (size_x % TILE == 0) ? size_x / TILE : size_x / TILE + 1;
int grid_y = (size_y % TILE == 0) ? size_y / TILE : size_y / TILE + 1;
dim3 grid(grid_x, grid_y);
// Launch a kernel on the GPU with one thread for each element.
//TransposeSharedGPU << <10000, 1024, TILE*TILE * sizeof(int) >> > (dev_I, dev_S, size_x, size_y, TILE);
LARGE_INTEGER StartShared, StopShared, TimeShared;
LARGE_INTEGER frequency;
QueryPerformanceFrequency(&frequency);
QueryPerformanceCounter(&StartShared);
TransposeSharedGPU2D << <grid, block >> > (dev_I, dev_S, size_x, size_y);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Stop the clock only after the synchronize, so the full kernel is timed.
QueryPerformanceCounter(&StopShared);
TimeShared.QuadPart = StopShared.QuadPart - StartShared.QuadPart;
TimeShared.QuadPart = TimeShared.QuadPart * 1000 / frequency.QuadPart;
cout << "CZAS SHAREDOWEJ TRANSPOZYCJI= " << TimeShared.QuadPart << "ms" << endl;
cout << "PRZYSPIESZENIE GPU WZGLEDEM CPU= " << (double)TimeCPU.QuadPart / TimeShared.QuadPart << endl;
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(Smatrix, dev_S, size_x*size_y * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_I);
cudaFree(dev_S);
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
// Host wrapper for the bank-conflict-free shared-memory GPU transpose.
// Copies the flat row-major input to the device, launches
// TransposeSharedNoConflictGPU2D on a ceil-divided TILE x TILE grid, times the
// kernel, prints the speedup vs. the CPU baseline in TimeCPU, and copies the
// result back into Cmatrix.  All failure paths jump to Error: so both device
// buffers are freed.
//
// Fix vs. original: a local `int TILE = 32;` shadowed the global TILE that
// the kernel's __shared__ tile is sized with.  If the two ever disagreed the
// launch geometry would not match the device tile (out-of-bounds shared
// accesses).  The shadowing local is removed so, like the other wrappers,
// host and device use the same TILE.
cudaError_t TransposeSharedNoConflict(int *Imatrix, int *Cmatrix, int size_x, int size_y, LARGE_INTEGER TimeCPU)
{
int *dev_I = 0;
int *dev_C = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_I, size_x*size_y * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_C, size_x*size_y * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_I, Imatrix, size_x*size_y * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_C, Cmatrix, size_x*size_y * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Ceil-divide each dimension by the (global) TILE so edge tiles get a block.
dim3 block(TILE, TILE);
int grid_x = (size_x % TILE == 0) ? size_x / TILE : size_x / TILE + 1;
int grid_y = (size_y % TILE == 0) ? size_y / TILE : size_y / TILE + 1;
dim3 grid(grid_x, grid_y);
// Launch a kernel on the GPU with one thread for each element.
LARGE_INTEGER StartSharedNoConflicts, StopSharedNoConflicts, TimeSharedNoConflicts;
LARGE_INTEGER frequency;
QueryPerformanceFrequency(&frequency);
QueryPerformanceCounter(&StartSharedNoConflicts);
TransposeSharedNoConflictGPU2D << <grid, block >> > (dev_I, dev_C, size_x, size_y);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Stop the clock only after the synchronize, so the full kernel is timed.
QueryPerformanceCounter(&StopSharedNoConflicts);
TimeSharedNoConflicts.QuadPart = StopSharedNoConflicts.QuadPart - StartSharedNoConflicts.QuadPart;
TimeSharedNoConflicts.QuadPart = TimeSharedNoConflicts.QuadPart * 1000 / frequency.QuadPart;
cout << "CZAS SHAREDOWEJ TRANSPOZYCJI BEZ KONFLIKTÓW= " << TimeSharedNoConflicts.QuadPart << "ms" << endl;
cout << "PRZYSPIESZENIE GPU WZGLEDEM CPU= " << (double)TimeCPU.QuadPart / TimeSharedNoConflicts.QuadPart << endl;
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(Cmatrix, dev_C, size_x*size_y * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_I);
cudaFree(dev_C);
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
// GPU-side verification: compares Omatrix (expected) against Tmatrix
// (computed) element-wise via IsTransposedGPU and clears *good on mismatch.
// All failure paths jump to Error: so every device buffer is freed.
//
// Fixes vs. original:
//  * block count: `(n % 1024) ? n/1024 : n%1024+1` launched too few blocks
//    whenever n was not a multiple of 1024 (tail elements never checked) and
//    exactly one block when it was.  Replaced with a proper ceil-division.
//  * `cudaMemcpy(dev_g, &good, sizeof(bool), ...)` copied the first byte of
//    the host POINTER, not the flag it points to; now copies *good.
cudaError_t IsTransposed(int *Omatrix, int *Tmatrix, int size_x, int size_y, bool* good)
{
int *dev_O = 0;
int *dev_T = 0;
bool *dev_g = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_O, size_x*size_y * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_T, size_x*size_y * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_g, sizeof(bool));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_O, Omatrix, size_x*size_y * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_T, Tmatrix, size_x*size_y * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Fixed: copy the pointed-to flag, not a byte of the pointer itself.
cudaStatus = cudaMemcpy(dev_g, good, sizeof(bool), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Fixed ceil-division: one thread per element, 1024 threads per block.
int total = size_x * size_y;
int Blocks = (total % 1024 == 0) ? total / 1024 : total / 1024 + 1;
// Launch a kernel on the GPU with one thread for each element.
IsTransposedGPU << <Blocks, 1024 >> > (dev_O, dev_T, size_x, size_y, dev_g);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy the verification flag back to the caller.
cudaStatus = cudaMemcpy(good, dev_g, sizeof(bool), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_O);
cudaFree(dev_T);
cudaFree(dev_g);
return cudaStatus;
}
b4ea0b7e0c29ed9bcef8c0ba40d97c8482fe36ab.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// *********************************************************************
// A simple demo application that implements a
// vector dot product computation between 2 float arrays.
//
// Runs computations with on the GPU device and then checks results
// against basic host CPU/C++ computation.
// *********************************************************************
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "shrUtils.h"
// Forward Declarations
void DotProductHost(const float* pfData1, const float* pfData2, float* pfResult, int iNumElements);
__global__
void dot_product(const float *__restrict__ a,
                 const float *__restrict__ b,
                 float *__restrict__ c,
                 const int n)
{
  // One thread per output element: c[g] is the dot product of the g-th
  // 4-float slices of a and b.  Inputs are laid out as 4*n contiguous floats.
  const int gid = blockIdx.x * blockDim.x + threadIdx.x;
  if (gid < n) {
    const int base = gid * 4;
    float acc = a[base] * b[base];
    for (int k = 1; k < 4; ++k)
      acc += a[base + k] * b[base + k];
    c[gid] = acc;
  }
}
// Benchmark driver: fills two host arrays of 4-float vectors, runs the
// dot_product kernel <repeat> times on the device, reports the average kernel
// time, and validates the device result against DotProductHost.
int main(int argc, char **argv)
{
  if (argc != 3) {
    printf("Usage: %s <number of elements> <repeat>\n", argv[0]);
    return 1;
  }
  const int iNumElements = atoi(argv[1]);
  const int iNumIterations = atoi(argv[2]);
  // set and log Global and Local work size dimensions
  int szLocalWorkSize = 256;
  // rounded up to the nearest multiple of the LocalWorkSize
  int szGlobalWorkSize = shrRoundUp((int)szLocalWorkSize, iNumElements);
  // Each "element" is a 4-float vector, hence the factor of 4 on the sources.
  const size_t src_size = szGlobalWorkSize * 4;
  const size_t src_size_bytes = src_size * sizeof(float);
  const size_t dst_size = szGlobalWorkSize;
  const size_t dst_size_bytes = dst_size * sizeof(float);
  // Allocate and initialize host arrays
  float* srcA = (float*) malloc (src_size_bytes);
  float* srcB = (float*) malloc (src_size_bytes);
  float* dst = (float*) malloc (dst_size_bytes);
  float* Golden = (float*) malloc (sizeof(float) * iNumElements);
  shrFillArray(srcA, 4 * iNumElements);
  shrFillArray(srcB, 4 * iNumElements);
  float *d_srcA;
  float *d_srcB;
  float *d_dst;
  hipMalloc((void**)&d_srcA, src_size_bytes);
  hipMemcpy(d_srcA, srcA, src_size_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_srcB, src_size_bytes);
  hipMemcpy(d_srcB, srcB, src_size_bytes, hipMemcpyHostToDevice);
  hipMalloc((void**)&d_dst, dst_size_bytes);
  printf("Global Work Size \t\t= %d\nLocal Work Size \t\t= %d\n# of Work Groups \t\t= %d\n\n",
         szGlobalWorkSize, szLocalWorkSize, (szGlobalWorkSize % szLocalWorkSize + szGlobalWorkSize/szLocalWorkSize));
  // NOTE(review): since szGlobalWorkSize is already a multiple of
  // szLocalWorkSize, the modulo term is 0 and this odd expression reduces to
  // the plain quotient (block count).
  dim3 grid (szGlobalWorkSize % szLocalWorkSize + szGlobalWorkSize/szLocalWorkSize);
  dim3 block (szLocalWorkSize);
  // Synchronize before/after the timed loop so only kernel time is measured.
  hipDeviceSynchronize();
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < iNumIterations; i++)
    hipLaunchKernelGGL(( dot_product), dim3(grid), dim3(block), 0, 0, d_srcA, d_srcB, d_dst, iNumElements);
  hipDeviceSynchronize();
  auto end = std::chrono::steady_clock::now();
  auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Average kernel execution time %f (s)\n", (time * 1e-9f) / iNumIterations);
  // Blocking copy of the results; device buffers are no longer needed after.
  hipMemcpy(dst, d_dst, dst_size_bytes, hipMemcpyDeviceToHost);
  hipFree(d_dst);
  hipFree(d_srcA);
  hipFree(d_srcB);
  // Compute and compare results for golden-host and report errors and pass/fail
  printf("Comparing against Host/C++ computation...\n\n");
  DotProductHost ((const float*)srcA, (const float*)srcB, (float*)Golden, iNumElements);
  shrBOOL bMatch = shrComparefet((const float*)Golden, (const float*)dst, (unsigned int)iNumElements, 0.0f, 0);
  printf("\nGPU Result %s CPU Result\n", (bMatch == shrTRUE) ? "matches" : "DOESN'T match");
  free(srcA);
  free(srcB);
  free(dst);
  free(Golden);
  return EXIT_SUCCESS;
}
// "Golden" Host processing dot product function for comparison purposes
// *********************************************************************
void DotProductHost(const float* pfData1, const float* pfData2, float* pfResult, int iNumElements)
{
    // CPU reference: result i is the dot product of the i-th 4-float slices
    // of the two inputs (inputs hold 4*iNumElements contiguous floats).
    int src = 0;
    for (int i = 0; i < iNumElements; ++i)
    {
        float acc = 0.0f;
        for (int k = 0; k < 4; ++k, ++src)
            acc += pfData1[src] * pfData2[src];
        pfResult[i] = acc;
    }
}
| b4ea0b7e0c29ed9bcef8c0ba40d97c8482fe36ab.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// *********************************************************************
// A simple demo application that implements a
// vector dot product computation between 2 float arrays.
//
// Runs computations with on the GPU device and then checks results
// against basic host CPU/C++ computation.
// *********************************************************************
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <cuda.h>
#include "shrUtils.h"
// Forward Declarations
void DotProductHost(const float* pfData1, const float* pfData2, float* pfResult, int iNumElements);
__global__
void dot_product(const float *__restrict__ a,
                 const float *__restrict__ b,
                 float *__restrict__ c,
                 const int n)
{
  // One thread per output element: c[g] is the dot product of the g-th
  // 4-float slices of a and b.  Inputs are laid out as 4*n contiguous floats.
  const int gid = blockIdx.x * blockDim.x + threadIdx.x;
  if (gid < n) {
    const int base = gid * 4;
    float acc = a[base] * b[base];
    for (int k = 1; k < 4; ++k)
      acc += a[base + k] * b[base + k];
    c[gid] = acc;
  }
}
// Benchmark driver: fills two host arrays of 4-float vectors, runs the
// dot_product kernel <repeat> times on the device, reports the average kernel
// time, and validates the device result against DotProductHost.
int main(int argc, char **argv)
{
  if (argc != 3) {
    printf("Usage: %s <number of elements> <repeat>\n", argv[0]);
    return 1;
  }
  const int iNumElements = atoi(argv[1]);
  const int iNumIterations = atoi(argv[2]);
  // set and log Global and Local work size dimensions
  int szLocalWorkSize = 256;
  // rounded up to the nearest multiple of the LocalWorkSize
  int szGlobalWorkSize = shrRoundUp((int)szLocalWorkSize, iNumElements);
  // Each "element" is a 4-float vector, hence the factor of 4 on the sources.
  const size_t src_size = szGlobalWorkSize * 4;
  const size_t src_size_bytes = src_size * sizeof(float);
  const size_t dst_size = szGlobalWorkSize;
  const size_t dst_size_bytes = dst_size * sizeof(float);
  // Allocate and initialize host arrays
  float* srcA = (float*) malloc (src_size_bytes);
  float* srcB = (float*) malloc (src_size_bytes);
  float* dst = (float*) malloc (dst_size_bytes);
  float* Golden = (float*) malloc (sizeof(float) * iNumElements);
  shrFillArray(srcA, 4 * iNumElements);
  shrFillArray(srcB, 4 * iNumElements);
  float *d_srcA;
  float *d_srcB;
  float *d_dst;
  cudaMalloc((void**)&d_srcA, src_size_bytes);
  cudaMemcpy(d_srcA, srcA, src_size_bytes, cudaMemcpyHostToDevice);
  cudaMalloc((void**)&d_srcB, src_size_bytes);
  cudaMemcpy(d_srcB, srcB, src_size_bytes, cudaMemcpyHostToDevice);
  cudaMalloc((void**)&d_dst, dst_size_bytes);
  printf("Global Work Size \t\t= %d\nLocal Work Size \t\t= %d\n# of Work Groups \t\t= %d\n\n",
         szGlobalWorkSize, szLocalWorkSize, (szGlobalWorkSize % szLocalWorkSize + szGlobalWorkSize/szLocalWorkSize));
  // NOTE(review): since szGlobalWorkSize is already a multiple of
  // szLocalWorkSize, the modulo term is 0 and this odd expression reduces to
  // the plain quotient (block count).
  dim3 grid (szGlobalWorkSize % szLocalWorkSize + szGlobalWorkSize/szLocalWorkSize);
  dim3 block (szLocalWorkSize);
  // Synchronize before/after the timed loop so only kernel time is measured.
  cudaDeviceSynchronize();
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < iNumIterations; i++)
    dot_product<<<grid, block>>>(d_srcA, d_srcB, d_dst, iNumElements);
  cudaDeviceSynchronize();
  auto end = std::chrono::steady_clock::now();
  auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
  printf("Average kernel execution time %f (s)\n", (time * 1e-9f) / iNumIterations);
  // Blocking copy of the results; device buffers are no longer needed after.
  cudaMemcpy(dst, d_dst, dst_size_bytes, cudaMemcpyDeviceToHost);
  cudaFree(d_dst);
  cudaFree(d_srcA);
  cudaFree(d_srcB);
  // Compute and compare results for golden-host and report errors and pass/fail
  printf("Comparing against Host/C++ computation...\n\n");
  DotProductHost ((const float*)srcA, (const float*)srcB, (float*)Golden, iNumElements);
  shrBOOL bMatch = shrComparefet((const float*)Golden, (const float*)dst, (unsigned int)iNumElements, 0.0f, 0);
  printf("\nGPU Result %s CPU Result\n", (bMatch == shrTRUE) ? "matches" : "DOESN'T match");
  free(srcA);
  free(srcB);
  free(dst);
  free(Golden);
  return EXIT_SUCCESS;
}
// "Golden" Host processing dot product function for comparison purposes
// *********************************************************************
void DotProductHost(const float* pfData1, const float* pfData2, float* pfResult, int iNumElements)
{
    // CPU reference: result i is the dot product of the i-th 4-float slices
    // of the two inputs (inputs hold 4*iNumElements contiguous floats).
    int src = 0;
    for (int i = 0; i < iNumElements; ++i)
    {
        float acc = 0.0f;
        for (int k = 0; k < 4; ++k, ++src)
            acc += pfData1[src] * pfData2[src];
        pfResult[i] = acc;
    }
}
|
d7d214a36d1ead4d2b3dac10b10b66879ad05454.hip | // !!! This is a file automatically generated by hipify!!!
//============================================================================
// treeminer.cu
//
// Main file that calls a cuda kernel to compute the SPM tree mining algorithm
//
// Invocation may be read in README.md
//
//============================================================================
// LIBRARIES
#include <string>
#include <unistd.h>
#include <stdio.h>
#include <stack>
#include <list>
#include <iostream>
#include <map>
#include <vector>
#include <set>
// HEADERS
#include "treeminer.h"
#include "timetrack.h"
#include "calcdb.h"
#include "eqclass.h"
#include "hashtree.h"
#include "stats.h"
#include "hip/hip_runtime.h"
#include "tools.h"
// CUDA KERNEL
#include "cuda_kernel.cu"
// GPU VARIABLES
int warp_size=0;
int shared_memory_size=0; // in bytes
int node_size=-1;
int block_dim=512;
int maxNodeSz=200;
int blk_max_size=0;
// TIMING
TimeTracker kernel_tt;
double kernel_time;
TimeTracker preproc_tt;
double preproc_time;
// GLOBAL VARIABLES
string *infile;
string *outfile = new string("summary.out");
HashTree *CandK = NULL;
FreqHT FK;
Dbase_Ctrl_Blk *DCB;
Stats stats;
typedef vector<bool> bit_vector;
int tot_trans_cnt=0; //total number of valid transactions
map<int, int> treeSz_loc_mp; //each set of tree size ends where in the DB_array
double MINSUP_PER;
int MINSUPPORT = -1;
int DBASE_MAXITEM;
int DBASE_NUM_TRANS;
// DEFAULT FLAGS
bool output_console = false; //don't print freq subtrees to console
bool count_unique = true; //count support only once per tree
sort_vals sort_type = nosort; //default is to sort in increasing order
prune_vals prune_type = prune; //prune candidates by default
set<vector<int> > freq_cand;
vector<int> *ITCNT = NULL; //used for sorting F1
bool F1cmp(int x, int y) {
    // Ordering predicate used when sorting F1 items by their support counts
    // (read from the file-level ITCNT vector); the global sort_type selects
    // ascending vs. descending order.
    const bool ascending = (*ITCNT)[x] < (*ITCNT)[y];
    return (sort_type == incr) ? ascending : !ascending;
}
/**
*
* Parses arguments based on user set flags
*
* Flags:
* -i, input file of tree dataset
* -s, support threshold between (0,1)
* -c, <True> if printing the frequent subtrees to console. Default is <False>
* -p, <True> if pruning the database, <False> otherwise. Default is <True>
* -u, <True> if counting the subtree matches once per tree, <False> if weighted counting. Default is <True>
* -o, output file for results summary. Output is appended, not overwritten. Default is "summary.out"
*
*/
// Parse command-line flags into the file-level option globals; prints usage
// and exits when fewer than four arguments are given.
// NOTE(review): the argc < 5 gate rejects some short-but-valid invocations
// that the getopt string would otherwise accept — confirm this is intended.
void parse_args(int argc, char **argv) {
extern char * optarg;
int c;
if (argc < 5){
cout << "usage: gpuTreeMiner -i<input_file> -s<support> -c<print output> -p<prune> -u<unique counting> -o<output_file>\n";
cout << "        -i, dataset of trees\n";
cout << "        -s, support threshold between (0,1)\n";
cout << "        -c, <True> if printing the frequent subtrees to console. Default is <False> \n";
cout << "        -p, <True> if pruning the database, <False> otherwise. Default is <True> \n";
cout << "        -u, <True> if counting the subtree matches once per tree, <False> if weighted counting. Default is <True> \n";
cout << "        -o, output file for results summary. Output is appended, not overwritten. Default is \"summary.out\"";
exit(0);
}
else {
while ((c = getopt(argc, argv, "bh:i:cp:s:S:uz:o:")) != -1) {
switch (c) {
case 'b':
Dbase_Ctrl_Blk::binary_input = true;
break;
case 'h': //hash threshold
HashTree::threshold() = atoi(optarg);
break;
case 'i': //input files
infile = new string(optarg);
break;
case 'c': //print freq subtrees
output_console = true;
break;
case 'p':
prune_type = (prune_vals) atoi(optarg);
break;
case 's': //support value for L2
MINSUP_PER = atof(optarg);
break;
case 'S': //absolute support
MINSUPPORT = atoi(optarg);
break;
case 'u': //count support multiple times per tree
count_unique = false;
break;
case 'z':
sort_type = (sort_vals) atoi(optarg);
break;
case 'o':
outfile = new string(optarg);
break;
}
}
}
}
/**
 * Remove every candidate from `freq_set`, leaving it empty.
 * Uses set::clear() instead of erase(begin(), end()) — identical effect,
 * clearer intent.
 */
void erase_set(set<vector<int> > &freq_set){
    freq_set.clear();
}
/**
*
* F1 Frequency Generation
*
* Initial frequency generation and pruning from database using minsup value
*
*/
// First mining pass: scans the whole database once to count per-item support,
// derives MINSUPPORT from MINSUP_PER when not given absolutely, optionally
// sorts items by count, and builds the forward/reverse item <-> frequent-item
// maps (DCB->FreqIdx / DCB->FreqMap).  Updates DBASE_MAXITEM,
// DBASE_NUM_TRANS and records timing in `stats`.
void get_F1() {
// SETUP TIME TRACKER
TimeTracker tt;
double te;
// F1 SETUP
int i, j, it;
vector<int> itcnt;
vector<int> flgs;
tt.Start();
DBASE_MAXITEM = 0;
DBASE_NUM_TRANS = 0;
while (DCB->get_next_trans()) {
for (i = 0; i < DCB->TransSz; i++) {
it = DCB->TransAry[i];
if (it != BranchIt) {
// Grow the count/flag vectors lazily as larger item ids appear.
if (it >= DBASE_MAXITEM) {
for (j = DBASE_MAXITEM; j <= it; j++) {
itcnt.push_back(0);
flgs.push_back(-1);
}
DBASE_MAXITEM = it + 1;
}
// flgs[it] remembers the last Cid that counted this item, so each
// item is counted at most once per transaction when count_unique.
if (count_unique) {
if (flgs[it] == DCB->Cid)
continue;
else
flgs[it] = DCB->Cid;
}
itcnt[it]++;
}
}
if (DCB->MaxTransSz < DCB->TransSz)
DCB->MaxTransSz = DCB->TransSz;
DBASE_NUM_TRANS++;
}
// SET VALUE OF MINSUPPORT
if (MINSUPPORT == -1)
MINSUPPORT = (int) (MINSUP_PER * DBASE_NUM_TRANS + 0.5);
if (MINSUPPORT < 1)
MINSUPPORT = 1;
cout << "DBASE_NUM_TRANS : " << DBASE_NUM_TRANS << endl;
cout << "DBASE_MAXITEM : " << DBASE_MAXITEM << endl;
cout << "MINSUPPORT : " << MINSUPPORT << " (" << MINSUP_PER << ")" << endl;
// COUNT NUMBER OF FREQUENT ITEMS
DCB->NumF1 = 0;
for (i = 0; i < DBASE_MAXITEM; i++)
if (itcnt[i] >= MINSUPPORT)
DCB->NumF1++;
int *it_order = new int[DBASE_MAXITEM];
for (i = 0; i < DBASE_MAXITEM; i++)
it_order[i] = i;
if (sort_type != nosort) {
ITCNT = &itcnt;
sort(&it_order[0], &it_order[DBASE_MAXITEM], F1cmp);
}
// CONSTRUCT FORWARD & REVERSE MAPPNG FROM ITEMS TO FREQ ITEMS
DCB->FreqIdx = new int[DCB->NumF1];
DCB->FreqMap = new int[DBASE_MAXITEM];
for (i = 0, j = 0; i < DBASE_MAXITEM; i++) {
if (itcnt[it_order[i]] >= MINSUPPORT) {
if (output_console)
cout << i << " - " << itcnt[it_order[i]] << endl;
DCB->FreqIdx[j] = it_order[i];
DCB->FreqMap[it_order[i]] = j;
j++;
} else
DCB->FreqMap[it_order[i]] = -1;
}
//console output F1 summary
cout << "F1 - " << DCB->NumF1 << " " << DBASE_MAXITEM << endl;
// NOTE(review): it_order is only delete[]d in the sorted branch — it leaks
// when sort_type == nosort.
if (sort_type != nosort) {
ITCNT = NULL;
delete[] it_order;
}
te = tt.Stop();
stats.add(DBASE_MAXITEM, DCB->NumF1, te);
}
/**
*
* Output pointer array to console
*
* @param array Pointer array of type int
* @param size Size of the array
*
*/
void print_array(int* array, int size) {
    // Emit each element space-separated, then a trailing newline.
    int idx = 0;
    while (idx < size) {
        cout << array[idx] << " ";
        ++idx;
    }
    cout << endl;
}
/**
 * F2 frequency generation (second database scan).
 *
 * Counts support of every ordered frequent-item pair (it1 with descendant
 * it2, tracked by a scope counter), builds DB_array from the valid
 * transactions sorted by node count, and seeds the candidate hash tree with
 * the frequent 2-subtree equivalence classes.
 *
 * Fixes: freqCand was `new`ed per pair and leaked (freq_cand.insert copies,
 * so a reused stack vector suffices); the original DB_array spine was leaked
 * when replaced with the sorted copy.
 */
void get_F2() {
    int i, j;
    int it1, it2;
    int scnt;
    int tree_id=0;
    // SETUP TIMETRACKER
    TimeTracker tt;
    double te;
    // START F2 TIME
    tt.Start();
    // itcnt2[i][j] = support of pair (i,j); flgs[i][j] = last Cid that counted it
    int **itcnt2 = new int*[DCB->NumF1];
    int **flgs = new int*[DCB->NumF1];
    for (i = 0; i < DCB->NumF1; i++) {
        itcnt2[i] = new int[DCB->NumF1];
        flgs[i] = new int[DCB->NumF1];
        for (j = 0; j < DCB->NumF1; j++) {
            itcnt2[i][j] = 0;
            flgs[i][j] = -1;
        }
    }
    // CREATE DB ARRAY
    DCB->DB_array = new int*[DBASE_NUM_TRANS];
    multimap<int, int> tree_sz_mp; //key: size of the tree, value: tree id of that size. For sorting the dataset
    vector<int> freqCand;  // reused stack vector (was `new`ed and leaked per pair)
    int nod_num = 0;
    while (DCB->get_next_trans()) {
        nod_num = 0;
        DCB->get_valid_trans();
        //Creating DB array with the valid transaction (removing infrequent items), the size of the transaction is decreased here
        if(DCB->TransSz > 1) {
            tot_trans_cnt++;
            DCB->DB_array[tree_id] = new int[DCB->TransSz + 2];
            DCB->DB_array_size+= DCB->TransSz+1;
            DCB->DB_array[tree_id][0] = DCB->TransSz;
            for (int trans_iter = 0; trans_iter < DCB->TransSz; trans_iter++) {
                DCB->DB_array[tree_id][trans_iter + 2] = DCB->TransAry[trans_iter];
                if(DCB->TransAry[trans_iter] != BranchIt){ //Number of nodes in one transaction
                    nod_num++;
                }
            }
            DCB->DB_array[tree_id][1]=nod_num;
            tree_sz_mp.insert(pair<int,int>(nod_num,tree_id));
            tree_id++;
            //count a pair only once per cid
            for (i = 0; i < DCB->TransSz; i++) {
                it1 = DCB->TransAry[i];
                if (it1 != BranchIt && it1 != DCB->NumF1) {
                    scnt = 0;  // scope counter: it2 lies in it1's subtree while scnt >= 0
                    for (j = i + 1; scnt >= 0 && j < DCB->TransSz; j++) {
                        it2 = DCB->TransAry[j];
                        if (it2 != BranchIt && it2 != DCB->NumF1) {
                            scnt++;
                            if (count_unique) {
                                if (flgs[it1][it2] == DCB->Cid)
                                    continue;
                                else
                                    flgs[it1][it2] = DCB->Cid;
                            }
                            itcnt2[it1][it2]++;
                        } else
                            scnt--;
                    }
                }
            }
        } else
            continue;
    }
    // SORT DATASET & MAKE treeSz_loc_map TO SEE TREE SIZE LOCATION IN DATABASE
    int** DB_array_tmp = new int*[tree_id];
    int loc_in_sorted_db=0;
    // SORT DB_array
    for(multimap<int, int>::iterator it=tree_sz_mp.begin(); it!=tree_sz_mp.end();it=tree_sz_mp.upper_bound(it->first)){
        pair<multimap<int, int>::iterator, multimap<int, int>::iterator> eql_rng = tree_sz_mp.equal_range(it->first);
        for(multimap<int, int>::iterator it2=eql_rng.first;it2!=eql_rng.second;it2++){
            DB_array_tmp[loc_in_sorted_db] = DCB->DB_array[it2->second];
            loc_in_sorted_db++;
        }
        treeSz_loc_mp[it->first]=loc_in_sorted_db-1;
    }
    delete [] DCB->DB_array;  // old spine only; rows now live in DB_array_tmp (was leaked)
    DCB->DB_array = DB_array_tmp;
    DBASE_NUM_TRANS = tree_id;
    int F2cnt = 0;
    // GENERATE NEW HASHTREE
    CandK = new HashTree(0);
    CandK->maxdepth() = 1;
    if (prune_type == prune)
        FK.clearall();
    // COUNT FREQUENT PATTERNS & GENERATE eqclass
    Eqclass *eq;
    for (i = 0; i < DCB->NumF1; i++) {
        eq = NULL;
        for (j = 0; j < DCB->NumF1; j++) {
            if (itcnt2[i][j] >= MINSUPPORT) {
                F2cnt++;
                if (eq == NULL) {
                    eq = new Eqclass();
                    eq->prefix().push_back(i);
                }
                eq->add_node(j, 0, itcnt2[i][j]);
                if (prune_type == prune){
                    freqCand.clear();
                    freqCand.push_back(i);
                    freqCand.push_back(j);
                    freq_cand.insert(freqCand);
                }
            }
            if (eq != NULL) {
                if (output_console)
                    cout << DCB->FreqIdx[i] << " " << DCB->FreqIdx[j] << " - "
                            << itcnt2[i][j] << endl;
            }
        }
        if (eq != NULL) {
            CandK->add_element(eq);
            CandK->eqlist()->push_front(eq);
            CandK->count()++;
        }
    }
    // FREE MEMORY
    for (i = 0; i < DCB->NumF1; i++) {
        delete[] itcnt2[i];
        delete[] flgs[i];
    }
    delete[] itcnt2;
    delete[] flgs;
    cout << "F2 - " << F2cnt << " " << DCB->NumF1 * DCB->NumF1 << endl;
    // LOG F2 END TIME
    te = tt.Stop();
    stats.add(DCB->NumF1 * DCB->NumF1, F2cnt, te);
}
/**
*
* Add node to Eqclass, checks for pruning
*
*/
/**
 * Add extension node (val, pos) to equivalence class neq, unless candidate
 * pruning rejects it: every subtree obtained by deleting one node from the
 * encoded candidate must already be in freq_cand.
 *
 * Fix: removed the unused `static vector<int> subtree` and `int hval` locals.
 * NOTE(review): the erase-then-reassign iterator pattern on candTmp relies on
 * vector capacity being preserved across `candTmp = cand`; fragile, but left
 * byte-identical here.
 */
void add_node(int iter, Eqclass *neq, int val, int pos) {
    if (prune_type == noprune) {
        //don't do any pruning
        neq->add_node(val, pos);
        return;
    }
    // PRUNE BASED ON FREQUENT SUBTREE
    static vector<int> cand;  // candidate encoding: prefix + BranchIt moves + val
    int scope, scnt;
    // FORM THE CANDIDATE PREFIX
    cand = neq->prefix();
    scnt = neq->get_scope(pos, scope); //checks the scope of node.pos
    while (scnt > scope) {
        cand.push_back(BranchIt);
        scnt--;
    }
    cand.push_back(val);
    int cnt=0;
    vector<int> candTmp;
    vector<int>::iterator it1, it2;
    // PRUNING
    candTmp = cand;
    int num_root_visiting=0; //used for checking if deleting root node or not
    // CHECKING THE ROOT (pure path: no BranchIt at all)
    if(find(candTmp.begin(), candTmp.end(), BranchIt) == candTmp.end()){
        for(it1=candTmp.begin(); it1 != candTmp.end()-1; it1++){
            candTmp.erase(it1);
            if(freq_cand.find(candTmp) == freq_cand.end()){
                return;  // a one-node deletion is infrequent -> prune
            }
            candTmp.clear();
            candTmp = cand;
        }
    }
    // CHECKING THE ROOT CANDIDATE
    candTmp.clear();
    candTmp = cand;
    cnt=0;
    it1=candTmp.begin();
    for(it2=candTmp.begin()+1; it2 != candTmp.end(); it2++){
        if(*it2 != BranchIt){
            cnt++;
        }
        else{
            cnt--;
            if(cnt == 0)
                num_root_visiting++;  // returned to root level -> root has >1 child
        }
    }
    if(num_root_visiting==0){
        candTmp.erase(it1);
        if(freq_cand.find(candTmp) == freq_cand.end()){
            return;
        }
        candTmp.clear();
        candTmp = cand;
    }
    // CHECKING THE REST OF THE NODES
    for(it1=candTmp.begin()+1; it1 != candTmp.end()-1; it1++){
        if(*it1 != BranchIt){
            cnt=0;
            for(it2=it1+1; it2 != candTmp.end(); it2++){
                if(*it2 != BranchIt){
                    cnt++;
                }
                else{
                    cnt--;
                    if(cnt==-1){
                        // internal node: drop it and its closing BranchIt
                        candTmp.erase(it1);
                        candTmp.erase(it2-1);
                        break;
                    }
                }
            }
            if(candTmp.size() == cand.size()){
                candTmp.erase(it1);  // leaf node: only the node itself is dropped
            }
            if(freq_cand.find(candTmp) == freq_cand.end()){
                return;
            }
            candTmp.clear();
            candTmp = cand;
        }
    }
    //otherwise add the node
    neq->add_node(val, pos);
}
/**
 * Join every pair of extension nodes (ni, nj) of `eq` with ni->pos >= nj->pos
 * to generate candidate classes for the next iteration; classes with at least
 * one surviving node are appended to `neweql`.
 *
 * Fix: guard `neq` against NULL before dereferencing its node list (the inner
 * loop always visits nj == ni, but memory safety should not rely on that).
 */
void cand_gen(int iter, Eqclass &eq, list<Eqclass *> &neweql) {
    Eqclass *neq;
    list<Eqnode>::iterator ni, nj;
    for (ni = eq.nlist().begin(); ni != eq.nlist().end(); ++ni) {
        neq = NULL;
        for (nj = eq.nlist().begin(); nj != eq.nlist().end(); ++nj) {
            if (ni->pos < nj->pos)
                continue;
            if (neq == NULL) {
                neq = new Eqclass;
                neq->set_prefix(eq.prefix(), *ni);
            }
            if (ni->pos > nj->pos)
                add_node(iter, neq, nj->val, nj->pos);
            else {
                // equal positions: try nj both as sibling and as child of ni
                add_node(iter, neq, nj->val, nj->pos);
                add_node(iter, neq, nj->val, neq->prefix().size() - 1);
            }
        }
        if (neq != NULL && !neq->nlist().empty()) {
            neweql.push_back(neq);
        } else
            delete neq;  // delete of NULL is a safe no-op
    }
}
/**
 * Drive candidate generation for iteration `iter`: consume every equivalence
 * class stored in the hash tree, join it via cand_gen(), and re-insert the
 * resulting classes, accumulating the total candidate count in `candcnt`.
 */
void candidate_generation(int iter, HashTree *ht, int &candcnt) {
    list<Eqclass *> *oldeql = ht->eqlist();
    list<Eqclass *> *neweql = new list<Eqclass *>;
    Eqclass *eq;
    ht->flag() = -1; //reset the flag
    while (!oldeql->empty()) {
        eq = oldeql->front();
        cand_gen(iter, *eq, *neweql);
        delete eq;  // old class fully consumed by the join
        ht->count()--;
        oldeql->pop_front();
    }
    list<Eqclass *>::iterator ni;
    for (ni = neweql->begin(); ni != neweql->end(); ni++) {
        ht->eqlist()->push_back(*ni);
        ht->count()++;
        candcnt += (*ni)->nlist().size();
    }
    delete neweql;  // list shell only; the Eqclass pointers now live in ht
}
/**
 * Stream a vector of ints as space-separated values (no trailing space).
 *
 * Fix: an empty vector previously indexed vec[0] out of bounds (undefined
 * behavior); it now streams nothing.  Loop index widened to size_t to avoid
 * a signed/unsigned comparison.
 */
std::ostream & operator<<(std::ostream& fout, std::vector<int> &vec) {
    if (vec.empty())
        return fout;
    fout << vec[0];
    for (size_t i = 1; i < vec.size(); i++)
        fout << " " << vec[i];
    return fout;
}
/**
 * For each extension node of `eq`, scan the current transaction
 * (DCB->DB_array[DCB->db_iter]) from tpos with scope tscope and increment the
 * node's support on every in-scope match.  `stk` holds the scopes of already
 * matched prefix nodes; `cflgs[f]` marks node f as counted for this
 * transaction (unique counting).  Returns true when all nodes were counted.
 *
 * Fix: removed the outer `st, en, l` declarations — `l` was unused and
 * `st, en` were shadowed by the inner declaration, a latent hazard.
 */
bool incr_nodes(Eqclass *eq, int tpos, int tscope, stack<int> &stk,
        bit_vector &cflgs) {
    int i, f;
    bool retval = false;
    int fcnt = 0;
    int scope, ttscope, ttpos;
    stack<int> tstk;
    list<Eqnode>::iterator ni = eq->nlist().begin();
    for (f = 0; ni != eq->nlist().end(); ni++, f++) {
        //if unique counts and node has been counted, skip to next node
        if (count_unique && cflgs[f]) {
            fcnt++;
            continue;
        }
        ttscope = tscope;
        scope = ttscope;
        ttpos = tpos;
        bool skip = false;
        int st, en;
        en = eq->get_scope(ni->pos, st);
        if (en > st) {
            // node attaches to a higher ancestor: pop matched scopes up to it
            skip = true;
            while (en > st) {
                st++;
                tstk.push(stk.top());
                stk.pop();
            }
            ttscope = tstk.top();
        }
        // skip past the subtree(s) that were popped
        while (skip && scope >= ttscope && ttpos < DCB->DB_array[DCB->db_iter][0]) {
            if (DCB->DB_array[DCB->db_iter][ttpos + 1] == BranchIt)
                scope--;
            else
                scope++;
            ttpos++;
        }
        if (skip)
            ttscope = stk.top();
        //search for the last item within cur_scope
        for (i = ttpos; i < DCB->DB_array[DCB->db_iter][0]; i++) {
            if (DCB->DB_array[DCB->db_iter][i + 1] == BranchIt)
                scope--;
            else
                scope++;
            if (scope < ttscope)
                break;
            if (ni->val == DCB->DB_array[DCB->db_iter][i + 1]) {
                if (count_unique) {
                    if (!cflgs[f]) {
                        cflgs[f] = true;
                        fcnt++;
                        ni->sup++;
                    }
                } else
                    ni->sup++;
            }
        }
        // restore the temporarily popped scopes for the next node
        while (!tstk.empty()) {
            stk.push(tstk.top());
            tstk.pop();
        }
    }
    //all nodes have been seen
    if (count_unique && fcnt == cflgs.size())
        retval = true;
    return retval;
}
/**
 * Recursive prefix matcher: tries to embed eq->prefix()[ppos..] into the
 * current transaction (DCB->DB_array[DCB->db_iter]) starting at tpos within
 * scope tscope.  `stk` records the scope of each matched prefix node; on a
 * full prefix match incr_nodes() counts the extension nodes.  Returns true
 * once all extension nodes have been counted (unique counting).
 */
bool incr_support(Eqclass *eq, int tpos, int ppos, int tscope, stack<int> &stk,
        bit_vector &cflgs) {
    int i;
    int scope, ttscope;  // NOTE(review): ttscope is unused here
    stack<int> tstk;
    scope = tscope;
    bool skip = false;
    if (eq->prefix()[ppos] == BranchIt) {
        // backtrack: pop one matched scope per BranchIt in the prefix
        skip = true;
        while (eq->prefix()[ppos] == BranchIt) {
            tstk.push(stk.top());
            stk.pop();
            ppos++;
        }
        tscope = tstk.top();
    }
    // skip the transaction positions still inside the subtree we backed out of
    while (skip && scope >= tscope && tpos < DCB->DB_array[DCB->db_iter][0]) {
        if (DCB->DB_array[DCB->db_iter][tpos + 1] == BranchIt)
            scope--;
        else
            scope++;
        tpos++;
    }
    if (skip)
        tscope = stk.top();
    bool allfound = false;
    for (i = tpos; i < DCB->DB_array[DCB->db_iter][0] && !allfound; i++) {
        if (DCB->DB_array[DCB->db_iter][i + 1] == BranchIt)
            scope--;
        else
            scope++;
        if (scope < tscope)
            break;  // left the legal scope: no further match possible here
        if (DCB->DB_array[DCB->db_iter][i + 1] == eq->prefix()[ppos]) {
            stk.push(scope);
            if (ppos == eq->prefix().size() - 1) {
                // whole prefix matched: count extension nodes from here
                allfound = incr_nodes(eq, i + 1, scope, stk, cflgs);
            } else {
                allfound = incr_support(eq, i + 1, ppos + 1, scope, stk, cflgs);
            }
            stk.pop();
        }
    }
    // restore the temporarily popped scopes before returning to the caller
    while (!tstk.empty()) {
        stk.push(tstk.top());
        tstk.pop();
    }
    return allfound;
}
/**
*
* Returns false if Eqnode support is greater than or equal to the minimum support threshold, true if otherwise
*
*/
static bool notfrequent(Eqnode &n) {
    // Frequent iff support meets the global minimum-support threshold.
    return n.sup < MINSUPPORT;
}
bool get_frequent(int iter, HashTree *ht, int &freqcnt) {
int i;
bool empty_leaf = false;
if (ht->isleaf()) {
list<Eqclass *> *eql = ht->eqlist();
Eqclass *eq;
list<Eqclass *>::iterator ni;
for (ni = eql->begin(); ni != eql->end() && !eql->empty();) {
eq = *ni;
list<Eqnode>::iterator nj;
nj = remove_if(eq->nlist().begin(), eq->nlist().end(), notfrequent);
eq->nlist().erase(nj, eq->nlist().end());
freqcnt += eq->nlist().size();
if (output_console && !eq->nlist().empty())
eq->print(DCB);
if (eq->nlist().empty()) {
ni = eql->erase(ni);
CandK->count()--;
}
else {
if (prune_type == prune)
FK.add(eq);
ni++;
}
}
if (eql->empty())
empty_leaf = true;
} else {
HTable::iterator ti, hi = ht->htable().begin();
int ecnt = 0;
for (; hi != ht->htable().end();) {
bool ret = get_frequent(iter, (*hi).second, freqcnt);
if (ret) {
ecnt++;
ti = hi;
hi++;
ht->htable().erase(ti);
} else
hi++;
}
}
return empty_leaf;
}
/**
 * Flatten every candidate (class prefix + extension node + closing BranchIt
 * moves) into a contiguous host array of candcnt rows, each 2*iter-1 ints
 * wide, ready to be copied to the GPU.  Caller owns the returned new[] array.
 *
 * Fix: removed the unused `lastBranchDepth` local.
 */
int* create_cand_array(int candcnt, int iter){
    int* cand_array = new int[candcnt*(2*iter-1)];
    int cand_array_it = 0;
    list<Eqclass *> *eql = CandK->eqlist();
    list<Eqclass *>::iterator ei;
    vector<int> righ_path_pos; //position in the prefix vector
    for(ei = eql->begin(); ei != eql->end(); ei++){
        Eqclass* eq = *ei;
        list<Eqnode>::iterator ni;
        // CREATE RIGHTMOST PATH (positions of nodes still open on the right)
        righ_path_pos.clear();
        for(int i=0; i<eq->prefix().size();i++){
            if(eq->prefix()[i] != BranchIt){
                righ_path_pos.push_back(i);
            }
            else{
                righ_path_pos.pop_back();
            }
        }
        for(ni=eq->nlist().begin(); ni!=eq->nlist().end(); ni++){
            //copy the prefix
            for(int i=0; i<eq->prefix().size();i++){
                cand_array[cand_array_it] = eq->prefix()[i];
                cand_array_it++;
            }
            //add the extension node and the remaining branches
            for(int i=righ_path_pos.size()-1; i>-1;i--){
                if(ni->pos == righ_path_pos[i]){
                    cand_array[cand_array_it] = ni->val;
                    cand_array_it++;
                    cand_array[cand_array_it] = BranchIt;
                    cand_array_it++;
                }
                else{
                    cand_array[cand_array_it] = BranchIt;
                    cand_array_it++;
                }
            }
        }
    }
    return cand_array;
}
/**
*
* Receive GPU results & update their support
*
* @param candcnt Candidate count
* @param freqcnt Frequency count
* @param gpu_result Results from GPU kernel invocation
*
*/
/**
 * Receive GPU support counts and fold them back into the candidate classes.
 *
 * Copies gpu_result[k] into the k-th candidate node (same order in which
 * create_cand_array() emitted candidates), drops infrequent nodes and empty
 * classes, and, when pruning, registers each surviving candidate encoding in
 * freq_cand.
 *
 * Fix: `cand` was `new`ed per surviving node and leaked; freq_cand.insert
 * copies the vector, so a reused stack vector suffices.
 *
 * @param candcnt    Total number of candidates (consistency-checked at end).
 * @param freqcnt    Incremented by the number of frequent candidates.
 * @param gpu_result Per-candidate support counts computed on the GPU.
 */
void update_sup(int& candcnt, int& freqcnt, int* gpu_result){
    list<Eqclass *> *eql = CandK->eqlist();
    list<Eqclass *>::iterator ei;
    int candIt=0;
    for(ei = eql->begin(); ei != eql->end() && !eql->empty();){
        Eqclass* eq = *ei;
        list<Eqnode>::iterator ni;
        for(ni=eq->nlist().begin(); ni!=eq->nlist().end(); ni++){
            ni->sup = gpu_result[candIt];
            candIt++;
        }
        // CHECK FREQUENCY
        list<Eqnode>::iterator nj;
        list<Eqnode>::iterator njj;
        nj = remove_if(eq->nlist().begin(), eq->nlist().end(), notfrequent);
        eq->nlist().erase(nj, eq->nlist().end());
        vector<int> cand;  // reused stack vector (was `new`ed and leaked per node)
        int cnt;
        int node_tmp;
        int depth_pos=0;//the depth position of the parent node of the extension node
        freqcnt += eq->nlist().size();
        if (eq->nlist().empty()) {
            ei = eql->erase(ei);
            CandK->count()--;
        }
        else {
            //push to Fk
            if (prune_type == prune){
                for(njj = eq->nlist().begin(); njj != eq->nlist().end(); njj++){
                    cnt = -1;
                    cand.clear();
                    for(int j=0; j< eq->prefix().size();j++){
                        node_tmp = eq->prefix()[j];
                        if(node_tmp == BranchIt)
                            cnt--;
                        else
                            cnt++;
                        if(j == njj->pos)
                            depth_pos = cnt;
                        cand.push_back(eq->prefix()[j]);
                    }
                    // climb back up to the extension node's parent depth
                    while(cnt != depth_pos){
                        cand.push_back(BranchIt);
                        cnt--;
                    }
                    cand.push_back(njj->val);
                    freq_cand.insert(cand);
                }
            }
            ei++;
        }
    }
    if(candIt != candcnt){
        cerr << "Error in updating the support" << endl;
        exit(0);
    }
}
/**
 * Mine all Fk (k >= 3) pattern levels on the GPU: copy the flattened tree
 * database once, then per iteration generate candidates on the host, encode
 * them, count their support with the frequency_counter kernel, and fold the
 * results back via update_sup().
 *
 * Fixes: cand_h (returned by create_cand_array's new[]) was leaked every
 * iteration; the device database buffers trees_d / tr_start_ind_d were never
 * freed.
 */
void get_Fk() {
    // Fk SETUP
    int candcnt=0, freqcnt=0;
    // SETUP TIME TRACKER
    TimeTracker tt;
    double te;
    ////////////////////////////
    /////////GPU////////////////
    int* trees_d;         // device copy of the flattened tree database
    int* tr_start_ind_d;  // device copy of per-tree start offsets
    int* freq_result_d;   // managed buffer receiving per-candidate supports
    int* cand_d;          // device copy of the encoded candidate array
    int* cand_h;          // host-side encoded candidate array
    // DEVICE MEMORY ALLOCATION AND COPYING FROM DATABASE
    ERROR_CHECK(hipMalloc(&trees_d, DCB->DB_array_size*sizeof(int)));
    ERROR_CHECK(hipMemcpy(trees_d, DCB->trees_h, DCB->DB_array_size*sizeof(int), hipMemcpyHostToDevice));
    ERROR_CHECK(hipMalloc(&tr_start_ind_d, DBASE_NUM_TRANS*sizeof(int)));
    ERROR_CHECK(hipMemcpy(tr_start_ind_d, DCB->tr_start_ind_h, DBASE_NUM_TRANS*sizeof(int), hipMemcpyHostToDevice));
    for (int iter = 3; !CandK->isempty(); iter++) {
        tt.Start();
        CandK->maxdepth() = iter - 1;
        candcnt = 0;
        freqcnt = 0;
        // GENERATE LIST OF CANDIDATES
        candidate_generation(iter, CandK, candcnt);
        cand_h = create_cand_array(candcnt,iter);
        if (candcnt > 0) {
            // START KERNEL RUN TIME
            kernel_tt.Start();
            ERROR_CHECK(hipMalloc(&cand_d, (2*iter-1)*candcnt*sizeof(int)));
            ERROR_CHECK(hipMemcpy(cand_d, cand_h, (2*iter-1)*candcnt*sizeof(int), hipMemcpyHostToDevice));
            ERROR_CHECK(hipMallocManaged(&freq_result_d, candcnt*sizeof(int)));
            ERROR_CHECK(hipMemset(freq_result_d, 0, candcnt*sizeof(int)));
            // SET GPU MEMORY DIMENSIONS (BLOCK SIZE, GRID SIZE, CONSTANT MEMORY SIZE)
            int threadNum = block_dim;
            int blockNum = (DBASE_NUM_TRANS/threadNum > 65535) ? 65535 : (DBASE_NUM_TRANS-1)/threadNum+1;
            hipLaunchKernelGGL(( frequency_counter), dim3(blockNum),dim3(threadNum), 0, 0, trees_d, tr_start_ind_d,
                    DBASE_NUM_TRANS, iter, candcnt, cand_d, freq_result_d);
            if ((hipDeviceSynchronize()) != hipSuccess) printf("error in cuda device synchronization\n");
            // STOP & APPEND KERNEL RUN TIME TO TOTAL KERNEL RUN TIME
            kernel_time += kernel_tt.Stop();
            if (prune_type == prune)
                FK.clearall();
            if (prune_type == prune){
                erase_set(freq_cand);
            }
            update_sup(candcnt,freqcnt,freq_result_d);
            // FREE DEVICE MEMORY
            hipFree(freq_result_d);
            hipFree(cand_d);
        }
        delete [] cand_h;  // host candidate array is re-created every iteration (was leaked)
        cout << "F" << iter << " - " << freqcnt << " " << candcnt << endl;
        te = tt.Stop();
        stats.add(candcnt, freqcnt, te);
    }
    // FREE DEVICE DATABASE BUFFERS (previously leaked)
    hipFree(trees_d);
    hipFree(tr_start_ind_d);
    if (prune_type == prune){
        erase_set(freq_cand);
    }
}
/**
 * Flatten DB_array into the GPU-friendly layout:
 *   trees_h        — [size, items...] per tree, concatenated
 *   tr_start_ind_h — start offset of each tree inside trees_h
 */
void create_gpu_stats(){
    //making DB for GPU -- map to "trees" variable for GPU
    //AND making start point of trees (tr_start_ind_h), the index consider the size of the tree as well
    int trees_h_it=0;
    int tr_start_ind_h_it=0;
    DCB->trees_h = new int[DCB->DB_array_size];
    DCB->tr_start_ind_h = new int[DBASE_NUM_TRANS];
    for(int i=0; i<DBASE_NUM_TRANS; i++){
        // first slot of each flattened tree is its transaction size
        DCB->trees_h[trees_h_it] = DCB->DB_array[i][0];
        DCB->tr_start_ind_h[tr_start_ind_h_it] = trees_h_it;
        tr_start_ind_h_it++;
        trees_h_it++;
        for(int j=0; j<DCB->DB_array[i][0]; j++){
            // items start at offset 2 in each DB_array row ([0]=size, [1]=node count)
            DCB->trees_h[trees_h_it] = DCB->DB_array[i][j+2];
            trees_h_it++;
            // NOTE(review): this check uses `>` not `>=`, so it fires only after
            // the index already passed the end — confirm DB_array_size accounting.
            if(trees_h_it>DCB->DB_array_size){
                cerr << "Error in creating DB for GPU" << endl;
                exit(0);
            }
        }
    }
}
/**
 * Program entry point: parse arguments, run F1/F2 on the CPU, query device
 * properties, build the GPU-side database, mine Fk on the GPU, then write a
 * one-line run summary to the output file and to the console.
 *
 * Fix: the second switch used the misspelled label `deafult:` (a plain goto
 * label, silently accepted by the compiler) instead of the `default:` case.
 */
int main(int argc, char **argv) {
    // START RUN TIME
    TimeTracker tt;
    tt.Start();
    // PARSE ARGUMENTS TO VARIABLES
    parse_args(argc, argv);
    // GENERATE DATABASE AND GET F1 & F2 SEQUENCES
    DCB = new Dbase_Ctrl_Blk(infile->c_str());
    get_F1();
    get_F2();
    // GET DEVICE PROPERTIES
    hipDeviceProp_t prop;
    hipGetDeviceProperties( &prop, 0 );
    shared_memory_size=prop.sharedMemPerBlock;
    warp_size=prop.warpSize;
    // GENERATE GPU STATS
    create_gpu_stats();
    // RUN TO GET Fk SEQUENCES
    get_Fk();
    // END RUN TIME
    double tottime = tt.Stop();
    stats.tottime = tottime;
    // WRITE RESULTS TO SUMMARY FILE
    ofstream summary(outfile->c_str(), ios::app);
    summary << "HTREEMINER ";
    switch (sort_type) {
    case incr:
        summary << "INCR ";
        break;
    case decr:
        summary << "DECR ";
        break;
    default:
        break;
    }
    switch (prune_type) {
    case prune:
        summary << "PRUNE ";
        break;
    default:  // fixed: was the typo label `deafult:`, which never matched
        break;
    }
    if (!count_unique)
        summary << "MULTIPLE ";
    summary << *infile << " " << MINSUP_PER << " " << DBASE_NUM_TRANS << " "
            << MINSUPPORT << " ";
    summary << stats << endl;
    summary.close();
    // PRINT RESULTS TO CONSOLE
    cout << stats << endl;
    cout << "TIME = " << tottime << endl;
    cout << endl << "Total time = " << tottime << endl;
    cout << "Kernel time = " << kernel_time << endl;
    cout << "Pre-proc time = " << tottime - kernel_time << endl;
    // EXIT SUCCESSFULLY
    exit(0);
}
| d7d214a36d1ead4d2b3dac10b10b66879ad05454.cu | //============================================================================
// treeminer.cu
//
// Main file that calls a cuda kernel to compute the SPM tree mining algorithm
//
// Invocation may be read in README.md
//
//============================================================================
// LIBRARIES
#include <string>
#include <unistd.h>
#include <stdio.h>
#include <stack>
#include <list>
#include <iostream>
#include <map>
#include <vector>
#include <set>
// HEADERS
#include "treeminer.h"
#include "timetrack.h"
#include "calcdb.h"
#include "eqclass.h"
#include "hashtree.h"
#include "stats.h"
#include "cuda.h"
#include "tools.h"
// CUDA KERNEL
#include "cuda_kernel.cu"
// GPU VARIABLES
int warp_size=0;
int shared_memory_size=0; // in bytes
int node_size=-1;
int block_dim=512;
int maxNodeSz=200;
int blk_max_size=0;
// TIMING
TimeTracker kernel_tt;
double kernel_time;
TimeTracker preproc_tt;
double preproc_time;
// GLOBAL VARIABLES
string *infile;
string *outfile = new string("summary.out");
HashTree *CandK = NULL;
FreqHT FK;
Dbase_Ctrl_Blk *DCB;
Stats stats;
typedef vector<bool> bit_vector;
int tot_trans_cnt=0; //total number of valid transactions
map<int, int> treeSz_loc_mp; //each set of tree size ends where in the DB_array
double MINSUP_PER;
int MINSUPPORT = -1;
int DBASE_MAXITEM;
int DBASE_NUM_TRANS;
// DEFAULT FLAGS
bool output_console = false; //don't print freq subtrees to console
bool count_unique = true; //count support only once per tree
sort_vals sort_type = nosort; //default is to sort in increasing order
prune_vals prune_type = prune; //prune candidates by default
set<vector<int> > freq_cand;
vector<int> *ITCNT = NULL; //used for sorting F1
// Comparator for ordering F1 items: ascending by support count when
// sort_type == incr, descending otherwise.  Reads counts via the global
// ITCNT vector pointer (set up by get_F1 before sorting).
bool F1cmp(int x, int y) {
    bool less_than = ((*ITCNT)[x] < (*ITCNT)[y]);
    return (sort_type == incr) ? less_than : !less_than;
}
/**
*
* Parses arguments based on user set flags
*
* Flags:
* -i, input file of tree dataset
* -s, support threshold between (0,1)
* -c, <True> if printing the frequent subtrees to console. Default is <False>
* -p, <True> if pruning the database, <False> otherwise. Default is <True>
* -u, <True> if counting the subtree matches once per tree, <False> if weighted counting. Default is <True>
* -o, output file for results summary. Output is appended, not overwritten. Default is "summary.out"
*
*/
void parse_args(int argc, char **argv) {
    extern char * optarg;
    int c;
    // Require at least -i and -s (program name + 4 tokens); otherwise print usage.
    if (argc < 5){
        cout << "usage: gpuTreeMiner -i<input_file> -s<support> -c<print output> -p<prune> -u<unique counting> -o<output_file>\n";
        cout << "	-i, dataset of trees\n";
        cout << "	-s, support threshold between (0,1)\n";
        cout << "	-c, <True> if printing the frequent subtrees to console. Default is <False> \n";
        cout << "	-p, <True> if pruning the database, <False> otherwise. Default is <True> \n";
        cout << "	-u, <True> if counting the subtree matches once per tree, <False> if weighted counting. Default is <True> \n";
        cout << "	-o, output file for results summary. Output is appended, not overwritten. Default is \"summary.out\"";
        exit(0);
    }
    else {
        // getopt string: options with ':' take an argument; -b, -c, -u are flags
        while ((c = getopt(argc, argv, "bh:i:cp:s:S:uz:o:")) != -1) {
            switch (c) {
            case 'b':
                Dbase_Ctrl_Blk::binary_input = true;
                break;
            case 'h': //hash threshold
                HashTree::threshold() = atoi(optarg);
                break;
            case 'i': //input files
                infile = new string(optarg);
                break;
            case 'c': //print freq subtrees
                output_console = true;
                break;
            case 'p':
                // numeric prune_vals enum value (prune / noprune)
                prune_type = (prune_vals) atoi(optarg);
                break;
            case 's': //support value for L2
                MINSUP_PER = atof(optarg);
                break;
            case 'S': //absolute support
                MINSUPPORT = atoi(optarg);
                break;
            case 'u': //count support multiple times per tree
                count_unique = false;
                break;
            case 'z':
                // numeric sort_vals enum value (nosort / incr / decr)
                sort_type = (sort_vals) atoi(optarg);
                break;
            case 'o':
                outfile = new string(optarg);
                break;
            }
        }
    }
}
/**
 * Remove every candidate encoding from the given frequent-candidate set.
 *
 * @param freq_set Set of candidate subtrees (encoded as int vectors) to empty.
 */
void erase_set(std::set<std::vector<int> > &freq_set){
    freq_set.clear();  // idiomatic equivalent of erase(begin(), end())
}
/**
*
* F1 Frequency Generation
*
* Initial frequency generation and pruning from database using minsup value
*
*/
void get_F1() {
    // SETUP TIME TRACKER
    TimeTracker tt;
    double te;
    // F1 SETUP
    int i, j, it;
    vector<int> itcnt;   // itcnt[it] = support count of item `it`
    vector<int> flgs;    // flgs[it]  = last Cid that counted `it` (unique counting)
    tt.Start();
    DBASE_MAXITEM = 0;
    DBASE_NUM_TRANS = 0;
    while (DCB->get_next_trans()) {
        for (i = 0; i < DCB->TransSz; i++) {
            it = DCB->TransAry[i];
            if (it != BranchIt) {
                // grow the count/flag arrays on demand for unseen item ids
                if (it >= DBASE_MAXITEM) {
                    for (j = DBASE_MAXITEM; j <= it; j++) {
                        itcnt.push_back(0);
                        flgs.push_back(-1);
                    }
                    DBASE_MAXITEM = it + 1;
                }
                if (count_unique) {
                    // count an item at most once per transaction id
                    if (flgs[it] == DCB->Cid)
                        continue;
                    else
                        flgs[it] = DCB->Cid;
                }
                itcnt[it]++;
            }
        }
        if (DCB->MaxTransSz < DCB->TransSz)
            DCB->MaxTransSz = DCB->TransSz;
        DBASE_NUM_TRANS++;
    }
    // SET VALUE OF MINSUPPORT (relative unless given absolutely; at least 1)
    if (MINSUPPORT == -1)
        MINSUPPORT = (int) (MINSUP_PER * DBASE_NUM_TRANS + 0.5);
    if (MINSUPPORT < 1)
        MINSUPPORT = 1;
    cout << "DBASE_NUM_TRANS : " << DBASE_NUM_TRANS << endl;
    cout << "DBASE_MAXITEM : " << DBASE_MAXITEM << endl;
    cout << "MINSUPPORT : " << MINSUPPORT << " (" << MINSUP_PER << ")" << endl;
    // COUNT NUMBER OF FREQUENT ITEMS
    DCB->NumF1 = 0;
    for (i = 0; i < DBASE_MAXITEM; i++)
        if (itcnt[i] >= MINSUPPORT)
            DCB->NumF1++;
    int *it_order = new int[DBASE_MAXITEM];
    for (i = 0; i < DBASE_MAXITEM; i++)
        it_order[i] = i;
    if (sort_type != nosort) {
        // F1cmp reads the counts through the global ITCNT pointer
        ITCNT = &itcnt;
        sort(&it_order[0], &it_order[DBASE_MAXITEM], F1cmp);
    }
    // CONSTRUCT FORWARD & REVERSE MAPPING FROM ITEMS TO FREQ ITEMS
    DCB->FreqIdx = new int[DCB->NumF1];
    DCB->FreqMap = new int[DBASE_MAXITEM];
    for (i = 0, j = 0; i < DBASE_MAXITEM; i++) {
        if (itcnt[it_order[i]] >= MINSUPPORT) {
            if (output_console)
                cout << i << " - " << itcnt[it_order[i]] << endl;
            DCB->FreqIdx[j] = it_order[i];
            DCB->FreqMap[it_order[i]] = j;
            j++;
        } else
            DCB->FreqMap[it_order[i]] = -1;  // infrequent items map to -1
    }
    //console output F1 summary
    cout << "F1 - " << DCB->NumF1 << " " << DBASE_MAXITEM << endl;
    if (sort_type != nosort) {
        ITCNT = NULL;
    }
    delete[] it_order;  // freed unconditionally (previously leaked when nosort)
    te = tt.Stop();
    stats.add(DBASE_MAXITEM, DCB->NumF1, te);
}
/**
*
* Output pointer array to console
*
* @param array Pointer array of type int
* @param size Size of the array
*
*/
void print_array(int* array, int size) {
    // Emit each element space-separated, then a trailing newline.
    int idx = 0;
    while (idx < size) {
        cout << array[idx] << " ";
        ++idx;
    }
    cout << endl;
}
/**
 * F2 frequency generation (second database scan).
 *
 * Counts support of every ordered frequent-item pair (it1 with descendant
 * it2, tracked by a scope counter), builds DB_array from the valid
 * transactions sorted by node count, and seeds the candidate hash tree with
 * the frequent 2-subtree equivalence classes.
 *
 * Fixes: freqCand was `new`ed per pair and leaked (freq_cand.insert copies,
 * so a reused stack vector suffices); the original DB_array spine was leaked
 * when replaced with the sorted copy.
 */
void get_F2() {
    int i, j;
    int it1, it2;
    int scnt;
    int tree_id=0;
    // SETUP TIMETRACKER
    TimeTracker tt;
    double te;
    // START F2 TIME
    tt.Start();
    // itcnt2[i][j] = support of pair (i,j); flgs[i][j] = last Cid that counted it
    int **itcnt2 = new int*[DCB->NumF1];
    int **flgs = new int*[DCB->NumF1];
    for (i = 0; i < DCB->NumF1; i++) {
        itcnt2[i] = new int[DCB->NumF1];
        flgs[i] = new int[DCB->NumF1];
        for (j = 0; j < DCB->NumF1; j++) {
            itcnt2[i][j] = 0;
            flgs[i][j] = -1;
        }
    }
    // CREATE DB ARRAY
    DCB->DB_array = new int*[DBASE_NUM_TRANS];
    multimap<int, int> tree_sz_mp; //key: size of the tree, value: tree id of that size. For sorting the dataset
    vector<int> freqCand;  // reused stack vector (was `new`ed and leaked per pair)
    int nod_num = 0;
    while (DCB->get_next_trans()) {
        nod_num = 0;
        DCB->get_valid_trans();
        //Creating DB array with the valid transaction (removing infrequent items), the size of the transaction is decreased here
        if(DCB->TransSz > 1) {
            tot_trans_cnt++;
            DCB->DB_array[tree_id] = new int[DCB->TransSz + 2];
            DCB->DB_array_size+= DCB->TransSz+1;
            DCB->DB_array[tree_id][0] = DCB->TransSz;
            for (int trans_iter = 0; trans_iter < DCB->TransSz; trans_iter++) {
                DCB->DB_array[tree_id][trans_iter + 2] = DCB->TransAry[trans_iter];
                if(DCB->TransAry[trans_iter] != BranchIt){ //Number of nodes in one transaction
                    nod_num++;
                }
            }
            DCB->DB_array[tree_id][1]=nod_num;
            tree_sz_mp.insert(pair<int,int>(nod_num,tree_id));
            tree_id++;
            //count a pair only once per cid
            for (i = 0; i < DCB->TransSz; i++) {
                it1 = DCB->TransAry[i];
                if (it1 != BranchIt && it1 != DCB->NumF1) {
                    scnt = 0;  // scope counter: it2 lies in it1's subtree while scnt >= 0
                    for (j = i + 1; scnt >= 0 && j < DCB->TransSz; j++) {
                        it2 = DCB->TransAry[j];
                        if (it2 != BranchIt && it2 != DCB->NumF1) {
                            scnt++;
                            if (count_unique) {
                                if (flgs[it1][it2] == DCB->Cid)
                                    continue;
                                else
                                    flgs[it1][it2] = DCB->Cid;
                            }
                            itcnt2[it1][it2]++;
                        } else
                            scnt--;
                    }
                }
            }
        } else
            continue;
    }
    // SORT DATASET & MAKE treeSz_loc_map TO SEE TREE SIZE LOCATION IN DATABASE
    int** DB_array_tmp = new int*[tree_id];
    int loc_in_sorted_db=0;
    // SORT DB_array
    for(multimap<int, int>::iterator it=tree_sz_mp.begin(); it!=tree_sz_mp.end();it=tree_sz_mp.upper_bound(it->first)){
        pair<multimap<int, int>::iterator, multimap<int, int>::iterator> eql_rng = tree_sz_mp.equal_range(it->first);
        for(multimap<int, int>::iterator it2=eql_rng.first;it2!=eql_rng.second;it2++){
            DB_array_tmp[loc_in_sorted_db] = DCB->DB_array[it2->second];
            loc_in_sorted_db++;
        }
        treeSz_loc_mp[it->first]=loc_in_sorted_db-1;
    }
    delete [] DCB->DB_array;  // old spine only; rows now live in DB_array_tmp (was leaked)
    DCB->DB_array = DB_array_tmp;
    DBASE_NUM_TRANS = tree_id;
    int F2cnt = 0;
    // GENERATE NEW HASHTREE
    CandK = new HashTree(0);
    CandK->maxdepth() = 1;
    if (prune_type == prune)
        FK.clearall();
    // COUNT FREQUENT PATTERNS & GENERATE eqclass
    Eqclass *eq;
    for (i = 0; i < DCB->NumF1; i++) {
        eq = NULL;
        for (j = 0; j < DCB->NumF1; j++) {
            if (itcnt2[i][j] >= MINSUPPORT) {
                F2cnt++;
                if (eq == NULL) {
                    eq = new Eqclass();
                    eq->prefix().push_back(i);
                }
                eq->add_node(j, 0, itcnt2[i][j]);
                if (prune_type == prune){
                    freqCand.clear();
                    freqCand.push_back(i);
                    freqCand.push_back(j);
                    freq_cand.insert(freqCand);
                }
            }
            if (eq != NULL) {
                if (output_console)
                    cout << DCB->FreqIdx[i] << " " << DCB->FreqIdx[j] << " - "
                            << itcnt2[i][j] << endl;
            }
        }
        if (eq != NULL) {
            CandK->add_element(eq);
            CandK->eqlist()->push_front(eq);
            CandK->count()++;
        }
    }
    // FREE MEMORY
    for (i = 0; i < DCB->NumF1; i++) {
        delete[] itcnt2[i];
        delete[] flgs[i];
    }
    delete[] itcnt2;
    delete[] flgs;
    cout << "F2 - " << F2cnt << " " << DCB->NumF1 * DCB->NumF1 << endl;
    // LOG F2 END TIME
    te = tt.Stop();
    stats.add(DCB->NumF1 * DCB->NumF1, F2cnt, te);
}
/**
*
* Add node to Eqclass, checks for pruning
*
*/
void add_node(int iter, Eqclass *neq, int val, int pos) {
    if (prune_type == noprune) {
        //don't do any pruning
        neq->add_node(val, pos);
        return;
    }
    // PRUNE BASED ON FREQUENT SUBTREE: every subtree obtained by deleting one
    // node from the encoded candidate must already be in freq_cand.
    // Fix: removed the unused `static vector<int> subtree` and `int hval`.
    static vector<int> cand;  // candidate encoding: prefix + BranchIt moves + val
    int scope, scnt;
    // FORM THE CANDIDATE PREFIX
    cand = neq->prefix();
    scnt = neq->get_scope(pos, scope); //checks the scope of node.pos
    while (scnt > scope) {
        cand.push_back(BranchIt);
        scnt--;
    }
    cand.push_back(val);
    int cnt=0;
    vector<int> candTmp;
    vector<int>::iterator it1, it2;
    // PRUNING
    // NOTE(review): the erase-then-reassign iterator pattern below relies on
    // vector capacity being preserved across `candTmp = cand`; fragile, but
    // left byte-identical here.
    candTmp = cand;
    int num_root_visiting=0; //used for checking if deleting root node or not
    // CHECKING THE ROOT (pure path: no BranchIt at all)
    if(find(candTmp.begin(), candTmp.end(), BranchIt) == candTmp.end()){
        for(it1=candTmp.begin(); it1 != candTmp.end()-1; it1++){
            candTmp.erase(it1);
            if(freq_cand.find(candTmp) == freq_cand.end()){
                return;  // a one-node deletion is infrequent -> prune
            }
            candTmp.clear();
            candTmp = cand;
        }
    }
    // CHECKING THE ROOT CANDIDATE
    candTmp.clear();
    candTmp = cand;
    cnt=0;
    it1=candTmp.begin();
    for(it2=candTmp.begin()+1; it2 != candTmp.end(); it2++){
        if(*it2 != BranchIt){
            cnt++;
        }
        else{
            cnt--;
            if(cnt == 0)
                num_root_visiting++;  // returned to root level -> root has >1 child
        }
    }
    if(num_root_visiting==0){
        candTmp.erase(it1);
        if(freq_cand.find(candTmp) == freq_cand.end()){
            return;
        }
        candTmp.clear();
        candTmp = cand;
    }
    // CHECKING THE REST OF THE NODES
    for(it1=candTmp.begin()+1; it1 != candTmp.end()-1; it1++){
        if(*it1 != BranchIt){
            cnt=0;
            for(it2=it1+1; it2 != candTmp.end(); it2++){
                if(*it2 != BranchIt){
                    cnt++;
                }
                else{
                    cnt--;
                    if(cnt==-1){
                        // internal node: drop it and its closing BranchIt
                        candTmp.erase(it1);
                        candTmp.erase(it2-1);
                        break;
                    }
                }
            }
            if(candTmp.size() == cand.size()){
                candTmp.erase(it1);  // leaf node: only the node itself is dropped
            }
            if(freq_cand.find(candTmp) == freq_cand.end()){
                return;
            }
            candTmp.clear();
            candTmp = cand;
        }
    }
    //otherwise add the node
    neq->add_node(val, pos);
}
/**
 * Join every pair of extension nodes (ni, nj) of `eq` with ni->pos >= nj->pos
 * to generate candidate classes for the next iteration; classes with at least
 * one surviving node are appended to `neweql`.
 *
 * Fix: guard `neq` against NULL before dereferencing its node list (the inner
 * loop always visits nj == ni, but memory safety should not rely on that).
 */
void cand_gen(int iter, Eqclass &eq, list<Eqclass *> &neweql) {
    Eqclass *neq;
    list<Eqnode>::iterator ni, nj;
    for (ni = eq.nlist().begin(); ni != eq.nlist().end(); ++ni) {
        neq = NULL;
        for (nj = eq.nlist().begin(); nj != eq.nlist().end(); ++nj) {
            if (ni->pos < nj->pos)
                continue;
            if (neq == NULL) {
                neq = new Eqclass;
                neq->set_prefix(eq.prefix(), *ni);
            }
            if (ni->pos > nj->pos)
                add_node(iter, neq, nj->val, nj->pos);
            else {
                // equal positions: try nj both as sibling and as child of ni
                add_node(iter, neq, nj->val, nj->pos);
                add_node(iter, neq, nj->val, neq->prefix().size() - 1);
            }
        }
        if (neq != NULL && !neq->nlist().empty()) {
            neweql.push_back(neq);
        } else
            delete neq;  // delete of NULL is a safe no-op
    }
}
/**
 * Drive candidate generation for iteration `iter`: consume every equivalence
 * class stored in the hash tree, join it via cand_gen(), and re-insert the
 * resulting classes, accumulating the total candidate count in `candcnt`.
 */
void candidate_generation(int iter, HashTree *ht, int &candcnt) {
    list<Eqclass *> *oldeql = ht->eqlist();
    list<Eqclass *> *neweql = new list<Eqclass *>;
    Eqclass *eq;
    ht->flag() = -1; //reset the flag
    while (!oldeql->empty()) {
        eq = oldeql->front();
        cand_gen(iter, *eq, *neweql);
        delete eq;  // old class fully consumed by the join
        ht->count()--;
        oldeql->pop_front();
    }
    list<Eqclass *>::iterator ni;
    for (ni = neweql->begin(); ni != neweql->end(); ni++) {
        ht->eqlist()->push_back(*ni);
        ht->count()++;
        candcnt += (*ni)->nlist().size();
    }
    delete neweql;  // list shell only; the Eqclass pointers now live in ht
}
/**
 * Stream a vector of ints as space-separated values (no trailing space).
 *
 * Fix: an empty vector previously indexed vec[0] out of bounds (undefined
 * behavior); it now streams nothing.  Loop index widened to size_t to avoid
 * a signed/unsigned comparison.
 */
std::ostream & operator<<(std::ostream& fout, std::vector<int> &vec) {
    if (vec.empty())
        return fout;
    fout << vec[0];
    for (size_t i = 1; i < vec.size(); i++)
        fout << " " << vec[i];
    return fout;
}
/**
 * For each extension node of `eq`, scan the current transaction
 * (DCB->DB_array[DCB->db_iter]) from tpos with scope tscope and increment the
 * node's support on every in-scope match.  `stk` holds the scopes of already
 * matched prefix nodes; `cflgs[f]` marks node f as counted for this
 * transaction (unique counting).  Returns true when all nodes were counted.
 *
 * Fix: removed the outer `st, en, l` declarations — `l` was unused and
 * `st, en` were shadowed by the inner declaration, a latent hazard.
 */
bool incr_nodes(Eqclass *eq, int tpos, int tscope, stack<int> &stk,
        bit_vector &cflgs) {
    int i, f;
    bool retval = false;
    int fcnt = 0;
    int scope, ttscope, ttpos;
    stack<int> tstk;
    list<Eqnode>::iterator ni = eq->nlist().begin();
    for (f = 0; ni != eq->nlist().end(); ni++, f++) {
        //if unique counts and node has been counted, skip to next node
        if (count_unique && cflgs[f]) {
            fcnt++;
            continue;
        }
        ttscope = tscope;
        scope = ttscope;
        ttpos = tpos;
        bool skip = false;
        int st, en;
        en = eq->get_scope(ni->pos, st);
        if (en > st) {
            // node attaches to a higher ancestor: pop matched scopes up to it
            skip = true;
            while (en > st) {
                st++;
                tstk.push(stk.top());
                stk.pop();
            }
            ttscope = tstk.top();
        }
        // skip past the subtree(s) that were popped
        while (skip && scope >= ttscope && ttpos < DCB->DB_array[DCB->db_iter][0]) {
            if (DCB->DB_array[DCB->db_iter][ttpos + 1] == BranchIt)
                scope--;
            else
                scope++;
            ttpos++;
        }
        if (skip)
            ttscope = stk.top();
        //search for the last item within cur_scope
        for (i = ttpos; i < DCB->DB_array[DCB->db_iter][0]; i++) {
            if (DCB->DB_array[DCB->db_iter][i + 1] == BranchIt)
                scope--;
            else
                scope++;
            if (scope < ttscope)
                break;
            if (ni->val == DCB->DB_array[DCB->db_iter][i + 1]) {
                if (count_unique) {
                    if (!cflgs[f]) {
                        cflgs[f] = true;
                        fcnt++;
                        ni->sup++;
                    }
                } else
                    ni->sup++;
            }
        }
        // restore the temporarily popped scopes for the next node
        while (!tstk.empty()) {
            stk.push(tstk.top());
            tstk.pop();
        }
    }
    //all nodes have been seen
    if (count_unique && fcnt == cflgs.size())
        retval = true;
    return retval;
}
/**
 * Recursive prefix matcher: tries to embed eq->prefix()[ppos..] into the
 * current transaction (DCB->DB_array[DCB->db_iter]) starting at tpos within
 * scope tscope.  `stk` records the scope of each matched prefix node; on a
 * full prefix match incr_nodes() counts the extension nodes.  Returns true
 * once all extension nodes have been counted (unique counting).
 */
bool incr_support(Eqclass *eq, int tpos, int ppos, int tscope, stack<int> &stk,
        bit_vector &cflgs) {
    int i;
    int scope, ttscope;  // NOTE(review): ttscope is unused here
    stack<int> tstk;
    scope = tscope;
    bool skip = false;
    if (eq->prefix()[ppos] == BranchIt) {
        // backtrack: pop one matched scope per BranchIt in the prefix
        skip = true;
        while (eq->prefix()[ppos] == BranchIt) {
            tstk.push(stk.top());
            stk.pop();
            ppos++;
        }
        tscope = tstk.top();
    }
    // skip the transaction positions still inside the subtree we backed out of
    while (skip && scope >= tscope && tpos < DCB->DB_array[DCB->db_iter][0]) {
        if (DCB->DB_array[DCB->db_iter][tpos + 1] == BranchIt)
            scope--;
        else
            scope++;
        tpos++;
    }
    if (skip)
        tscope = stk.top();
    bool allfound = false;
    for (i = tpos; i < DCB->DB_array[DCB->db_iter][0] && !allfound; i++) {
        if (DCB->DB_array[DCB->db_iter][i + 1] == BranchIt)
            scope--;
        else
            scope++;
        if (scope < tscope)
            break;  // left the legal scope: no further match possible here
        if (DCB->DB_array[DCB->db_iter][i + 1] == eq->prefix()[ppos]) {
            stk.push(scope);
            if (ppos == eq->prefix().size() - 1) {
                // whole prefix matched: count extension nodes from here
                allfound = incr_nodes(eq, i + 1, scope, stk, cflgs);
            } else {
                allfound = incr_support(eq, i + 1, ppos + 1, scope, stk, cflgs);
            }
            stk.pop();
        }
    }
    // restore the temporarily popped scopes before returning to the caller
    while (!tstk.empty()) {
        stk.push(tstk.top());
        tstk.pop();
    }
    return allfound;
}
/**
*
* Returns false if Eqnode support is greater than or equal to the minimum support threshold, true if otherwise
*
*/
/**
 * Predicate for remove_if: true when the node's support is below the global
 * MINSUPPORT threshold (i.e. the candidate is not frequent).
 */
static bool notfrequent(Eqnode &n) {
    return n.sup < MINSUPPORT;
}
bool get_frequent(int iter, HashTree *ht, int &freqcnt) {
int i;
bool empty_leaf = false;
if (ht->isleaf()) {
list<Eqclass *> *eql = ht->eqlist();
Eqclass *eq;
list<Eqclass *>::iterator ni;
for (ni = eql->begin(); ni != eql->end() && !eql->empty();) {
eq = *ni;
list<Eqnode>::iterator nj;
nj = remove_if(eq->nlist().begin(), eq->nlist().end(), notfrequent);
eq->nlist().erase(nj, eq->nlist().end());
freqcnt += eq->nlist().size();
if (output_console && !eq->nlist().empty())
eq->print(DCB);
if (eq->nlist().empty()) {
ni = eql->erase(ni);
CandK->count()--;
}
else {
if (prune_type == prune)
FK.add(eq);
ni++;
}
}
if (eql->empty())
empty_leaf = true;
} else {
HTable::iterator ti, hi = ht->htable().begin();
int ecnt = 0;
for (; hi != ht->htable().end();) {
bool ret = get_frequent(iter, (*hi).second, freqcnt);
if (ret) {
ecnt++;
ti = hi;
hi++;
ht->htable().erase(ti);
} else
hi++;
}
}
return empty_leaf;
}
/**
 * Serializes all candidates of CandK into one flat int array for the GPU
 * kernel. Each candidate occupies (2*iter-1) slots: the class prefix, then
 * the extension node inserted at its rightmost-path position, followed by
 * BranchIt markers closing the remaining open branches.
 *
 * @param candcnt number of candidates (slots allocated: candcnt*(2*iter-1))
 * @param iter    current pattern length
 * @return newly allocated array; caller owns it and must delete[] it
 *
 * Fix vs. original: removed the dead local `lastBranchDepth`.
 */
int* create_cand_array(int candcnt, int iter){
    int* cand_array = new int[candcnt*(2*iter-1)];
    int cand_array_it = 0;
    list<Eqclass *> *eql = CandK->eqlist();
    list<Eqclass *>::iterator ei;
    vector<int> righ_path_pos; //position in the prefix vector
    for(ei = eql->begin(); ei != eql->end(); ei++){
        Eqclass* eq = *ei;
        list<Eqnode>::iterator ni;
        // CREATE RIGHTMOST PATH: positions of the still-open ancestors.
        righ_path_pos.clear();
        for(int i=0; i<eq->prefix().size();i++){
            if(eq->prefix()[i] != BranchIt){
                righ_path_pos.push_back(i);
            }
            else{
                righ_path_pos.pop_back();
            }
        }
        for(ni=eq->nlist().begin(); ni!=eq->nlist().end(); ni++){
            //copy the prefix
            for(int i=0; i<eq->prefix().size();i++){
                cand_array[cand_array_it] = eq->prefix()[i];
                cand_array_it++;
            }
            //add the extension node and the remaining branches
            for(int i=righ_path_pos.size()-1; i>-1;i--){
                if(ni->pos == righ_path_pos[i]){
                    // Extension attaches here: emit it, then close its branch.
                    cand_array[cand_array_it] = ni->val;
                    cand_array_it++;
                    cand_array[cand_array_it] = BranchIt;
                    cand_array_it++;
                }
                else{
                    cand_array[cand_array_it] = BranchIt;
                    cand_array_it++;
                }
            }
        }
    }
    return cand_array;
}
/**
*
* Receive GPU results & update their support
*
* @param candcnt Candidate count
* @param freqcnt Frequency count
* @param gpu_result Results from GPU kernel invocation
*
*/
/**
 * Receives GPU results & updates candidate supports.
 *
 * Copies the per-candidate counts from gpu_result back into the Eqnodes (in
 * the same order create_cand_array() emitted them), erases infrequent nodes,
 * drops classes that became empty, and — under pruning — records each frequent
 * candidate's encoding in freq_cand.
 *
 * @param candcnt    expected candidate count (consistency-checked at the end)
 * @param freqcnt    running total of frequent nodes (in/out)
 * @param gpu_result support counts produced by the GPU kernel
 *
 * Fix vs. original: the per-candidate `new vector<int>` was never deleted
 * (freq_cand.insert copies its argument) — a stack vector is used instead.
 */
void update_sup(int& candcnt, int& freqcnt, int* gpu_result){
    list<Eqclass *> *eql = CandK->eqlist();
    list<Eqclass *>::iterator ei;
    int candIt=0;
    for(ei = eql->begin(); ei != eql->end() && !eql->empty();){
        Eqclass* eq = *ei;
        list<Eqnode>::iterator ni;
        // Copy the GPU-computed supports in candidate order.
        for(ni=eq->nlist().begin(); ni!=eq->nlist().end(); ni++){
            ni->sup = gpu_result[candIt];
            candIt++;
        }
        // CHECK FREQUENCY
        list<Eqnode>::iterator nj;
        list<Eqnode>::iterator njj;
        nj = remove_if(eq->nlist().begin(), eq->nlist().end(), notfrequent);
        eq->nlist().erase(nj, eq->nlist().end());
        int cnt;
        int node_tmp;
        int depth_pos=0;//the depth position of the parent node of the extension node
        freqcnt += eq->nlist().size();
        if (eq->nlist().empty()) {
            ei = eql->erase(ei);
            CandK->count()--;
        }
        else {
            //push to Fk
            if (prune_type == prune){
                for(njj = eq->nlist().begin(); njj != eq->nlist().end(); njj++){
                    cnt = -1;
                    // Build the candidate encoding on the stack; insert() copies
                    // it, so no heap allocation (the original leaked one vector
                    // per candidate here).
                    vector<int> cand;
                    for(int j=0; j< eq->prefix().size();j++){
                        node_tmp = eq->prefix()[j];
                        if(node_tmp == BranchIt)
                            cnt--;
                        else
                            cnt++;
                        if(j == njj->pos)
                            depth_pos = cnt;
                        cand.push_back(eq->prefix()[j]);
                    }
                    // Close branches until we are back at the extension's parent depth.
                    while(cnt != depth_pos){
                        cand.push_back(BranchIt);
                        cnt--;
                    }
                    cand.push_back(njj->val);
                    freq_cand.insert(cand);
                }
            }
            ei++;
        }
    }
    if(candIt != candcnt){
        cerr << "Error in updating the support" << endl;
        exit(0);
    }
}
/**
 * Main level-wise mining loop: for iter = 3, 4, ... generates candidates,
 * ships them to the GPU, runs frequency_counter, and copies supports back
 * via update_sup() until no candidates remain.
 *
 * Fixes vs. original: `cand_h` (new[]'d by create_cand_array every iteration)
 * is now delete[]'d, and the device buffers trees_d / tr_start_ind_d are
 * freed once the loop ends.
 */
void get_Fk() {
    // Fk SETUP
    int candcnt=0, freqcnt=0;
    // SETUP TIME TRACKER
    TimeTracker tt;
    double te;
    ////////////////////////////
    /////////GPU////////////////
    int* trees_d;
    int* tr_start_ind_d;
    int* freq_result_d;
    int* cand_d;
    int* cand_h;
    // DEVICE MEMORY ALLOCATION AND COPYING FROM DATABASE
    ERROR_CHECK(cudaMalloc(&trees_d, DCB->DB_array_size*sizeof(int)));
    ERROR_CHECK(cudaMemcpy(trees_d, DCB->trees_h, DCB->DB_array_size*sizeof(int), cudaMemcpyHostToDevice));
    ERROR_CHECK(cudaMalloc(&tr_start_ind_d, DBASE_NUM_TRANS*sizeof(int)));
    ERROR_CHECK(cudaMemcpy(tr_start_ind_d, DCB->tr_start_ind_h, DBASE_NUM_TRANS*sizeof(int), cudaMemcpyHostToDevice));
    for (int iter = 3; !CandK->isempty(); iter++) {
        tt.Start();
        CandK->maxdepth() = iter - 1;
        candcnt = 0;
        freqcnt = 0;
        // GENERATE LIST OF CANDIDATES
        candidate_generation(iter, CandK, candcnt);
        cand_h = create_cand_array(candcnt,iter);
        if (candcnt > 0) {
            // START KERNEL RUN TIME
            kernel_tt.Start();
            ERROR_CHECK(cudaMalloc(&cand_d, (2*iter-1)*candcnt*sizeof(int)));
            ERROR_CHECK(cudaMemcpy(cand_d, cand_h, (2*iter-1)*candcnt*sizeof(int), cudaMemcpyHostToDevice));
            ERROR_CHECK(cudaMallocManaged(&freq_result_d, candcnt*sizeof(int)));
            ERROR_CHECK(cudaMemset(freq_result_d, 0, candcnt*sizeof(int)));
            // SET GPU DIMENSIONS (BLOCK SIZE, GRID SIZE); grid is capped at 65535
            int threadNum = block_dim;
            int blockNum = (DBASE_NUM_TRANS/threadNum > 65535) ? 65535 : (DBASE_NUM_TRANS-1)/threadNum+1;
            frequency_counter<<<blockNum,threadNum>>>(trees_d, tr_start_ind_d,
                    DBASE_NUM_TRANS, iter, candcnt, cand_d, freq_result_d);
            // Sync so managed freq_result_d is safe to read on the host.
            if ((cudaDeviceSynchronize()) != cudaSuccess) printf("error in cuda device synchronization\n");
            // STOP & APPEND KERNEL RUN TIME TO TOTAL KERNEL RUN TIME
            kernel_time += kernel_tt.Stop();
            if (prune_type == prune)
                FK.clearall();
            if (prune_type == prune){
                erase_set(freq_cand);
            }
            update_sup(candcnt,freqcnt,freq_result_d);
            // FREE DEVICE MEMORY
            cudaFree(freq_result_d);
            cudaFree(cand_d);
        }
        // Host candidate array was new[]'d by create_cand_array: free it
        // every iteration (the original leaked it).
        delete[] cand_h;
        cout << "F" << iter << " - " << freqcnt << " " << candcnt << endl;
        te = tt.Stop();
        stats.add(candcnt, freqcnt, te);
    }
    // Release the database buffers uploaded before the loop.
    cudaFree(trees_d);
    cudaFree(tr_start_ind_d);
    if (prune_type == prune){
        erase_set(freq_cand);
    }
}
/**
 * Flattens the in-memory database into the GPU-friendly layout:
 * trees_h holds, per tree, its size followed by its node encoding, and
 * tr_start_ind_h holds each tree's start offset into trees_h.
 *
 * Fix vs. original: the overflow check ran *after* the out-of-bounds write
 * had already happened; it now guards every write.
 */
void create_gpu_stats(){
    //making DB for GPU -- map to "trees" variable for GPU
    //AND making start point of trees (tr_start_ind_h), the index consider the size of the tree as well
    int trees_h_it=0;
    int tr_start_ind_h_it=0;
    DCB->trees_h = new int[DCB->DB_array_size];
    DCB->tr_start_ind_h = new int[DBASE_NUM_TRANS];
    for(int i=0; i<DBASE_NUM_TRANS; i++){
        if(trees_h_it >= DCB->DB_array_size){
            cerr << "Error in creating DB for GPU" << endl;
            exit(0);
        }
        DCB->trees_h[trees_h_it] = DCB->DB_array[i][0]; // tree size first
        DCB->tr_start_ind_h[tr_start_ind_h_it] = trees_h_it;
        tr_start_ind_h_it++;
        trees_h_it++;
        for(int j=0; j<DCB->DB_array[i][0]; j++){
            // Guard BEFORE writing so we never scribble past the buffer.
            if(trees_h_it >= DCB->DB_array_size){
                cerr << "Error in creating DB for GPU" << endl;
                exit(0);
            }
            DCB->trees_h[trees_h_it] = DCB->DB_array[i][j+2];
            trees_h_it++;
        }
    }
}
/**
 * Entry point: parses arguments, loads the database, mines F1/F2 on the host,
 * prepares GPU buffers, runs the level-wise GPU mining loop, and writes
 * timing/statistics to the summary file and the console.
 *
 * Fix vs. original: `deafult:` in the prune_type switch was a typo'd label
 * (dead code) instead of the `default:` case.
 */
int main(int argc, char **argv) {
    // START RUN TIME
    TimeTracker tt;
    tt.Start();
    // PARSE ARGUMENTS TO VARIABLES
    parse_args(argc, argv);
    // GENERATE DATABASE AND GET F1 & F2 SEQUENCES
    DCB = new Dbase_Ctrl_Blk(infile->c_str());
    get_F1();
    get_F2();
    // GET DEVICE PROPERTIES
    cudaDeviceProp prop;
    cudaGetDeviceProperties( &prop, 0 );
    shared_memory_size=prop.sharedMemPerBlock;
    warp_size=prop.warpSize;
    // GENERATE GPU STATS
    create_gpu_stats();
    // RUN TO GET Fk SEQUENCES
    get_Fk();
    // END RUN TIME
    double tottime = tt.Stop();
    stats.tottime = tottime;
    // WRITE RESULTS TO SUMMARY FILE
    ofstream summary(outfile->c_str(), ios::app);
    summary << "HTREEMINER ";
    switch (sort_type) {
    case incr:
        summary << "INCR ";
        break;
    case decr:
        summary << "DECR ";
        break;
    default:
        break;
    }
    switch (prune_type) {
    case prune:
        summary << "PRUNE ";
        break;
    default:
        break;
    }
    if (!count_unique)
        summary << "MULTIPLE ";
    summary << *infile << " " << MINSUP_PER << " " << DBASE_NUM_TRANS << " "
            << MINSUPPORT << " ";
    summary << stats << endl;
    summary.close();
    // PRINT RESULTS TO CONSOLE
    cout << stats << endl;
    cout << "TIME = " << tottime << endl;
    cout << endl << "Total time = " << tottime << endl;
    cout << "Kernel time = " << kernel_time << endl;
    cout << "Pre-proc time = " << tottime - kernel_time << endl;
    // EXIT SUCCESSFULLY
    exit(0);
}
|
c0c6f361fc700b72d11a9cd8990b0364b3a81f1f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include <cmath>
#include "caffe/layers/neuron_layer.hpp"
#include "caffe/layers/conv_conjugate_norm_layer.hpp"
namespace caffe {
/**
 * Returns max_i |inp[i]| over a host-side array of `count` elements.
 * Precondition: count >= 1 (inp[0] is read unconditionally).
 *
 * Fix vs. original: uses std::abs so the floating-point overload is chosen;
 * a bare global abs() may bind to the C int overload and silently truncate
 * float/double magnitudes.
 */
template <typename Dtype>
Dtype FindMax(const int count, const Dtype* inp) {
    Dtype maxinp = std::abs(inp[0]);
    for (int i = 1; i < count; i++) {
        if (std::abs(inp[i]) > maxinp) {
            maxinp = std::abs(inp[i]);
        }
    }
    return maxinp;
}
/**
 * L_p-normalizes `inp` into `out`: out = inp / ((sum_i |inp_i|^p)^(1/p) + 1e-10).
 * `f_buff` is scratch for |inp|; the computed norm (denominator) is returned
 * through `n_buff`. Note `out` is reused as scratch for |inp|^p before the
 * final scaled copy, so its intermediate contents are overwritten.
 */
template <typename Dtype>
void PNorm(const int count, const Dtype p, const Dtype* inp,
Dtype* f_buff, Dtype &n_buff, Dtype* out) {
caffe_gpu_abs(count, inp, f_buff);
//caffe_gpu_add_scalar(dim, Dtype(1e-10), b_buff_data);
caffe_gpu_powx(count, f_buff, p, out);
Dtype denom;
caffe_gpu_asum(count, out, &denom);
// (sum |x|^p)^(1/p); the epsilon keeps the following division finite.
denom = pow(denom, 1./p) + Dtype(1e-10);
n_buff = denom;
caffe_gpu_scale(count, Dtype(1./denom), inp, out);
}
// Divides each input element by its group norm while permuting back to NCHW.
// spatial mode: `inp` is in the k_size x k_size tiled layout produced by
// im2colgpu and n_buff holds one norm per (n, c, tile); otherwise `inp` is
// channel-last and n_buff holds one norm per (n, h, w) position.
template <typename Dtype>
__global__ void NormScaleOut(const bool spatial, const int k_size,
const int count, const int channels, const int height,
const int width, const Dtype* inp, const Dtype* n_buff, Dtype* out) {
CUDA_KERNEL_LOOP(index, count){
if (spatial){
// Number of tiles per spatial axis (ceil so edge tiles are partial).
int num_H = ceil(float(height) / k_size);
int num_W = ceil(float(width) / k_size);
int n = index / (channels*height*width);
int c = index / (height*width) % channels;
int h = index / width % height;
int w = index % width;
int ch = floor(float(h) / k_size);  // tile row
int cw = floor(float(w) / k_size);  // tile column
int ph = h % k_size;                // position inside the tile
int pw = w % k_size;
int inp_ind = ((((n*channels+c)*num_H+ch)*num_W+cw)*k_size+ph)*k_size+pw;
int out_ind = ((n*channels+c)*height+h)*width+w;
out[out_ind] = inp[inp_ind] / n_buff[((n*channels+c)*num_H+ch)*num_W+cw];
}else{
int dim = height*width;
int n = index / (channels*height*width);
int c = index / (height*width) % channels;
int h = index / width % height;
int w = index % width;
// Channel-last input index; NCHW output index.
int inp_ind = (n*dim+h*width+w)*channels+c;
int out_ind = (n*channels+c)*dim+h*width+w;
out[out_ind] = inp[inp_ind] / n_buff[n*dim+h*width+w];
}
}
}
// CUDA kernel for im2col: permutes an NCHW tensor into the normalization
// layout. spatial mode gathers each k_size x k_size spatial tile into a
// contiguous run; otherwise it moves channels to the innermost dimension.
// col2imgpu below is the exact inverse (inp/out indices swapped).
template <typename Dtype>
__global__ void im2colgpu(const bool spatial, const int k_size,
const int count, const int channels, const int height,
const int width, const Dtype* inp, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
if (spatial){
int num_H = ceil(float(height) / k_size);
int num_W = ceil(float(width) / k_size);
int n = index / (channels*height*width);
int c = index / (height*width) % channels;
int h = index / width % height;
int w = index % width;
int ch = floor(float(h) / k_size);  // tile row
int cw = floor(float(w) / k_size);  // tile column
int ph = h % k_size;                // position inside the tile
int pw = w % k_size;
int out_ind = ((((n*channels+c)*num_H+ch)*num_W+cw)*k_size+ph)*k_size+pw;
int inp_ind = ((n*channels+c)*height+h)*width+w;
out[out_ind] = inp[inp_ind];
}else{
int dim = height*width;
int n = index / (channels*height*width);
int c = index / (height*width) % channels;
int h = index / width % height;
int w = index % width;
// NCHW -> channel-last.
int out_ind = (n*dim+h*width+w)*channels+c;
int inp_ind = (n*channels+c)*dim+h*width+w;
out[out_ind] = inp[inp_ind];
}
}
}
// CUDA kernel for col2im: inverse of im2colgpu — scatters the tiled (or
// channel-last) layout back into NCHW. Identical index math with the roles
// of inp_ind/out_ind swapped.
template <typename Dtype>
__global__ void col2imgpu(const bool spatial, const int k_size,
int count, const int channels, const int height,
const int width, const Dtype* inp, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
if (spatial){
int num_H = ceil(float(height) / k_size);
int num_W = ceil(float(width) / k_size);
int n = index / (channels*height*width);
int c = index / (height*width) % channels;
int h = index / width % height;
int w = index % width;
int ch = floor(float(h) / k_size);  // tile row
int cw = floor(float(w) / k_size);  // tile column
int ph = h % k_size;
int pw = w % k_size;
int inp_ind = ((((n*channels+c)*num_H+ch)*num_W+cw)*k_size+ph)*k_size+pw;
int out_ind = ((n*channels+c)*height+h)*width+w;
out[out_ind] = inp[inp_ind];
}else{
int dim = height*width;
int n = index / (channels*height*width);
int c = index / (height*width) % channels;
int h = index / width % height;
int w = index % width;
// Channel-last -> NCHW.
int inp_ind = (n*dim+h*width+w)*channels+c;
int out_ind = (n*channels+c)*dim+h*width+w;
out[out_ind] = inp[inp_ind];
}
}
}
// Forward pass: rearranges the input via im2colgpu, normalizes each group by
// its conjugate (q = p/(p-1)) norm, and writes the result back in NCHW.
// The learned exponent p is clamped to [min_p, max_p] (in log space when
// exp_p is set) as a side effect on the parameter blob.
template <typename Dtype>
void ConvConjugateNormLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
Dtype* p_data = this->blobs_[0]->mutable_cpu_data();
Dtype* n_buff = norm_buff_.mutable_gpu_data();
Dtype* n_buff_diff = norm_buff_.mutable_gpu_diff();
Dtype* f_buff = forward_buff_.mutable_gpu_data();
Dtype* norm_inp = norm_inp_.mutable_gpu_data();
Dtype* norm_inp_diff = norm_inp_.mutable_gpu_diff();
Dtype* norm_opt = norm_opt_.mutable_gpu_data();
// Rearrange input into the normalization layout (tiled or channel-last).
hipLaunchKernelGGL(( im2colgpu<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, spatial, k_size, count,
channels, bottom[0]->height(), bottom[0]->width(), bottom_data, norm_inp);
CUDA_POST_KERNEL_CHECK;
if (channel_shared_){
//LOG(INFO) << "shared_norm!";
// Single shared exponent for all groups; clamp it into [min_p, max_p].
Dtype p = p_data[0];
if (exp_p){
p = exp(p);
}
if (p > max_p){
p = max_p;
p_data[0] = max_p;
if (exp_p){
p_data[0] = log(max_p);
}
}
if (p < min_p){
p = min_p;
p_data[0] = min_p;
if (exp_p){
p_data[0] = log(min_p);
}
}
// Conjugate exponent; all q-norms computed in one batched pass.
Dtype q = p / (p - 1.);
caffe_gpu_abs(count, norm_inp, norm_opt);
caffe_gpu_powx(count, norm_opt, Dtype(q), norm_inp_diff);
caffe_gpu_gemv<Dtype>(CblasNoTrans, num*dim, sp_dim, 1.,
norm_inp_.gpu_diff(), multiplier_.gpu_data(), 0.,
n_buff_diff);
caffe_gpu_powx(num*dim, n_buff_diff, Dtype(1./q), n_buff);
caffe_gpu_add_scalar(num*dim, Dtype(1e-10), n_buff);
//for(int i=0; i<5; i++){
//    LOG(INFO) << "n_buff: "<< norm_buff_.cpu_data()[i];
//}
// Divide by the group norms and permute back to NCHW in one kernel.
hipLaunchKernelGGL(( NormScaleOut<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, spatial, k_size,
count, channels, bottom[0]->height(), bottom[0]->width(), norm_inp, n_buff, top_data);
CUDA_POST_KERNEL_CHECK;
}else{
const int div_factor = channel_shared_ ? dim : 1;  // always 1 in this branch
for (int i = 0; i < num*dim; ++i) {
int c = i % dim / div_factor;
Dtype p = p_data[c];
if (exp_p){
p = exp(p);
}
if (p > max_p){
p = max_p;
p_data[c] = max_p;
if (exp_p){
p_data[c] = log(max_p);
}
}
if (p < min_p){
p = min_p;
p_data[c] = min_p;
if (exp_p){
p_data[c] = log(min_p);
}
}
if (p < 1.25){
// p near 1 => q -> infinity: use the max-norm instead.
// NOTE(review): `norm_inp+i*dim` here vs. `i*sp_dim` in the else
// branch looks inconsistent, and the scale call uses unoffset
// norm_inp/norm_opt; also n_buff/norm_inp are mutable_gpu_data()
// pointers being dereferenced on the host — suspected bugs, verify.
n_buff[i] = FindMax(sp_dim, norm_inp+i*dim);
caffe_gpu_scale(sp_dim, Dtype(1./n_buff[i]), norm_inp, norm_opt);
}else{
Dtype q = p / (p - 1.);
PNorm(sp_dim, q, norm_inp+i*sp_dim, f_buff, n_buff[i], norm_opt+i*sp_dim);
}
}
// Permute normalized result back to NCHW.
hipLaunchKernelGGL(( col2imgpu<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, spatial, k_size,
count, channels, bottom[0]->height(), bottom[0]->width(), norm_opt, top_data);
CUDA_POST_KERNEL_CHECK;
}
}
// Bottom backward for the max-norm (p < 1.25) case: the gradient passes
// through scaled by 1/pnorm, except at elements attaining the norm itself,
// whose derivative is zeroed.
template <typename Dtype>
__global__ void ConjugateNormBackward2(const int n, const Dtype pnorm,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
const bool at_max = (abs(in_data[index]) == pnorm);
out_diff[index] = at_max ? Dtype(0.0) : in_diff[index] / pnorm;
}
}
// CUDA kernel for bottom backward (shared-exponent case): quotient-rule
// gradient of x / ||x||, with per-group norm pnorm[c] and precomputed
// <x, dL/dy> in inpxdiff[c]. The exponent argument `p` receives the
// conjugate exponent q at the call site in Backward_gpu.
template <typename Dtype>
__global__ void ConjugateNormSharedBackward(const int n, const int channels,
const Dtype p, const Dtype* pnorm, const Dtype* inpxdiff, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
int c = index / channels;  // group index for this element
out_diff[index] = in_diff[index] / pnorm[c] - inpxdiff[c] * in_data[index]
* pow((abs(in_data[index])+Dtype(1e-20)),(p-2)) / pow((pnorm[c]+Dtype(1e-10)), (p+1));
}
}
// CUDA kernel for bottom backward (per-group case): same quotient-rule
// gradient as ConjugateNormSharedBackward but with scalar pnorm/inpxdiff,
// launched once per group over its sp_dim elements.
template <typename Dtype>
__global__ void ConjugateNormBackward(const int n, const Dtype p,
const Dtype pnorm, const Dtype inpxdiff, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] / pnorm - inpxdiff * in_data[index]
* pow((abs(in_data[index])+Dtype(1e-20)),(p-2)) / pow((pnorm+Dtype(1e-10)), (p+1));
}
}
// CUDA kernel for parameter backward (per-group case): accumulates (+=)
// the exponent gradient contribution for one group; `m` is the scalar
// s3 term computed on the host and exp_diff is dp/d(log p) when exp_p.
template <typename Dtype>
__global__ void ConjugateNormParamBackward2(const int n,
const Dtype pnorm, const Dtype m, const Dtype exp_diff,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] += in_diff[index] * in_data[index] / pnorm * m * exp_diff;
}
}
// CUDA kernel for parameter backward (shared-exponent case): writes (=) the
// per-element exponent gradient using the per-group norm pnorm[c] and the
// per-group s3 term m[c]; the caller reduces these with a dot product.
template <typename Dtype>
__global__ void ConjugateNormParamBackward1(const int n, const int channels,
const Dtype exp_diff, const Dtype* pnorm, const Dtype* m, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
int c = index / channels;  // group index for this element
out_diff[index] = in_diff[index] * in_data[index] / pnorm[c] * m[c] * exp_diff;
}
}
// Per-group reduction: s3[g] = s1[g] + s2[g] * <inp_1[g,:], inp_2[g,:]>.
// One thread handles one group and loops over its `channels` elements, so
// s3 must be zero-initialized by the caller (it is accumulated with +=).
template <typename Dtype>
__global__ void CalculateS3(const int n, const int channels,
const Dtype* inp_1, const Dtype* inp_2, const Dtype* s1,
const Dtype* s2, Dtype* s3) {
CUDA_KERNEL_LOOP(index, n) {
for(int c=0; c<channels; c++){
s3[index] += inp_1[index*channels+c]*inp_2[index*channels+c];
}
s3[index] = s1[index] + s2[index]*s3[index];
}
}
// Per-group dot product: out[g] += <inp_1[g,:], inp_2[g,:]> over `channels`
// elements. Accumulates with +=, so the caller zero-initializes `out`.
template <typename Dtype>
__global__ void CalInpXDiff(const int n, const int channels,
const Dtype* inp_1, const Dtype* inp_2, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
for(int c=0; c<channels; c++){
out[index] += inp_1[index*channels+c]*inp_2[index*channels+c];
}
}
}
// Backward pass: rearranges top_diff into the normalization layout, then
// (a) accumulates the gradient w.r.t. the learned exponent p and
// (b) propagates the quotient-rule gradient to the bottom blob, finally
// permuting it back to NCHW via col2imgpu.
template <typename Dtype>
void ConvConjugateNormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
//const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* norm_inp = norm_inp_.gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* p_data = this->blobs_[0]->cpu_data();
const Dtype* n_buff = norm_buff_.gpu_data();
Dtype* n_diff = norm_buff_.mutable_gpu_diff();
Dtype* norm_inp_diff = norm_inp_.mutable_gpu_diff();
Dtype* norm_opt_diff = norm_opt_.mutable_gpu_diff();
const int count = bottom[0]->count();
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
// Bring top_diff into the same layout Forward_gpu normalized in.
hipLaunchKernelGGL(( im2colgpu<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, spatial, k_size,
count, channels, bottom[0]->height(), bottom[0]->width(), top_diff, norm_opt_diff);
CUDA_POST_KERNEL_CHECK;
int div_factor = channel_shared_ ? dim : 1;
// Propagate to param
if (this->param_propagate_down_[0]) {
Dtype* b_buff_data = backward_buff_.mutable_gpu_data();
Dtype* b_buff_diff = backward_buff_.mutable_gpu_diff();
Dtype* n_temp = norm_buff_2.mutable_gpu_data();
Dtype* n_temp_diff = norm_buff_2.mutable_gpu_diff();
Dtype* b_diff_2 = backward_buff_2.mutable_gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
if (channel_shared_){
Dtype* p_diff = this->blobs_[0]->mutable_cpu_diff();
Dtype p = p_data[0];
Dtype exp_diff = 1;  // chain-rule factor dp/d(log p) when exp_p
if (exp_p){
p = exp(p);
exp_diff = p;
}
Dtype q = p / (p-1.);
// s1 = -(1/(p(p-1))) * log||x||_q, per group
caffe_gpu_log(num*dim,n_buff,n_diff);
caffe_gpu_scale(num*dim,Dtype(-(1./p/(p-1.))),n_diff,n_temp);
// s2 = (1/(p(p-1))) * ||x||_q^{-q}, per group
caffe_gpu_powx(num*dim,n_buff,Dtype(-q),n_diff);
caffe_gpu_scale(num*dim,Dtype(1./p/(p-1.)),n_diff,n_temp_diff);
// log|x| and |x|^q, elementwise (b_buff_* used as scratch)
caffe_copy(count, norm_inp, b_buff_data);
caffe_gpu_abs(count, b_buff_data, b_buff_diff);
caffe_gpu_add_scalar(count, Dtype(1e-10), b_buff_diff);
caffe_gpu_log(count, b_buff_diff, norm_inp_diff);
caffe_gpu_powx(count, b_buff_diff, q, b_buff_data);
// s3 = s1 + s2*s3;
caffe_gpu_set(num*dim,Dtype(0.),n_diff);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( CalculateS3<Dtype>), dim3(CAFFE_GET_BLOCKS(num*dim)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num*dim, sp_dim, norm_inp_.gpu_diff(), backward_buff_.gpu_data(), n_temp, n_temp_diff, n_diff);
CUDA_POST_KERNEL_CHECK;
//caffe_gpu_set(dim, Dtype(0.), b_buff_diff);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ConjugateNormParamBackward1<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, sp_dim, exp_diff, n_buff, n_diff, norm_opt_diff, norm_inp, bottom_diff);
CUDA_POST_KERNEL_CHECK;
// Reduce the per-element contributions into the single shared p.
// NOTE(review): bottom_diff is used as scratch here and is overwritten
// later by the propagate_down branch — confirm that ordering is intended.
Dtype dsum=0;
caffe_gpu_dot<Dtype>(count, bottom[0]->gpu_diff(), multiplier_.gpu_data(), &dsum);
caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), p_diff);
}
else{
Dtype* p_diff = this->blobs_[0]->mutable_gpu_diff();
int cdim = sp_dim * dim;
caffe_gpu_set(cdim, Dtype(0.), b_diff_2);
for (int i=0; i<num*dim; i++){
int c = i % dim / div_factor;
Dtype* b_buff_diff_2 = b_diff_2 + (i%dim) *sp_dim;
Dtype p = p_data[c];
Dtype exp_diff = 1;
if (exp_p){
p = exp(p);
exp_diff = p;
}
Dtype q = p / (p-1.);
// NOTE(review): n_buff comes from norm_buff_.gpu_data() yet is read
// on the host here — suspected host/device mixup, verify.
Dtype qnorm = n_buff[i];
Dtype s1 = -(1./p/(p-1.)) * log(qnorm);
Dtype s2 = (1./p/(p-1.)) * 1./pow(qnorm,q);
// log|x| (into bottom_diff as scratch) and |x|^q for this group.
caffe_copy(sp_dim, norm_inp+i*sp_dim, b_buff_data);
caffe_gpu_add_scalar(sp_dim, Dtype(1e-20), b_buff_data);
caffe_gpu_powx(sp_dim, b_buff_data, Dtype(2), b_buff_diff);
caffe_gpu_powx(sp_dim, b_buff_diff, Dtype(0.5), b_buff_data);
caffe_gpu_log(sp_dim, b_buff_data, bottom_diff);
caffe_gpu_powx(sp_dim, b_buff_data, q, b_buff_diff);
Dtype s3;
caffe_gpu_dot(sp_dim, bottom[0]->gpu_diff(), backward_buff_.gpu_diff(), &s3);
s3 = s1 + s2*s3;
//caffe_gpu_set(dim, Dtype(0.), b_buff_diff);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ConjugateNormParamBackward2<Dtype>), dim3(CAFFE_GET_BLOCKS(sp_dim)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
sp_dim, qnorm, s3, exp_diff, norm_opt_diff+i*sp_dim, norm_inp+i*sp_dim, b_buff_diff_2);
CUDA_POST_KERNEL_CHECK;
}
// Sum the accumulated per-position contributions into p_diff.
caffe_gpu_gemv<Dtype>(CblasNoTrans, dim, sp_dim, 1.,
backward_buff_2.gpu_diff(), multiplier_.gpu_data(), 1.,
p_diff);
}
}
// Propagate to bottom
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_gpu_set(count,Dtype(0.),bottom_diff);
caffe_gpu_set(num*dim,Dtype(0.),n_diff);
if (channel_shared_){
Dtype p = p_data[0];
if (exp_p){
p = exp(p);
}
Dtype q = p / (p-1.);
// inpxdiff;
// 1. caffe_gpu_gemm(CblasNoTrans,CblasTrans,)
// Per-group <x, dL/dy>, needed by the quotient-rule gradient.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( CalInpXDiff<Dtype>), dim3(CAFFE_GET_BLOCKS(num*dim)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num*dim, sp_dim, norm_inp, norm_opt_diff, n_diff);
CUDA_POST_KERNEL_CHECK;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ConjugateNormSharedBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, sp_dim, q, n_buff, n_diff, norm_opt_diff,
norm_inp, norm_inp_diff);
CUDA_POST_KERNEL_CHECK;
}else{
for(int i=0; i<num*dim; i++){
int c = i % dim / div_factor;
Dtype p = p_data[c];
if (exp_p){
p = exp(p);
}
if (p < 1.25){
// Max-norm branch (see ConjugateNormBackward2).
// NOTE(review): n_buff[i] is a host read of a gpu_data() pointer — verify.
hipLaunchKernelGGL(( ConjugateNormBackward2<Dtype>), dim3(CAFFE_GET_BLOCKS(sp_dim)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, sp_dim, n_buff[i], norm_opt_diff+i*sp_dim,
norm_inp+i*sp_dim, norm_inp_diff+i*sp_dim);
CUDA_POST_KERNEL_CHECK;
}else{
Dtype q = p / (p-1.);
Dtype qnorm = n_buff[i];
Dtype inpxdiff;
caffe_gpu_dot(sp_dim, norm_inp+i*sp_dim, norm_opt_diff+i*sp_dim, &inpxdiff);
//LOG(INFO) << "inpxdiff: " << inpxdiff;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ConjugateNormBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(sp_dim)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
sp_dim, q, qnorm, inpxdiff, norm_opt_diff+i*sp_dim,
norm_inp+i*sp_dim, norm_inp_diff+i*sp_dim);
CUDA_POST_KERNEL_CHECK;
}
}
}
// Scatter the gradient back to NCHW layout.
hipLaunchKernelGGL(( col2imgpu<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, spatial, k_size,
count, channels, bottom[0]->height(), bottom[0]->width(), norm_inp_diff, bottom_diff);
CUDA_POST_KERNEL_CHECK;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ConvConjugateNormLayer);
} // namespace caffe
| c0c6f361fc700b72d11a9cd8990b0364b3a81f1f.cu | #include <algorithm>
#include <vector>
#include <cmath>
#include "caffe/layers/neuron_layer.hpp"
#include "caffe/layers/conv_conjugate_norm_layer.hpp"
namespace caffe {
/**
 * Returns max_i |inp[i]| over a host-side array of `count` elements.
 * Precondition: count >= 1 (inp[0] is read unconditionally).
 *
 * Fix vs. original: uses std::abs so the floating-point overload is chosen;
 * a bare global abs() may bind to the C int overload and silently truncate
 * float/double magnitudes.
 */
template <typename Dtype>
Dtype FindMax(const int count, const Dtype* inp) {
    Dtype maxinp = std::abs(inp[0]);
    for (int i = 1; i < count; i++) {
        if (std::abs(inp[i]) > maxinp) {
            maxinp = std::abs(inp[i]);
        }
    }
    return maxinp;
}
/**
 * L_p-normalizes `inp` into `out`: out = inp / ((sum_i |inp_i|^p)^(1/p) + 1e-10).
 * `f_buff` is scratch for |inp|; the computed norm is returned via `n_buff`.
 * `out` is reused as scratch for |inp|^p before the final scaled copy.
 */
template <typename Dtype>
void PNorm(const int count, const Dtype p, const Dtype* inp,
Dtype* f_buff, Dtype &n_buff, Dtype* out) {
caffe_gpu_abs(count, inp, f_buff);
//caffe_gpu_add_scalar(dim, Dtype(1e-10), b_buff_data);
caffe_gpu_powx(count, f_buff, p, out);
Dtype denom;
caffe_gpu_asum(count, out, &denom);
// (sum |x|^p)^(1/p); the epsilon keeps the following division finite.
denom = pow(denom, 1./p) + Dtype(1e-10);
n_buff = denom;
caffe_gpu_scale(count, Dtype(1./denom), inp, out);
}
// Divides each input element by its group norm while permuting back to NCHW.
// spatial mode: `inp` is in the k_size x k_size tiled layout produced by
// im2colgpu, one norm per (n, c, tile); otherwise `inp` is channel-last and
// n_buff holds one norm per (n, h, w) position.
template <typename Dtype>
__global__ void NormScaleOut(const bool spatial, const int k_size,
const int count, const int channels, const int height,
const int width, const Dtype* inp, const Dtype* n_buff, Dtype* out) {
CUDA_KERNEL_LOOP(index, count){
if (spatial){
int num_H = ceil(float(height) / k_size);
int num_W = ceil(float(width) / k_size);
int n = index / (channels*height*width);
int c = index / (height*width) % channels;
int h = index / width % height;
int w = index % width;
int ch = floor(float(h) / k_size);  // tile row
int cw = floor(float(w) / k_size);  // tile column
int ph = h % k_size;                // position inside the tile
int pw = w % k_size;
int inp_ind = ((((n*channels+c)*num_H+ch)*num_W+cw)*k_size+ph)*k_size+pw;
int out_ind = ((n*channels+c)*height+h)*width+w;
out[out_ind] = inp[inp_ind] / n_buff[((n*channels+c)*num_H+ch)*num_W+cw];
}else{
int dim = height*width;
int n = index / (channels*height*width);
int c = index / (height*width) % channels;
int h = index / width % height;
int w = index % width;
// Channel-last input index; NCHW output index.
int inp_ind = (n*dim+h*width+w)*channels+c;
int out_ind = (n*channels+c)*dim+h*width+w;
out[out_ind] = inp[inp_ind] / n_buff[n*dim+h*width+w];
}
}
}
// CUDA kernel for im2col: permutes an NCHW tensor into the normalization
// layout (k_size x k_size tiles when spatial, channel-last otherwise).
// col2imgpu below is the exact inverse (inp/out indices swapped).
template <typename Dtype>
__global__ void im2colgpu(const bool spatial, const int k_size,
const int count, const int channels, const int height,
const int width, const Dtype* inp, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
if (spatial){
int num_H = ceil(float(height) / k_size);
int num_W = ceil(float(width) / k_size);
int n = index / (channels*height*width);
int c = index / (height*width) % channels;
int h = index / width % height;
int w = index % width;
int ch = floor(float(h) / k_size);  // tile row
int cw = floor(float(w) / k_size);  // tile column
int ph = h % k_size;                // position inside the tile
int pw = w % k_size;
int out_ind = ((((n*channels+c)*num_H+ch)*num_W+cw)*k_size+ph)*k_size+pw;
int inp_ind = ((n*channels+c)*height+h)*width+w;
out[out_ind] = inp[inp_ind];
}else{
int dim = height*width;
int n = index / (channels*height*width);
int c = index / (height*width) % channels;
int h = index / width % height;
int w = index % width;
// NCHW -> channel-last.
int out_ind = (n*dim+h*width+w)*channels+c;
int inp_ind = (n*channels+c)*dim+h*width+w;
out[out_ind] = inp[inp_ind];
}
}
}
// CUDA kernel for col2im: inverse of im2colgpu — scatters the tiled (or
// channel-last) layout back into NCHW; same index math with inp/out swapped.
template <typename Dtype>
__global__ void col2imgpu(const bool spatial, const int k_size,
int count, const int channels, const int height,
const int width, const Dtype* inp, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
if (spatial){
int num_H = ceil(float(height) / k_size);
int num_W = ceil(float(width) / k_size);
int n = index / (channels*height*width);
int c = index / (height*width) % channels;
int h = index / width % height;
int w = index % width;
int ch = floor(float(h) / k_size);  // tile row
int cw = floor(float(w) / k_size);  // tile column
int ph = h % k_size;
int pw = w % k_size;
int inp_ind = ((((n*channels+c)*num_H+ch)*num_W+cw)*k_size+ph)*k_size+pw;
int out_ind = ((n*channels+c)*height+h)*width+w;
out[out_ind] = inp[inp_ind];
}else{
int dim = height*width;
int n = index / (channels*height*width);
int c = index / (height*width) % channels;
int h = index / width % height;
int w = index % width;
// Channel-last -> NCHW.
int inp_ind = (n*dim+h*width+w)*channels+c;
int out_ind = (n*channels+c)*dim+h*width+w;
out[out_ind] = inp[inp_ind];
}
}
}
// Forward pass: rearranges the input via im2colgpu, normalizes each group by
// its conjugate (q = p/(p-1)) norm, and writes the result back in NCHW.
// The learned exponent p is clamped to [min_p, max_p] (in log space when
// exp_p is set) as a side effect on the parameter blob.
template <typename Dtype>
void ConvConjugateNormLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
Dtype* p_data = this->blobs_[0]->mutable_cpu_data();
Dtype* n_buff = norm_buff_.mutable_gpu_data();
Dtype* n_buff_diff = norm_buff_.mutable_gpu_diff();
Dtype* f_buff = forward_buff_.mutable_gpu_data();
Dtype* norm_inp = norm_inp_.mutable_gpu_data();
Dtype* norm_inp_diff = norm_inp_.mutable_gpu_diff();
Dtype* norm_opt = norm_opt_.mutable_gpu_data();
// Rearrange input into the normalization layout (tiled or channel-last).
im2colgpu<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>( spatial, k_size, count,
channels, bottom[0]->height(), bottom[0]->width(), bottom_data, norm_inp);
CUDA_POST_KERNEL_CHECK;
if (channel_shared_){
//LOG(INFO) << "shared_norm!";
// Single shared exponent for all groups; clamp it into [min_p, max_p].
Dtype p = p_data[0];
if (exp_p){
p = exp(p);
}
if (p > max_p){
p = max_p;
p_data[0] = max_p;
if (exp_p){
p_data[0] = log(max_p);
}
}
if (p < min_p){
p = min_p;
p_data[0] = min_p;
if (exp_p){
p_data[0] = log(min_p);
}
}
// Conjugate exponent; all q-norms computed in one batched pass.
Dtype q = p / (p - 1.);
caffe_gpu_abs(count, norm_inp, norm_opt);
caffe_gpu_powx(count, norm_opt, Dtype(q), norm_inp_diff);
caffe_gpu_gemv<Dtype>(CblasNoTrans, num*dim, sp_dim, 1.,
norm_inp_.gpu_diff(), multiplier_.gpu_data(), 0.,
n_buff_diff);
caffe_gpu_powx(num*dim, n_buff_diff, Dtype(1./q), n_buff);
caffe_gpu_add_scalar(num*dim, Dtype(1e-10), n_buff);
//for(int i=0; i<5; i++){
//    LOG(INFO) << "n_buff: "<< norm_buff_.cpu_data()[i];
//}
// Divide by the group norms and permute back to NCHW in one kernel.
NormScaleOut<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(spatial, k_size,
count, channels, bottom[0]->height(), bottom[0]->width(), norm_inp, n_buff, top_data);
CUDA_POST_KERNEL_CHECK;
}else{
const int div_factor = channel_shared_ ? dim : 1;  // always 1 in this branch
for (int i = 0; i < num*dim; ++i) {
int c = i % dim / div_factor;
Dtype p = p_data[c];
if (exp_p){
p = exp(p);
}
if (p > max_p){
p = max_p;
p_data[c] = max_p;
if (exp_p){
p_data[c] = log(max_p);
}
}
if (p < min_p){
p = min_p;
p_data[c] = min_p;
if (exp_p){
p_data[c] = log(min_p);
}
}
if (p < 1.25){
// p near 1 => q -> infinity: use the max-norm instead.
// NOTE(review): `norm_inp+i*dim` here vs. `i*sp_dim` in the else
// branch looks inconsistent, and the scale call uses unoffset
// norm_inp/norm_opt; also n_buff/norm_inp are mutable_gpu_data()
// pointers being dereferenced on the host — suspected bugs, verify.
n_buff[i] = FindMax(sp_dim, norm_inp+i*dim);
caffe_gpu_scale(sp_dim, Dtype(1./n_buff[i]), norm_inp, norm_opt);
}else{
Dtype q = p / (p - 1.);
PNorm(sp_dim, q, norm_inp+i*sp_dim, f_buff, n_buff[i], norm_opt+i*sp_dim);
}
}
// Permute normalized result back to NCHW.
col2imgpu<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>( spatial, k_size,
count, channels, bottom[0]->height(), bottom[0]->width(), norm_opt, top_data);
CUDA_POST_KERNEL_CHECK;
}
}
// Bottom backward for the max-norm (p < 1.25) case: the gradient passes
// through scaled by 1/pnorm, except at elements attaining the norm itself,
// whose derivative is zeroed.
template <typename Dtype>
__global__ void ConjugateNormBackward2(const int n, const Dtype pnorm,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
const bool at_max = (abs(in_data[index]) == pnorm);
out_diff[index] = at_max ? Dtype(0.0) : in_diff[index] / pnorm;
}
}
// CUDA kernel for bottom backward (shared-exponent case): quotient-rule
// gradient of x / ||x||, with per-group norm pnorm[c] and precomputed
// <x, dL/dy> in inpxdiff[c]. The exponent argument `p` receives the
// conjugate exponent q at the call site in Backward_gpu.
template <typename Dtype>
__global__ void ConjugateNormSharedBackward(const int n, const int channels,
const Dtype p, const Dtype* pnorm, const Dtype* inpxdiff, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
int c = index / channels;  // group index for this element
out_diff[index] = in_diff[index] / pnorm[c] - inpxdiff[c] * in_data[index]
* pow((abs(in_data[index])+Dtype(1e-20)),(p-2)) / pow((pnorm[c]+Dtype(1e-10)), (p+1));
}
}
// CUDA kernel for bottom backward (per-group case): same quotient-rule
// gradient as ConjugateNormSharedBackward but with scalar pnorm/inpxdiff,
// launched once per group over its sp_dim elements.
template <typename Dtype>
__global__ void ConjugateNormBackward(const int n, const Dtype p,
const Dtype pnorm, const Dtype inpxdiff, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] / pnorm - inpxdiff * in_data[index]
* pow((abs(in_data[index])+Dtype(1e-20)),(p-2)) / pow((pnorm+Dtype(1e-10)), (p+1));
}
}
// CUDA kernel for parameter backward (per-group case): accumulates (+=)
// the exponent gradient contribution for one group; `m` is the scalar
// s3 term computed on the host and exp_diff is dp/d(log p) when exp_p.
template <typename Dtype>
__global__ void ConjugateNormParamBackward2(const int n,
const Dtype pnorm, const Dtype m, const Dtype exp_diff,
const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] += in_diff[index] * in_data[index] / pnorm * m * exp_diff;
}
}
// CUDA kernel for parameter backward (shared-exponent case): writes (=) the
// per-element exponent gradient using the per-group norm pnorm[c] and the
// per-group s3 term m[c]; the caller reduces these with a dot product.
template <typename Dtype>
__global__ void ConjugateNormParamBackward1(const int n, const int channels,
const Dtype exp_diff, const Dtype* pnorm, const Dtype* m, const Dtype* in_diff,
const Dtype* in_data, Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
int c = index / channels;  // group index for this element
out_diff[index] = in_diff[index] * in_data[index] / pnorm[c] * m[c] * exp_diff;
}
}
// CUDA kernel: per row, s3 <- s1 + s2 * (s3 + dot(inp_1_row, inp_2_row)).
// The caller is expected to have pre-set s3 (typically to zero).
template <typename Dtype>
__global__ void CalculateS3(const int n, const int channels,
    const Dtype* inp_1, const Dtype* inp_2, const Dtype* s1,
    const Dtype* s2, Dtype* s3) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype dot = s3[index];  // start from the caller-initialized value
    const int base = index * channels;
    for (int c = 0; c < channels; c++) {
      dot += inp_1[base + c] * inp_2[base + c];
    }
    s3[index] = s1[index] + s2[index] * dot;
  }
}
// CUDA kernel: per row, accumulate dot(inp_1_row, inp_2_row) into out[index].
// out must be pre-initialized by the caller (typically to zero).
template <typename Dtype>
__global__ void CalInpXDiff(const int n, const int channels,
    const Dtype* inp_1, const Dtype* inp_2, Dtype* out) {
  CUDA_KERNEL_LOOP(index, n) {
    Dtype acc = out[index];
    const int base = index * channels;
    for (int c = 0; c < channels; c++) {
      acc += inp_1[base + c] * inp_2[base + c];
    }
    out[index] = acc;
  }
}
// Backward pass of the convolutional conjugate-norm layer.
// Routes top gradients through col2im/im2col staging buffers and computes
// (a) the gradient w.r.t. the learnable norm exponent p and
// (b) the gradient w.r.t. the bottom blob.
template <typename Dtype>
void ConvConjugateNormLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  //const Dtype* bottom_data = bottom[0]->gpu_data();
  const Dtype* norm_inp = norm_inp_.gpu_data();
  const Dtype* top_diff = top[0]->gpu_diff();
  const Dtype* p_data = this->blobs_[0]->cpu_data();
  const Dtype* n_buff = norm_buff_.gpu_data();
  Dtype* n_diff = norm_buff_.mutable_gpu_diff();
  Dtype* norm_inp_diff = norm_inp_.mutable_gpu_diff();
  Dtype* norm_opt_diff = norm_opt_.mutable_gpu_diff();
  const int count = bottom[0]->count();
  const int num = bottom[0]->num();
  const int channels = bottom[0]->channels();
  // Expand the top gradient into column layout so that per-patch norms can
  // be differentiated independently.
  im2colgpu<Dtype><<<CAFFE_GET_BLOCKS(count),
      CAFFE_CUDA_NUM_THREADS>>>(spatial, k_size,
      count, channels, bottom[0]->height(), bottom[0]->width(), top_diff, norm_opt_diff);
  CUDA_POST_KERNEL_CHECK;
  int div_factor = channel_shared_ ? dim : 1;
  // Propagate to param
  if (this->param_propagate_down_[0]) {
    Dtype* b_buff_data = backward_buff_.mutable_gpu_data();
    Dtype* b_buff_diff = backward_buff_.mutable_gpu_diff();
    Dtype* n_temp = norm_buff_2.mutable_gpu_data();
    Dtype* n_temp_diff = norm_buff_2.mutable_gpu_diff();
    Dtype* b_diff_2 = backward_buff_2.mutable_gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    if (channel_shared_){
      // NOTE(review): p_diff comes from mutable_cpu_diff() but is later
      // passed to caffe_gpu_add_scalar below — confirm the blob's diff is
      // actually synchronized/host-accessible there.
      Dtype* p_diff = this->blobs_[0]->mutable_cpu_diff();
      Dtype p = p_data[0];
      Dtype exp_diff = 1;
      if (exp_p){
        // When the parameter is stored in log-space, the chain-rule factor
        // d(exp(p))/dp equals exp(p) itself.
        p = exp(p);
        exp_diff = p;
      }
      Dtype q = p / (p-1.);  // conjugate exponent of p
      // s1
      caffe_gpu_log(num*dim,n_buff,n_diff);
      caffe_gpu_scale(num*dim,Dtype(-(1./p/(p-1.))),n_diff,n_temp);
      // s2
      caffe_gpu_powx(num*dim,n_buff,Dtype(-q),n_diff);
      caffe_gpu_scale(num*dim,Dtype(1./p/(p-1.)),n_diff,n_temp_diff);
      // Stage |x|^q and log|x| (with epsilon) for the s3 reduction.
      caffe_copy(count, norm_inp, b_buff_data);
      caffe_gpu_abs(count, b_buff_data, b_buff_diff);
      caffe_gpu_add_scalar(count, Dtype(1e-10), b_buff_diff);
      caffe_gpu_log(count, b_buff_diff, norm_inp_diff);
      caffe_gpu_powx(count, b_buff_diff, q, b_buff_data);
      // s3 = s1 + s2*s3;
      caffe_gpu_set(num*dim,Dtype(0.),n_diff);
      // NOLINT_NEXT_LINE(whitespace/operators)
      CalculateS3<Dtype><<<CAFFE_GET_BLOCKS(num*dim),
          CAFFE_CUDA_NUM_THREADS>>>(
          num*dim, sp_dim, norm_inp_.gpu_diff(), backward_buff_.gpu_data(), n_temp, n_temp_diff, n_diff);
      CUDA_POST_KERNEL_CHECK;
      //caffe_gpu_set(dim, Dtype(0.), b_buff_diff);
      // NOLINT_NEXT_LINE(whitespace/operators)
      ConjugateNormParamBackward1<Dtype><<<CAFFE_GET_BLOCKS(count),
          CAFFE_CUDA_NUM_THREADS>>>(
          count, sp_dim, exp_diff, n_buff, n_diff, norm_opt_diff, norm_inp, bottom_diff);
      CUDA_POST_KERNEL_CHECK;
      // Reduce the element-wise contributions into a single scalar gradient.
      Dtype dsum=0;
      caffe_gpu_dot<Dtype>(count, bottom[0]->gpu_diff(), multiplier_.gpu_data(), &dsum);
      caffe_gpu_add_scalar(this->blobs_[0]->count(), Dtype(dsum), p_diff);
    }
    else{
      // Per-channel exponent: iterate over every (sample, channel) slice.
      Dtype* p_diff = this->blobs_[0]->mutable_gpu_diff();
      int cdim = sp_dim * dim;
      caffe_gpu_set(cdim, Dtype(0.), b_diff_2);
      for (int i=0; i<num*dim; i++){
        int c = i % dim / div_factor;
        Dtype* b_buff_diff_2 = b_diff_2 + (i%dim) *sp_dim;
        Dtype p = p_data[c];
        Dtype exp_diff = 1;
        if (exp_p){
          p = exp(p);
          exp_diff = p;
        }
        Dtype q = p / (p-1.);
        // NOTE(review): n_buff points at norm_buff_.gpu_data(); indexing it
        // from host code is only valid if that memory is host-accessible
        // (unified/zero-copy) — confirm.
        Dtype qnorm = n_buff[i];
        Dtype s1 = -(1./p/(p-1.)) * log(qnorm);
        Dtype s2 = (1./p/(p-1.)) * 1./pow(qnorm,q);
        // Build |x| via x^2 then sqrt (epsilon-shifted), plus log|x| and |x|^q.
        caffe_copy(sp_dim, norm_inp+i*sp_dim, b_buff_data);
        caffe_gpu_add_scalar(sp_dim, Dtype(1e-20), b_buff_data);
        caffe_gpu_powx(sp_dim, b_buff_data, Dtype(2), b_buff_diff);
        caffe_gpu_powx(sp_dim, b_buff_diff, Dtype(0.5), b_buff_data);
        caffe_gpu_log(sp_dim, b_buff_data, bottom_diff);
        caffe_gpu_powx(sp_dim, b_buff_data, q, b_buff_diff);
        Dtype s3;
        caffe_gpu_dot(sp_dim, bottom[0]->gpu_diff(), backward_buff_.gpu_diff(), &s3);
        s3 = s1 + s2*s3;
        //caffe_gpu_set(dim, Dtype(0.), b_buff_diff);
        // NOLINT_NEXT_LINE(whitespace/operators)
        ConjugateNormParamBackward2<Dtype><<<CAFFE_GET_BLOCKS(sp_dim),
            CAFFE_CUDA_NUM_THREADS>>>(
            sp_dim, qnorm, s3, exp_diff, norm_opt_diff+i*sp_dim, norm_inp+i*sp_dim, b_buff_diff_2);
        CUDA_POST_KERNEL_CHECK;
      }
      // Sum each channel row of backward_buff_2 into the parameter gradient.
      caffe_gpu_gemv<Dtype>(CblasNoTrans, dim, sp_dim, 1.,
          backward_buff_2.gpu_diff(), multiplier_.gpu_data(), 1.,
          p_diff);
    }
  }
  // Propagate to bottom
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    caffe_gpu_set(count,Dtype(0.),bottom_diff);
    caffe_gpu_set(num*dim,Dtype(0.),n_diff);
    if (channel_shared_){
      Dtype p = p_data[0];
      if (exp_p){
        p = exp(p);
      }
      Dtype q = p / (p-1.);
      // inpxdiff: per-row dot of the staged inputs and the column-layout diff.
      // 1. caffe_gpu_gemm(CblasNoTrans,CblasTrans,)
      // NOLINT_NEXT_LINE(whitespace/operators)
      CalInpXDiff<Dtype><<<CAFFE_GET_BLOCKS(num*dim),
          CAFFE_CUDA_NUM_THREADS>>>(
          num*dim, sp_dim, norm_inp, norm_opt_diff, n_diff);
      CUDA_POST_KERNEL_CHECK;
      // NOLINT_NEXT_LINE(whitespace/operators)
      ConjugateNormSharedBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
          CAFFE_CUDA_NUM_THREADS>>>(
          count, sp_dim, q, n_buff, n_diff, norm_opt_diff,
          norm_inp, norm_inp_diff);
      CUDA_POST_KERNEL_CHECK;
    }else{
      for(int i=0; i<num*dim; i++){
        int c = i % dim / div_factor;
        Dtype p = p_data[c];
        if (exp_p){
          p = exp(p);
        }
        if (p < 1.25){
          // Near p=1 the conjugate exponent q blows up; use the degenerate
          // gradient kernel instead.
          // NOTE(review): n_buff[i] is again a host-side read of gpu_data().
          ConjugateNormBackward2<Dtype><<<CAFFE_GET_BLOCKS(sp_dim),
              CAFFE_CUDA_NUM_THREADS>>>( sp_dim, n_buff[i], norm_opt_diff+i*sp_dim,
              norm_inp+i*sp_dim, norm_inp_diff+i*sp_dim);
          CUDA_POST_KERNEL_CHECK;
        }else{
          Dtype q = p / (p-1.);
          Dtype qnorm = n_buff[i];
          Dtype inpxdiff;
          caffe_gpu_dot(sp_dim, norm_inp+i*sp_dim, norm_opt_diff+i*sp_dim, &inpxdiff);
          //LOG(INFO) << "inpxdiff: " << inpxdiff;
          // NOLINT_NEXT_LINE(whitespace/operators)
          ConjugateNormBackward<Dtype><<<CAFFE_GET_BLOCKS(sp_dim),
              CAFFE_CUDA_NUM_THREADS>>>(
              sp_dim, q, qnorm, inpxdiff, norm_opt_diff+i*sp_dim,
              norm_inp+i*sp_dim, norm_inp_diff+i*sp_dim);
          CUDA_POST_KERNEL_CHECK;
        }
      }
    }
    // Fold the column-layout gradient back into the bottom blob.
    col2imgpu<Dtype><<<CAFFE_GET_BLOCKS(count),
        CAFFE_CUDA_NUM_THREADS>>>(spatial, k_size,
        count, channels, bottom[0]->height(), bottom[0]->width(), norm_inp_diff, bottom_diff);
    CUDA_POST_KERNEL_CHECK;
  }
}
INSTANTIATE_LAYER_GPU_FUNCS(ConvConjugateNormLayer);
} // namespace caffe
|
07ea8e4855215757f44e672b9c1831bf60a45524.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
#include "tensors/device.h"
#include "tensors/gpu/cuda_helpers.h"
namespace marian {
namespace gpu {
Device::~Device() {
  // These checks terminate the process on failure instead of throwing —
  // throwing from a destructor would be fatal anyway.
  CUDA_CHECK(hipSetDevice(deviceId_.no));
  if(data_ != nullptr) {
    CUDA_CHECK(hipFree(data_));
  }
  CUDA_CHECK(hipDeviceSynchronize());
}
// Grow the device allocation to `size` bytes (rounded up by align()),
// preserving any existing contents across the re-allocation.
void Device::reserve(size_t size) {
  size = align(size);
  CUDA_CHECK(hipSetDevice(deviceId_.no));
  ABORT_IF(size < size_ || size == 0,
           "New size must be larger than old size and larger than 0");
  if(data_ != nullptr) {
    // Park the old device contents in host memory while the block is swapped.
    std::vector<uint8_t> hostBackup(size_);
    CUDA_CHECK(hipMemcpy(hostBackup.data(), data_, size_, hipMemcpyDeviceToHost));
    CUDA_CHECK(hipFree(data_));
    LOG(debug, "[memory] Re-allocating from {} to {} bytes on device {}", size_, size, deviceId_.no);
    CUDA_CHECK(hipMalloc(&data_, size));
    CUDA_CHECK(hipMemcpy(data_, hostBackup.data(), size_, hipMemcpyHostToDevice));
    //logCallStack(0);
  } else {
    // First allocation: nothing to preserve.
    LOG(debug, "[memory] Allocating {} bytes in device {}", size, deviceId_.no);
    CUDA_CHECK(hipMalloc(&data_, size));
  }
  size_ = size;
}
} // namespace gpu
} // namespace marian
| 07ea8e4855215757f44e672b9c1831bf60a45524.cu | #include <cuda.h>
#include <iostream>
#include "tensors/device.h"
#include "tensors/gpu/cuda_helpers.h"
namespace marian {
namespace gpu {
Device::~Device() {
  // These checks terminate the process on failure instead of throwing —
  // throwing from a destructor would be fatal anyway.
  CUDA_CHECK(cudaSetDevice(deviceId_.no));
  if(data_ != nullptr) {
    CUDA_CHECK(cudaFree(data_));
  }
  CUDA_CHECK(cudaDeviceSynchronize());
}
// Grow the device allocation to `size` bytes (rounded up by align()),
// preserving any existing contents across the re-allocation.
void Device::reserve(size_t size) {
  size = align(size);
  CUDA_CHECK(cudaSetDevice(deviceId_.no));
  ABORT_IF(size < size_ || size == 0,
           "New size must be larger than old size and larger than 0");
  if(data_ != nullptr) {
    // Park the old device contents in host memory while the block is swapped.
    std::vector<uint8_t> hostBackup(size_);
    CUDA_CHECK(cudaMemcpy(hostBackup.data(), data_, size_, cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaFree(data_));
    LOG(debug, "[memory] Re-allocating from {} to {} bytes on device {}", size_, size, deviceId_.no);
    CUDA_CHECK(cudaMalloc(&data_, size));
    CUDA_CHECK(cudaMemcpy(data_, hostBackup.data(), size_, cudaMemcpyHostToDevice));
    //logCallStack(0);
  } else {
    // First allocation: nothing to preserve.
    LOG(debug, "[memory] Allocating {} bytes in device {}", size, deviceId_.no);
    CUDA_CHECK(cudaMalloc(&data_, size));
  }
  size_ = size;
}
} // namespace gpu
} // namespace marian
|
8f2153b69f36786f0e307a4c3b56b4ec4a4edea5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <iostream>
#include "GraphicGenerator.h"
#include "MathTools.h"
#include "Device.h"
using cpu::IntervalI;
using std::cout;
using std::endl;
using std::string;
extern __global__ void processMandelbrot(uchar4* ptrTabPixels, int width, int height, DomaineMath mathDomain, int max);
extern __global__ void processJulia(uchar4* ptrDevPixels, int width, int height, DomaineMath mathDomain, int max);
// -------------------------
// Constructor & Destructor
// -------------------------
// Builds a fractal image generator for a width x height surface.
// nMin/nMax bound the animated iteration limit driven by variateurN.
// Takes ownership of ptrMathDomain (released in the destructor).
GraphicGenerator::GraphicGenerator(unsigned int width, unsigned int height, unsigned int nMin, unsigned int nMax, DomaineMath *ptrMathDomain, bool isMandelbrot,
		string title) :
		variateurN(IntervalI(nMin, nMax), 1)
{
	// Basic settings
	this->width = width;
	this->height = height;
	this->title = title;
	this->max = nMax;
	// Mathematical settings
	this->ptrMathDomain = ptrMathDomain;
	this->isMandelbrot = isMandelbrot;
	// Pick the kernel-launching member once, so process() is a plain dispatch.
	this->ptrProcessFunction = isMandelbrot ? &GraphicGenerator::processMandelbrotOnGPU : &GraphicGenerator::processJuliaOnGPU;
	this->dg = dim3(8, 8, 1); // grid size: ad-hoc choice, to be tuned
	this->db = dim3(16, 16, 1); // block size: ad-hoc choice, to be tuned
	Device::assertDim(this->dg, this->db);
}
GraphicGenerator::~GraphicGenerator()
{
	// The math domain was handed over at construction; release it here.
	delete ptrMathDomain;
}
// ------------------
// Overrides members
// ------------------
void GraphicGenerator::process(uchar4* ptrDevPixels, int width, int height, const DomaineMath& mathDomain)
{
	// Dispatch to the fractal-specific launcher chosen in the constructor.
	(this->*ptrProcessFunction)(ptrDevPixels, width, height, mathDomain);
}
// Launch the Mandelbrot kernel over the configured grid/block dimensions;
// `max` is the current iteration limit.
void GraphicGenerator::processMandelbrotOnGPU(uchar4* ptrTabPixels, int width, int height, const DomaineMath& mathDomain)
{hipLaunchKernelGGL((
	processMandelbrot), dim3(this->dg), dim3(this->db), 0, 0, ptrTabPixels, width, height, mathDomain, this->max);
}
// Launch the Julia kernel over the configured grid/block dimensions;
// `max` is the current iteration limit.
void GraphicGenerator::processJuliaOnGPU(uchar4* ptrTabPixels, int width, int height, const DomaineMath& mathDomain)
{hipLaunchKernelGGL((
	processJulia), dim3(this->dg), dim3(this->db), 0, 0, ptrTabPixels, width, height, mathDomain, this->max);
}
void GraphicGenerator::animationStep()
{
this->max = variateurN.varierAndGet();
}
float GraphicGenerator::getAnimationPara()
{
return this->max;
}
string GraphicGenerator::getTitle()
{
	// Window/figure title supplied at construction.
	return title;
}
int GraphicGenerator::getW()
{
return this->width;
}
int GraphicGenerator::getH()
{
return this->height;
}
DomaineMath* GraphicGenerator::getDomaineMathInit()
{
	// Initial math domain (still owned by this object).
	return ptrMathDomain;
}
| 8f2153b69f36786f0e307a4c3b56b4ec4a4edea5.cu | #include <assert.h>
#include <iostream>
#include "GraphicGenerator.h"
#include "MathTools.h"
#include "Device.h"
using cpu::IntervalI;
using std::cout;
using std::endl;
using std::string;
extern __global__ void processMandelbrot(uchar4* ptrTabPixels, int width, int height, DomaineMath mathDomain, int max);
extern __global__ void processJulia(uchar4* ptrDevPixels, int width, int height, DomaineMath mathDomain, int max);
// -------------------------
// Constructor & Destructor
// -------------------------
// Builds a fractal image generator for a width x height surface.
// nMin/nMax bound the animated iteration limit driven by variateurN.
// Takes ownership of ptrMathDomain (released in the destructor).
GraphicGenerator::GraphicGenerator(unsigned int width, unsigned int height, unsigned int nMin, unsigned int nMax, DomaineMath *ptrMathDomain, bool isMandelbrot,
		string title) :
		variateurN(IntervalI(nMin, nMax), 1)
{
	// Basic settings
	this->width = width;
	this->height = height;
	this->title = title;
	this->max = nMax;
	// Mathematical settings
	this->ptrMathDomain = ptrMathDomain;
	this->isMandelbrot = isMandelbrot;
	// Pick the kernel-launching member once, so process() is a plain dispatch.
	this->ptrProcessFunction = isMandelbrot ? &GraphicGenerator::processMandelbrotOnGPU : &GraphicGenerator::processJuliaOnGPU;
	this->dg = dim3(8, 8, 1); // grid size: ad-hoc choice, to be tuned
	this->db = dim3(16, 16, 1); // block size: ad-hoc choice, to be tuned
	Device::assertDim(this->dg, this->db);
}
GraphicGenerator::~GraphicGenerator()
{
	// The math domain was handed over at construction; release it here.
	delete ptrMathDomain;
}
// ------------------
// Overrides members
// ------------------
void GraphicGenerator::process(uchar4* ptrDevPixels, int width, int height, const DomaineMath& mathDomain)
{
	// Dispatch to the fractal-specific launcher chosen in the constructor.
	(this->*ptrProcessFunction)(ptrDevPixels, width, height, mathDomain);
}
void GraphicGenerator::processMandelbrotOnGPU(uchar4* ptrTabPixels, int width, int height, const DomaineMath& mathDomain)
{
	// Launch the Mandelbrot kernel with the grid/block sizes fixed at
	// construction; `max` is the current iteration limit.
	processMandelbrot<<<dg, db>>>(ptrTabPixels, width, height, mathDomain, max);
}
void GraphicGenerator::processJuliaOnGPU(uchar4* ptrTabPixels, int width, int height, const DomaineMath& mathDomain)
{
	// Launch the Julia kernel with the grid/block sizes fixed at
	// construction; `max` is the current iteration limit.
	processJulia<<<dg, db>>>(ptrTabPixels, width, height, mathDomain, max);
}
void GraphicGenerator::animationStep()
{
	// Step the iteration limit through [nMin, nMax] for the next frame.
	max = variateurN.varierAndGet();
}
float GraphicGenerator::getAnimationPara()
{
	// The animation parameter is the current iteration limit.
	return max;
}
string GraphicGenerator::getTitle()
{
	// Window/figure title supplied at construction.
	return title;
}
int GraphicGenerator::getW()
{
	// Surface width in pixels.
	return width;
}
int GraphicGenerator::getH()
{
	// Surface height in pixels.
	return height;
}
DomaineMath* GraphicGenerator::getDomaineMathInit()
{
	// Initial math domain (still owned by this object).
	return ptrMathDomain;
}
|
8ed8021a8c78b04cc7c59d7af8feeac0fd452415.hip | // !!! This is a file automatically generated by hipify!!!
/* Norman Ponte; Joey Fernau
* annotation generation test
*/
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <getopt.h>
#include <string>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <driver_functions.h>
#include "../../lib/CycleTimer.h"
extern float toBW(int bytes, float sec);
// Artificial per-thread workload: repeat one of four arithmetic ops a
// million times so each thread has measurable (and divergent) work.
__device__ int test ( int x , int y , int z ) {
    int result = 0;
    switch (x) {
    case 0:
        for (int i = 0; i < 1000000; i++) result += y - z;
        break;
    case 1:
        for (int i = 0; i < 1000000; i++) result += y + z;
        break;
    case 2:
        for (int i = 0; i < 1000000; i++) result += y * z;
        break;
    default:
        for (int i = 0; i < 1000000; i++) result += y / z;
        break;
    }
    return result;
}
// One thread per output element; each thread runs the synthetic workload
// with operands derived from its global index.
__global__ void
test_kernel(int N, float* result) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= N)
        return;  // guard the ragged final block
    result[index] = test(index % 4, index % 13, index % 7);
}
// Allocates a device buffer, runs test_kernel over N elements, copies the
// results back into resultarray, and prints kernel/overall timings.
// NOTE(review): hipMalloc/hipMemcpy return codes are not checked here; only
// the last error is inspected after the fact.
void
mainCuda(int N, float* resultarray) {
    int totalBytes = sizeof(float) * N;
    // compute number of blocks and threads per block
    const int threadsPerBlock = 32;
    const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
    float* device_result;
    hipMalloc((void **) &device_result, totalBytes);
    // start timing after allocation of device memory.
    double startTime = CycleTimer::currentSeconds();
    //hipMemcpy(device_x, xarray, N * sizeof(float),
    //           hipMemcpyHostToDevice);
    double kernelStartTime = CycleTimer::currentSeconds();
    hipLaunchKernelGGL(( test_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, N, device_result);
    // Block until the kernel finishes so the kernel timing is meaningful.
    hipDeviceSynchronize();
    double kernelEndTime = CycleTimer::currentSeconds();
    hipMemcpy(resultarray, device_result, N * sizeof(float),
               hipMemcpyDeviceToHost);
    double endTime = CycleTimer::currentSeconds();
    hipError_t errCode = hipPeekAtLastError();
    if (errCode != hipSuccess) {
        fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n",
                errCode, hipGetErrorString(errCode));
    }
    double kernelDuration = kernelEndTime - kernelStartTime;
    printf("Kernel time: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * kernelDuration, toBW(totalBytes, kernelDuration));
    double overallDuration = endTime - startTime;
    printf("Overall time: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration));
    //std::cout << "{ ";
    //for (int i = 0; i < N; i++) {
    //    std::cout << resultarray[i] << ", ";
    //} std::cout << " }" << std::endl;
    hipFree(device_result);
}
// Convert a byte count over a duration into gigabytes per second.
float toBW(int bytes, float sec) {
    const double bytesPerGB = 1024. * 1024. * 1024.;  // double, as in the original math
    return static_cast<float>(bytes) / bytesPerGB / sec;
}
void mainCuda(int N, float* result);
// Entry point. Usage: prog <N>, where N is the element count.
// Fixes: the original dereferenced argv[1] without checking argc (undefined
// behavior when run with no argument) and leaked the result array.
int main(int argc, char** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <N>\n", argv[0]);
        return 1;
    }
    int N = std::atoi(argv[1]); //1024;
    if (N < 0) {
        fprintf(stderr, "N must be non-negative\n");
        return 1;
    }
    float* resultarray = new float[N];
    mainCuda(N, resultarray);
    delete[] resultarray;  // was leaked in the original
    return 0;
}
| 8ed8021a8c78b04cc7c59d7af8feeac0fd452415.cu | /* Norman Ponte; Joey Fernau
* annotation generation test
*/
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <getopt.h>
#include <string>
#include <cuda.h>
#include <cuda_runtime.h>
#include <driver_functions.h>
#include "../../lib/CycleTimer.h"
extern float toBW(int bytes, float sec);
// Artificial per-thread workload: repeat one of four arithmetic ops a
// million times so each thread has measurable (and divergent) work.
__device__ int test ( int x , int y , int z ) {
    int result = 0;
    switch (x) {
    case 0:
        for (int i = 0; i < 1000000; i++) result += y - z;
        break;
    case 1:
        for (int i = 0; i < 1000000; i++) result += y + z;
        break;
    case 2:
        for (int i = 0; i < 1000000; i++) result += y * z;
        break;
    default:
        for (int i = 0; i < 1000000; i++) result += y / z;
        break;
    }
    return result;
}
// One thread per output element; each thread runs the synthetic workload
// with operands derived from its global index.
__global__ void
test_kernel(int N, float* result) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= N)
        return;  // guard the ragged final block
    result[index] = test(index % 4, index % 13, index % 7);
}
// Allocates a device buffer, runs test_kernel over N elements, copies the
// results back into resultarray, and prints kernel/overall timings.
//
// Fix: cudaThreadSynchronize() has been deprecated since CUDA 4.0; replaced
// with cudaDeviceSynchronize(), which has identical semantics.
// NOTE(review): cudaMalloc/cudaMemcpy return codes are still unchecked; only
// the last error is inspected after the fact, matching the original design.
void
mainCuda(int N, float* resultarray) {
    int totalBytes = sizeof(float) * N;
    // compute number of blocks and threads per block
    const int threadsPerBlock = 32;
    const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
    float* device_result;
    cudaMalloc((void **) &device_result, totalBytes);
    // start timing after allocation of device memory.
    double startTime = CycleTimer::currentSeconds();
    double kernelStartTime = CycleTimer::currentSeconds();
    test_kernel<<<blocks, threadsPerBlock>>>(N, device_result);
    // Block until the kernel finishes so the kernel timing is meaningful.
    cudaDeviceSynchronize();
    double kernelEndTime = CycleTimer::currentSeconds();
    cudaMemcpy(resultarray, device_result, N * sizeof(float),
               cudaMemcpyDeviceToHost);
    double endTime = CycleTimer::currentSeconds();
    cudaError_t errCode = cudaPeekAtLastError();
    if (errCode != cudaSuccess) {
        fprintf(stderr, "WARNING: A CUDA error occured: code=%d, %s\n",
                errCode, cudaGetErrorString(errCode));
    }
    double kernelDuration = kernelEndTime - kernelStartTime;
    printf("Kernel time: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * kernelDuration, toBW(totalBytes, kernelDuration));
    double overallDuration = endTime - startTime;
    printf("Overall time: %.3f ms\t\t[%.3f GB/s]\n", 1000.f * overallDuration, toBW(totalBytes, overallDuration));
    cudaFree(device_result);
}
// Convert a byte count over a duration into gigabytes per second.
float toBW(int bytes, float sec) {
    const double bytesPerGB = 1024. * 1024. * 1024.;  // double, as in the original math
    return static_cast<float>(bytes) / bytesPerGB / sec;
}
void mainCuda(int N, float* result);
// Entry point. Usage: prog <N>, where N is the element count.
// Fixes: the original dereferenced argv[1] without checking argc (undefined
// behavior when run with no argument) and leaked the result array.
int main(int argc, char** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <N>\n", argv[0]);
        return 1;
    }
    int N = std::atoi(argv[1]); //1024;
    if (N < 0) {
        fprintf(stderr, "N must be non-negative\n");
        return 1;
    }
    float* resultarray = new float[N];
    mainCuda(N, resultarray);
    delete[] resultarray;  // was leaked in the original
    return 0;
}
|
059d64c49c5ba365bbe8353fbd647cc9792443fa.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by zhanx on 11/10/2020.
//
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <cstdio>
#include <stdlib.h>
#include <assert.h>
#include <cmath>
#include <queue>
#include "Scene.h"
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
// Abort the program with a diagnostic when a HIP/CUDA API call fails.
// Resets the device first so profiler/trace data is flushed before exit.
void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) {
    if (!result)
        return;  // success: nothing to report
    std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
        file << ":" << line << " '" << func << "' \n";
    std::cerr << hipGetErrorString(result) << "\n";
    hipDeviceReset(); // Make sure we call CUDA Device Reset before exiting
    exit(99);
}
#define CELL_X_NUM 32 // cell number in x axis
#define CELL_Y_NUM 32 // cell number in y axis
// CPU Memory
static dim3 blocks;
static dim3 threads;
static int num_pixles;
static int num_bvhElems;
static int num_triangles;
static Scene* scene_host = NULL;
BVHElem* bvhElem_host = NULL;
Bounds3* bound_host = NULL;
Triangle* triangle_host = NULL;
// GPU Memory
hiprandState_t *devStates;
static Ray* ray_dev = NULL;
BVHElem* bvhElem_dev = NULL;
Material* material_dev = NULL;
Triangle* triangle_dev = NULL;
Bounds3* bound_dev = NULL; // bounding box for objects not triangle
// Unified Memory
float *frameBuffer = NULL;
// cpu internal function definition
void BuildBvhNodeList(Scene* scene);
// Sample an outgoing direction around normal N according to the material's
// scattering model. Only DIFFUSE is implemented (cosine-free uniform
// hemisphere sampling); any other material type returns the zero vector.
// NOTE(review): the per-thread RNG state is copied locally, re-seeded with
// clock64() before each draw, and never written back to state[pid] — confirm
// this is intentional; cuRAND's documented pattern is to initialize once and
// then reuse/store the advanced state.
__device__ Vector3f Material::cudaSample (const Vector3f &wi, const Vector3f &N , hiprandState_t *state, int pid) { // sample a ray by Material properties
    switch(m_type){
        case DIFFUSE:
        {
            // uniform sample on the hemisphere
            hiprandState_t localState = state[pid];
            hiprand_init((unsigned int) clock64(), pid, 0, &localState);
            float x_1 = hiprand_uniform(&localState);
            hiprand_init((unsigned int) clock64(), pid, 0, &localState);
            float x_2 = hiprand_uniform(&localState);
            // Map (x_1, x_2) to a direction in the local hemisphere (z >= 0).
            float z = fabs(1.0f - 2.0f * x_1);
            float r = sqrt(1.0f - z * z), phi = 2 * M_PI * x_2;
            Vector3f localRay(r*std::cos(phi), r*std::sin(phi), z);
            // Rotate the local sample into the frame defined by N.
            return toWorld(localRay, N);
            break;
        }
    }
    return Vector3f(0, 0, 0);
}
// Uniformly sample a point on this triangle (sqrt-warp of two uniform
// variates gives uniform barycentric coverage) and report the constant
// pdf = 1/area.
//
// Fix: persist the advanced RNG state. The original copied state[pid] into
// a local generator and never stored it back, so every call restarted from
// the same sequence position and produced identical "random" samples.
__device__ void Triangle::CudaSample (Intersection &pos, float &pdf, hiprandState_t *state, int pid) {
    hiprandState_t localState = state[pid];
    float x = sqrtf(hiprand_uniform(&localState));
    float y = hiprand_uniform(&localState);
    state[pid] = localState;  // write the advanced state back
    pos.emit = this->m->m_emission;
    pos.coords = v0 * (1.0f - x) + v1 * (x * (1.0f - y)) + v2 * (x * y);
    pos.normal = this->normal;
    pdf = 1.0f / area;
}
// One-time setup for rendering `scene`: chooses the launch configuration,
// flattens the BVH into GPU-friendly arrays, uploads geometry/materials to
// the device, and allocates the unified-memory frame buffer plus the
// per-thread RNG state array.
void InitRender(Scene* scene) {
    // thread limit per block is 1024
    blocks = dim3(CELL_X_NUM, CELL_Y_NUM);
    threads = dim3((scene->width + blocks.x - 1) / blocks.x,
                   (scene->height + blocks.y - 1) / blocks.y);
    num_pixles = scene->width * scene->height;
    num_bvhElems = scene->bvh->nodeCount;
    // convert BVH to BVHElem
    BuildBvhNodeList(scene);
    // Load to CPU Memory
    scene_host = scene;
    checkCudaErrors(hipMalloc(&ray_dev, num_pixles*sizeof(Ray)));
    if (num_bvhElems > 0) {
        checkCudaErrors(hipMalloc(&bvhElem_dev, num_bvhElems*sizeof(BVHElem)));
        checkCudaErrors(hipMemcpy(bvhElem_dev,bvhElem_host,
                num_bvhElems*sizeof(BVHElem), hipMemcpyHostToDevice));
    }
    if (num_triangles > 0) {
        // Deep-copy each triangle's Material into a device array; the
        // triangles' m pointers are repointed at these copies later by the
        // SetTriangleValue kernel.
        checkCudaErrors(hipMalloc(&material_dev, num_triangles*sizeof(Material)));
        for (int i = 0; i<num_triangles; i++) {
            checkCudaErrors(hipMemcpy(&material_dev[i], triangle_host[i].m, sizeof(Material), hipMemcpyHostToDevice));
        }
    }
    if (num_triangles > 0) {
        // NOTE(review): this copies the host-side m pointers verbatim; they
        // are invalid on the device until SetTriangleValue patches them.
        checkCudaErrors(hipMalloc(&triangle_dev, num_triangles*sizeof(Triangle)));
        checkCudaErrors(hipMemcpy(triangle_dev, triangle_host, num_triangles*sizeof(Triangle), hipMemcpyHostToDevice));
        // TODO: allcoate material_dev address to triangle_dev, failed
        // for (int i = 0; i<num_triangles; i++) {
        //     checkCudaErrors(hipMemcpy(&triangle_dev[i].m, &material_dev[i], sizeof(Material*), hipMemcpyDeviceToDevice));
        // }
    }
    if (num_bvhElems > 0) {
        checkCudaErrors(hipMalloc(&bound_dev, num_bvhElems*sizeof(Bounds3)));
        checkCudaErrors(hipMemcpy(bound_dev, bound_host,
                num_bvhElems*sizeof(Bounds3), hipMemcpyHostToDevice));
    }
    // Init frame buffer in Unified Memory
    checkCudaErrors(hipMallocManaged((void **)&frameBuffer, 3*num_pixles*sizeof(float)));
    for (int i = 0; i<3*num_pixles; i++) {
        frameBuffer[i] = 0;
    }
    // init cuda random generator
    int threadNum = blocks.x * blocks.y * threads.x * threads.y;
    checkCudaErrors(hipMalloc((void **)&devStates, threadNum * sizeof(hiprandState_t)));
}
// Release every host- and device-side resource allocated by InitRender().
// Fix: the original left all the file-scope pointers dangling after
// free/hipFree; they are now reset to NULL so a repeated FreeRender (or a
// later InitRender/FreeRender cycle) cannot double-free stale addresses.
void FreeRender() {
    // free CPU
    free(bvhElem_host);    bvhElem_host = NULL;
    free(bound_host);      bound_host = NULL;
    free(triangle_host);   triangle_host = NULL;
    // free GPU
    hipFree(ray_dev);      ray_dev = NULL;
    hipFree(bvhElem_dev);  bvhElem_dev = NULL;
    hipFree(material_dev); material_dev = NULL;
    hipFree(triangle_dev); triangle_dev = NULL;
    hipFree(bound_dev);    bound_dev = NULL;
    hipFree(frameBuffer);  frameBuffer = NULL;
    hipFree(devStates);    devStates = NULL;
}
// Draw a uniform float in (0, 1] from this thread's generator.
//
// Fix: the original copied the state locally, re-ran hiprand_init() with a
// clock64() seed on every call, and discarded the advanced state. That both
// defeats the one-time initialization performed by SetKernelRand() and
// yields poorly distributed values. The documented cuRAND pattern is:
// initialize once, then repeatedly draw from (and thereby advance) the
// stored per-thread state.
__device__ float cudaRandomFloat(hiprandState_t *state, int pid) {
    return hiprand_uniform(&state[pid]);
}
// Pick a point on an emissive triangle, with probability proportional to
// triangle area, and return it in `pos` together with its area pdf.
// Two passes over the triangle list: first to total the emissive area,
// then to locate the triangle containing the sampled area coordinate.
__device__ void sampleLight(Intersection &pos, float &pdf,
        int triangleNum, int pid,
        Triangle* triangles, hiprandState_t *state) {
    float emit_area_sum = 0;
    // assume we only have one light in the scene
    for (int k = 0; k < triangleNum; k++) {
        if (triangles[k].cudaHasEmit()){
            emit_area_sum += triangles[k].getArea();
        }
    }
    // Uniform area coordinate in [0, total emissive area].
    float p = cudaRandomFloat(state, pid) * emit_area_sum;
    emit_area_sum = 0;
    for (int k = 0; k < triangleNum; k++) {
        if (triangles[k].cudaHasEmit()){
            emit_area_sum += triangles[k].getArea();
            if (p <= emit_area_sum){
                triangles[k].CudaSample(pos, pdf, state, pid);
                break;
            }
        }
    }
}
// Find the nearest ray/triangle intersection by an explicit (iterative)
// depth-first traversal of the flattened BVH.
// NOTE(review): `visited` and `istack` have a fixed capacity of 32, but the
// init loop writes visited[i] for all i < bvhElemNum — this overflows the
// stack array whenever the BVH has more than 32 nodes. The push path also
// checks `curSize > 32` only AFTER writing istack[curSize], and merely
// prints instead of aborting. Confirm bvhElemNum <= 32 is guaranteed.
__device__ Intersection SceneIntersect(int pid, Ray ray, int bvhElemNum, int triangleNum,
        BVHElem* bvhElems, Triangle* triangles, Bounds3* bounds) {
    Intersection inter;
    inter.coords = Vector3f(-1);
    if (bvhElems == NULL)
        return inter;
    bool visited[32];
    for (int i = 0; i<bvhElemNum; i++) {
        bvhElems[i].visited = false;
        visited[i] = false;
    }
    // Per-axis ray-direction signs, consumed by the slab test in IntersectP.
    int arr[3] = {(ray.direction.x <= 0), (ray.direction.y <= 0), (ray.direction.z <= 0)};
    // DFS BVHElem
    int istack[32];
    istack[0] = bvhElems[0].boundIdx;
    int curSize = 1;
    while (curSize > 0) {
        BVHElem &curElem = bvhElems[istack[curSize - 1]];
        bool vl = (curElem.leftIdx < 0 || visited[curElem.leftIdx]);
        bool vr = (curElem.rightIdx < 0 || visited[curElem.rightIdx]);
        if (vl && vr) {
            // Both children handled: retire this node from the stack.
            visited[curElem.boundIdx] = true;
            //curElem.visited = true;
            curSize--;
            if (curElem.isLeaf) { // node is leaf
                for (int a = 0; a < curElem.triNum; a++) { // find intersection with all triangles of this object
                    Intersection ci = triangles[curElem.triStartIdx + a].getIntersection(ray);
                    if (ci.happened && (ci.distance<inter.distance)) {
                        inter = ci;  // keep the closest hit so far
                    }
                }
            }
        }
        else {
            // Push children whose bounding boxes the ray actually enters;
            // mark the misses visited so they are never pushed later.
            if (curElem.leftIdx >= 0 && !visited[curElem.leftIdx]) {
                // if (curElem.leftIdx >= 0 && !bvhElems[curElem.leftIdx].visited) {
                BVHElem &left = bvhElems[curElem.leftIdx];
                if (bounds[left.boundIdx].IntersectP(ray,
                        Vector3f(1 / (float) ray.direction.x, 1 / (float) ray.direction.y,
                                1 / (float) ray.direction.z), arr)) {
                    // if hit left bounding box
                    istack[curSize] = curElem.leftIdx;
                    curSize++;
                } else {
                    visited[left.boundIdx] = true;
                    //left.visited = true;
                }
            }
            if (curElem.rightIdx >= 0 && !visited[curElem.rightIdx]) {
                BVHElem &right = bvhElems[curElem.rightIdx];
                if (bounds[right.boundIdx].IntersectP(ray,
                        Vector3f(1 / (float) ray.direction.x, 1 / (float) ray.direction.y,
                                1 / (float) ray.direction.z), arr)) {
                    istack[curSize] = curElem.rightIdx;
                    curSize++;
                } else {
                    visited[right.boundIdx] = true;
                    //right.visited = true;
                }
            }
            if (curSize > 32) {
                // TODO: handle CUDA kernel error
                printf("stack overflow %d\n", 32);
            }
        }
    }
    return inter;
}
// Iterative path tracer for one pixel's ray: at each bounce it accumulates
// direct lighting via explicit light sampling, and continues an indirect
// bounce with Russian-roulette termination. Because device recursion is
// avoided, per-depth results are kept in vstack and folded together at the
// end: slot 2d holds depth-d direct light, slot 2d+1 the indirect throughput.
__device__ Vector3f CalcColor(int pid, int bvhElemNum, int triangleNum,
        Ray* rays, BVHElem* bvhElems, Triangle* triangles, Bounds3* bounds, Material* materials,
        hiprandState_t *state) {
    Vector3f backgroundColor = Vector3f(0.235294, 0.67451, 0.843137);
    Ray curRay = rays[pid];
    Vector3f pixelColor = Vector3f(0,0,0);
    int maxDepth = 3; // maxDepth cannot exceed stackSize/2
    float RussianRoulette = 0.8;
    Vector3f vstack[32];
    for (int i = 0; i<32; i++) {
        vstack[i] = Vector3f(0, 0, 0);
    }
    // vstack layout, two slots per depth:
    //   slots 0,1: depth 0 direct color + indirect throughput
    //   slots 2,3: depth 1 direct color + indirect throughput, etc.
    int curDepth = 0;
    for (int d = 0; d <= maxDepth; d++) {
        curDepth = d;
        Intersection intersection = SceneIntersect(pid, curRay, bvhElemNum, triangleNum, bvhElems, triangles, bounds);
        if(!intersection.happened) {
            // Ray escaped the scene: terminate with the background color.
            vstack[d*2+0] = backgroundColor;
            break;
        }
        if (intersection.m != NULL && intersection.m->cudaHasEmission()) {
            // Hit a light directly: terminate with full emission.
            vstack[d*2+0] = Vector3f(1.0,1.0,1.0);
            break;
        }
        // contribution from the light source
        Vector3f dir_color = Vector3f(0, 0, 0);
        float pdf_light;
        Intersection lightPoint;
        sampleLight(lightPoint, pdf_light, triangleNum, pid, triangles, state);
        lightPoint.normal.normalized();
        Vector3f w_dir = normalize(lightPoint.coords - intersection.coords);
        Ray shadowRay(intersection.coords, w_dir);
        Intersection shadowRayInter = SceneIntersect(pid, shadowRay, bvhElemNum, triangleNum, bvhElems, triangles, bounds);
        // if light ray not blocked in the middle
        if (!shadowRayInter.happened || shadowRayInter.m->cudaHasEmission())
        {
            if (pdf_light < FLT_EPSILON)
                pdf_light = FLT_EPSILON;  // avoid division blow-up
            Vector3f f_r1 = intersection.m->eval(-curRay.direction, w_dir, intersection.normal);
            // Squared distance to the light for the solid-angle conversion.
            float kk = dotProduct(intersection.coords - lightPoint.coords, intersection.coords - lightPoint.coords);
            dir_color = lightPoint.emit * f_r1 * dotProduct(w_dir, intersection.normal)
                    * dotProduct(-w_dir, lightPoint.normal) / kk / pdf_light;
        }
        vstack[d*2+0] = dir_color;
        // contribution from other objects
        // Russian Roulette
        bool needBreak = true;
        Vector3f indir_color = Vector3f(0,0,0);
        float testrr = cudaRandomFloat(state, pid);
        Vector3f randomDir;
        if (testrr <= RussianRoulette) {
            randomDir = intersection.m->cudaSample(-curRay.direction, intersection.normal, state, pid);
            randomDir = randomDir.normalized();
            float pdf_object = intersection.m->pdf(-curRay.direction, randomDir,intersection.normal);
            Ray ro(intersection.coords, randomDir);
            Intersection objRayInter = SceneIntersect(pid, ro, bvhElemNum, triangleNum, bvhElems, triangles, bounds);
            if (objRayInter.happened)
                if (!objRayInter.m->cudaHasEmission()) {
                    if (pdf_object < FLT_EPSILON)
                        pdf_object = FLT_EPSILON;
                    Vector3f f_r2 = intersection.m->eval(-curRay.direction, ro.direction, intersection.normal);
                    // Throughput for the next bounce, unbiased by the RR factor.
                    indir_color = f_r2 * dotProduct(ro.direction, intersection.normal) / pdf_object / RussianRoulette;
                    curRay = ro;
                    needBreak = false;
                }
        }
        vstack[d * 2 + 1] = indir_color;
        if (needBreak)
            break;
    }
    // Fold the stack from the deepest bounce back to depth 0:
    // each depth's indirect slot is weighted by everything gathered deeper.
    for (int i = curDepth; i>0; i--) {
        // in_dir + dir
        Vector3f prev = vstack[i*2] + vstack[i*2+1];
        vstack[2*i-1] = vstack[2*i-1] * prev;
    }
    pixelColor = vstack[0] + vstack[1];
    return pixelColor;
}
// Initialize one RNG state per pixel thread: identical seed everywhere, a
// distinct subsequence per flattened thread id. (h is currently unused.)
__global__ void SetKernelRand(hiprandState_t *state, int h, int w)
{
    const int col = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int row = (blockIdx.y * blockDim.y) + threadIdx.y;
    const int id = col + row * w;
    hiprand_init(100*sizeof(hiprandState_t), id, 0, &state[id]);
}
// Convert an angle from degrees to radians.
__device__ float deg2rad(const float& deg) {
    return deg * M_PI / 180.0;
}
// One thread per pixel: build the primary camera ray through pixel (i, j)
// using a pinhole projection. The eye position is hard-coded for the
// Cornell-box style scene.
__global__ void GenerateRay(int width, int height, double fov, float* fb, Ray* rays) {
    Vector3f eye_pos(278, 273, -800);
    float scale = tan(deg2rad(fov * 0.5));
    float imageAspectRatio = width / (float)height;
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (i < (float)width && j < (float)height)
    {
        int pixelIdx = i + (j * width);
        // Map the pixel center to normalized device coordinates in [-1, 1],
        // scaled by the field of view and the aspect ratio.
        float x = (2 * (i + 0.5) / (float)width - 1) *
                imageAspectRatio * scale;
        float y = (1 - 2 * (j + 0.5) / (float)height) * scale;
        Vector3f dir = normalize(Vector3f(-x, y, 1));
        Ray &ray = rays[pixelIdx];
        ray.origin = eye_pos;
        ray.direction = dir;
    }
}
// One thread per pixel: trace spp independent paths through the pixel's
// primary ray and store the averaged RGB result into the frame buffer
// (fb holds three floats per pixel).
__global__ void CastRay(int width, int height, float* fb, Ray* rays,
        int bvhElemNum, int triangleNum,
        BVHElem* bvhElems, Triangle* triangles, Bounds3* bounds, Material* materials,
        hiprandState_t *state) {
    int spp = 8;  // samples per pixel
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (i < width && j < height) {
        int pixelIdx = i + (j * width);
        fb[pixelIdx*3+0] = 0;
        fb[pixelIdx*3+1] = 0;
        fb[pixelIdx*3+2] = 0;
        for (int time = 0; time < spp; time++) {
            Vector3f c = CalcColor(pixelIdx, bvhElemNum, triangleNum,
                    rays, bvhElems, triangles, bounds, materials, state);
            fb[pixelIdx*3+0] += c.x;
            fb[pixelIdx*3+1] += c.y;
            fb[pixelIdx*3+2] += c.z;
        }
        // Average the accumulated samples.
        fb[pixelIdx*3+0] /= (float) spp;
        fb[pixelIdx*3+1] /= (float) spp;
        fb[pixelIdx*3+2] /= (float) spp;
    }
}
// Points each device triangle at its matching device-side material
// (triangles[i].m = materials + i). Work is distributed across the whole
// 2D grid with a grid-stride loop; the original had EVERY launched thread
// redundantly write the entire array — wasted bandwidth and, formally, a
// data race on each triangles[i].m.
__global__ void SetTriangleValue(int triangleNum, Triangle* triangles, Material* materials) {
    int xDim = gridDim.x * blockDim.x;
    int tid = (blockIdx.x * blockDim.x + threadIdx.x) +
              (blockIdx.y * blockDim.y + threadIdx.y) * xDim;
    int stride = xDim * gridDim.y * blockDim.y;
    for (int i = tid; i < triangleNum; i += stride) {
        triangles[i].m = materials + i;
    }
}
// Runs the full pipeline (RNG seeding, material binding, primary rays,
// path tracing), then gamma-corrects the unified-memory frame buffer on
// the host and writes it to image.ppm. Assumes InitRender() already ran.
void Render() {
    hipLaunchKernelGGL(( SetKernelRand), dim3(blocks), dim3(threads), 0, 0, devStates, scene_host->height, scene_host->width);
    hipLaunchKernelGGL(( SetTriangleValue), dim3(blocks), dim3(threads), 0, 0, num_triangles, triangle_dev, material_dev);
    hipLaunchKernelGGL(( GenerateRay), dim3(blocks), dim3(threads), 0, 0, scene_host->width, scene_host->height, scene_host->fov, frameBuffer, ray_dev);
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipDeviceSynchronize());
    hipLaunchKernelGGL(( CastRay), dim3(blocks), dim3(threads), 0, 0, scene_host->width, scene_host->height, frameBuffer,
                       ray_dev,num_bvhElems, num_triangles,
                       bvhElem_dev, triangle_dev, bound_dev, material_dev,
                       devStates);
    checkCudaErrors(hipGetLastError());
    // frameBuffer is managed memory: synchronize before the host reads it.
    checkCudaErrors(hipDeviceSynchronize());
    // save color data to ppm file
    FILE* fp = fopen("image.ppm", "wb");
    if (fp == NULL) {
        // The original dereferenced fp unconditionally and crashed if the
        // file could not be created (e.g. read-only working directory).
        fprintf(stderr, "Render: failed to open image.ppm for writing\n");
        return;
    }
    (void)fprintf(fp, "P6\n%d %d\n255\n", scene_host->width, scene_host->height);
    for (auto i = 0; i < num_pixles; ++i) {
        unsigned char color[3];
        // Clamp to [0,1] then gamma-correct with exponent 0.6.
        color[0] = (unsigned char)(255 * ::pow(clamp(0, 1, frameBuffer[i*3+0]), 0.6f));
        color[1] = (unsigned char)(255 * ::pow(clamp(0, 1, frameBuffer[i*3+1]), 0.6f));
        color[2] = (unsigned char)(255 * ::pow(clamp(0, 1, frameBuffer[i*3+2]), 0.6f));
        fwrite(color, 1, 3, fp);
    }
    fclose(fp);
}
// Flattens the pointer-based BVH in `scene` into the flat host arrays
// bvhElem_host / bound_host / triangle_host (indexed by BVHBuildNode::nodeIdx)
// so the tree can be copied to the device and traversed iteratively.
// Side effects: allocates the three host arrays and accumulates into the
// global num_triangles.
// NOTE(review): num_triangles is only ever incremented here — assumes it
// starts at 0 and this function runs once; verify callers. malloc results
// are not checked.
void BuildBvhNodeList(Scene* scene) {
    bvhElem_host = (BVHElem*) malloc(num_bvhElems * sizeof(BVHElem));
    bound_host = (Bounds3*) malloc(num_bvhElems * sizeof(Bounds3));
    std::vector<int> leafIdx;
    // Count triangles across all mesh objects to size triangle_host.
    for (int i = 0; i<scene->objects.size(); i++) {
        MeshTriangle* mt = (MeshTriangle*)(scene->objects[i]);
        num_triangles += mt->triangles.size();
    }
    triangle_host = (Triangle*) malloc(num_triangles * sizeof(Triangle));
    // BFS for BVH Tree
    BVHBuildNode* root = scene->bvh->root;
    std::queue<BVHBuildNode*> nodeQueue;
    if (root != NULL)
        nodeQueue.push(root);
    int triCount = 0;  // running offset into triangle_host
    while (!nodeQueue.empty())
    {
        BVHBuildNode* nd = nodeQueue.front();
        BVHElem &curElem = bvhElem_host[nd->nodeIdx];
        curElem.boundIdx = nd->nodeIdx;
        // Children are referenced by index; -1 marks "no child".
        curElem.leftIdx = (nd->left) ? (nd->left->nodeIdx) : -1;
        curElem.rightIdx = (nd->right) ? (nd->right->nodeIdx) : -1;
        curElem.isLeaf = (nd->object);
        if (curElem.isLeaf) {
            // Leaf: copy the object's triangles into the flat array and
            // record the [triStartIdx, triStartIdx + triNum) slice.
            MeshTriangle* mt = (MeshTriangle*)(nd->object);
            curElem.triStartIdx = triCount;
            curElem.triNum = mt->triangles.size();
            for (int j = 0; j < curElem.triNum; j++) {
                triangle_host[curElem.triStartIdx + j] = mt->triangles[j];
                triangle_host[curElem.triStartIdx + j].m = mt->m;
                triCount++;
            }
            leafIdx.push_back(curElem.boundIdx);
        }
        else {
            curElem.triStartIdx = -1;
            curElem.triNum = 0;
        }
        bound_host[nd->nodeIdx] = nd->bounds;
        nodeQueue.pop();
        if (nd->left)
        {
            nodeQueue.push(nd->left);
        }
        if (nd->right)
        {
            nodeQueue.push(nd->right);
        }
    }
}
| 059d64c49c5ba365bbe8353fbd647cc9792443fa.cu | //
// Created by zhanx on 11/10/2020.
//
#include <cuda.h>
#include <curand_kernel.h>
#include <cstdio>
#include <stdlib.h>
#include <assert.h>
#include <cmath>
#include <queue>
#include "Scene.h"
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
// Aborts the process with a diagnostic when a CUDA API call fails.
// Reached through the checkCudaErrors() macro, which stringifies the call
// and supplies the call site's file/line.
void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) {
    if (result == cudaSuccess)
        return;
    std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
              file << ":" << line << " '" << func << "' \n";
    std::cerr << cudaGetErrorString(result) << "\n";
    cudaDeviceReset(); // Make sure we call CUDA Device Reset before exiting
    exit(99);
}
#define CELL_X_NUM 32 // cell number in x axis
#define CELL_Y_NUM 32 // cell number in y axis
// CPU Memory
static dim3 blocks;
static dim3 threads;
static int num_pixles;
static int num_bvhElems;
static int num_triangles;
static Scene* scene_host = NULL;
BVHElem* bvhElem_host = NULL;
Bounds3* bound_host = NULL;
Triangle* triangle_host = NULL;
// GPU Memory
curandState *devStates;
static Ray* ray_dev = NULL;
BVHElem* bvhElem_dev = NULL;
Material* material_dev = NULL;
Triangle* triangle_dev = NULL;
Bounds3* bound_dev = NULL; // bounding box for objects not triangle
// Unified Memory
float *frameBuffer = NULL;
// cpu internal function definition
void BuildBvhNodeList(Scene* scene);
// Samples an outgoing direction for this material at a surface with normal N.
// `wi` is the incoming direction (unused by the DIFFUSE branch).
// Returns a world-space unit direction, or (0,0,0) for unhandled m_type values.
__device__ Vector3f Material::cudaSample (const Vector3f &wi, const Vector3f &N , curandState *state, int pid) { // sample a ray by Material properties
    switch(m_type){
        case DIFFUSE:
        {
            // uniform sample on the hemisphere
            curandState localState = state[pid];
            // NOTE(review): re-seeding with clock64() before every draw is
            // slow and nondeterministic, and discards the per-thread
            // sequence set up by SetKernelRand; the local copy is also never
            // written back to state[pid]. Consider drawing from the
            // persistent state instead — TODO confirm before changing RNG
            // behavior.
            curand_init((unsigned int) clock64(), pid, 0, &localState);
            float x_1 = curand_uniform(&localState);
            curand_init((unsigned int) clock64(), pid, 0, &localState);
            float x_2 = curand_uniform(&localState);
            // Map (x_1, x_2) to a direction on the local hemisphere (z up),
            // then rotate it into the frame around N.
            float z = fabs(1.0f - 2.0f * x_1);
            float r = sqrt(1.0f - z * z), phi = 2 * M_PI * x_2;
            Vector3f localRay(r*std::cos(phi), r*std::sin(phi), z);
            return toWorld(localRay, N);
            break;
        }
    }
    // Fallback for material types without a sampling strategy.
    return Vector3f(0, 0, 0);
}
// Uniformly samples a point on this triangle (sqrt trick for uniform
// barycentric coordinates) and reports the constant area pdf (1/area).
// The advanced RNG state is written back to state[pid]; the original only
// mutated a local copy, so every call for a given thread re-drew the same
// (x, y) pair — the standard cuRAND pattern is load / draw / store.
__device__ void Triangle::CudaSample (Intersection &pos, float &pdf, curandState *state, int pid) {
    curandState localState = state[pid];
    float x = sqrt(curand_uniform(&localState)), y = curand_uniform(&localState);
    state[pid] = localState;  // persist so the next sample differs
    pos.emit = this->m->m_emission;
    pos.coords = v0 * (1.0f - x) + v1 * (x * (1.0f - y)) + v2 * (x * y);
    pos.normal = this->normal;
    pdf = 1.0f / area;
}
// One-time device setup: computes the launch configuration, flattens the
// BVH, copies scene data (BVH nodes, triangles, materials, bounds) to the
// device, allocates the unified-memory frame buffer, and reserves one
// curand state per launched thread. Must be called before Render().
void InitRender(Scene* scene) {
    // thread limit per block is 1024
    // NOTE(review): `blocks` holds the fixed 32x32 partition and `threads`
    // the per-cell ceil-divided extent — the names are swapped relative to
    // the usual grid/block convention; the launches use them consistently.
    blocks = dim3(CELL_X_NUM, CELL_Y_NUM);
    threads = dim3((scene->width + blocks.x - 1) / blocks.x,
                   (scene->height + blocks.y - 1) / blocks.y);
    num_pixles = scene->width * scene->height;
    num_bvhElems = scene->bvh->nodeCount;
    // convert BVH to BVHElem
    BuildBvhNodeList(scene);
    // Load to CPU Memory
    scene_host = scene;
    checkCudaErrors(cudaMalloc(&ray_dev, num_pixles*sizeof(Ray)));
    if (num_bvhElems > 0) {
        checkCudaErrors(cudaMalloc(&bvhElem_dev, num_bvhElems*sizeof(BVHElem)));
        checkCudaErrors(cudaMemcpy(bvhElem_dev,bvhElem_host,
                                   num_bvhElems*sizeof(BVHElem), cudaMemcpyHostToDevice));
    }
    if (num_triangles > 0) {
        // Deep-copy each triangle's material into a device array; the
        // host-side Material pointers inside Triangle are meaningless on
        // the device (SetTriangleValue re-links them at render time).
        checkCudaErrors(cudaMalloc(&material_dev, num_triangles*sizeof(Material)));
        for (int i = 0; i<num_triangles; i++) {
            checkCudaErrors(cudaMemcpy(&material_dev[i], triangle_host[i].m, sizeof(Material), cudaMemcpyHostToDevice));
        }
    }
    if (num_triangles > 0) {
        checkCudaErrors(cudaMalloc(&triangle_dev, num_triangles*sizeof(Triangle)));
        checkCudaErrors(cudaMemcpy(triangle_dev, triangle_host, num_triangles*sizeof(Triangle), cudaMemcpyHostToDevice));
        // TODO: allcoate material_dev address to triangle_dev, failed
        // for (int i = 0; i<num_triangles; i++) {
        //     checkCudaErrors(cudaMemcpy(&triangle_dev[i].m, &material_dev[i], sizeof(Material*), cudaMemcpyDeviceToDevice));
        // }
    }
    if (num_bvhElems > 0) {
        checkCudaErrors(cudaMalloc(&bound_dev, num_bvhElems*sizeof(Bounds3)));
        checkCudaErrors(cudaMemcpy(bound_dev, bound_host,
                                   num_bvhElems*sizeof(Bounds3), cudaMemcpyHostToDevice));
    }
    // Init frame buffer in Unified Memory
    checkCudaErrors(cudaMallocManaged((void **)&frameBuffer, 3*num_pixles*sizeof(float)));
    for (int i = 0; i<3*num_pixles; i++) {
        frameBuffer[i] = 0;
    }
    // init cuda random generator
    int threadNum = blocks.x * blocks.y * threads.x * threads.y;
    checkCudaErrors(cudaMalloc((void **)&devStates, threadNum * sizeof(curandState)));
}
// Releases every host and device allocation made by InitRender().
// Each pointer is reset to NULL afterwards so a stray second call (or a
// later InitRender()) cannot double-free or touch dangling memory —
// free(NULL)/cudaFree(NULL) are both no-ops.
void FreeRender() {
    // free CPU
    free(bvhElem_host);     bvhElem_host = NULL;
    free(bound_host);       bound_host = NULL;
    free(triangle_host);    triangle_host = NULL;
    // free GPU
    cudaFree(ray_dev);      ray_dev = NULL;
    cudaFree(bvhElem_dev);  bvhElem_dev = NULL;
    cudaFree(material_dev); material_dev = NULL;
    cudaFree(triangle_dev); triangle_dev = NULL;
    cudaFree(bound_dev);    bound_dev = NULL;
    cudaFree(frameBuffer);  frameBuffer = NULL;
    cudaFree(devStates);    devStates = NULL;
}
// Draws one uniform float in (0, 1] for thread `pid` and advances that
// thread's persistent RNG state. The original re-seeded with clock64() on
// every call, which is slow, nondeterministic, and defeats cuRAND's
// per-sequence statistical guarantees — the states are already seeded once
// by SetKernelRand, so the canonical pattern is load / draw / store.
__device__ float cudaRandomFloat(curandState *state, int pid) {
    curandState localState = state[pid];
    float r = curand_uniform(&localState);
    state[pid] = localState;  // persist so the next draw differs
    return r;
}
// Picks one emissive triangle with probability proportional to its area,
// then samples a point on its surface. Outputs the sampled point in `pos`
// and the per-triangle area pdf in `pdf`.
__device__ void sampleLight(Intersection &pos, float &pdf,
                            int triangleNum, int pid,
                            Triangle* triangles, curandState *state) {
    // Pass 1: total emissive area in the scene.
    float totalArea = 0;
    for (int t = 0; t < triangleNum; t++) {
        if (triangles[t].cudaHasEmit())
            totalArea += triangles[t].getArea();
    }
    // Pass 2: walk the emitters again until the running prefix sum crosses
    // a uniformly drawn threshold in [0, totalArea).
    float threshold = cudaRandomFloat(state, pid) * totalArea;
    float prefix = 0;
    for (int t = 0; t < triangleNum; t++) {
        if (!triangles[t].cudaHasEmit())
            continue;
        prefix += triangles[t].getArea();
        if (threshold <= prefix) {
            triangles[t].CudaSample(pos, pdf, state, pid);
            break;
        }
    }
}
// Finds the nearest ray/scene intersection by iteratively traversing the
// flattened BVH with an explicit index stack (no recursion on the device).
// Returns an Intersection whose `happened` flag is false when nothing is hit.
// NOTE(review): `visited` and `istack` are fixed at 32 entries but are
// indexed by node indices up to bvhElemNum — scenes with more than 32 BVH
// nodes overflow both arrays. The overflow check below also fires only
// AFTER istack[32] has been written. TODO: size these from a shared
// constant and check before pushing.
__device__ Intersection SceneIntersect(int pid, Ray ray, int bvhElemNum, int triangleNum,
                                       BVHElem* bvhElems, Triangle* triangles, Bounds3* bounds) {
    Intersection inter;
    inter.coords = Vector3f(-1);
    if (bvhElems == NULL)
        return inter;
    bool visited[32];
    for (int i = 0; i<bvhElemNum; i++) {
        bvhElems[i].visited = false;
        visited[i] = false;
    }
    // Per-axis ray direction signs, used by the slab test in IntersectP.
    int arr[3] = {(ray.direction.x <= 0), (ray.direction.y <= 0), (ray.direction.z <= 0)};
    // DFS BVHElem
    int istack[32];
    istack[0] = bvhElems[0].boundIdx;
    int curSize = 1;
    while (curSize > 0) {
        BVHElem &curElem = bvhElems[istack[curSize - 1]];
        // A child counts as done when it is absent (-1) or already visited.
        bool vl = (curElem.leftIdx < 0 || visited[curElem.leftIdx]);
        bool vr = (curElem.rightIdx < 0 || visited[curElem.rightIdx]);
        if (vl && vr) {
            // Both subtrees handled: mark this node done and pop it.
            visited[curElem.boundIdx] = true;
            //curElem.visited = true;
            curSize--;
            if (curElem.isLeaf) { // node is leaf
                for (int a = 0; a < curElem.triNum; a++) { // find intersection with all triangles of this object
                    Intersection ci = triangles[curElem.triStartIdx + a].getIntersection(ray);
                    if (ci.happened && (ci.distance<inter.distance)) {
                        inter = ci;  // keep the nearest hit
                    }
                }
            }
        }
        else {
            // Push children whose bounding boxes the ray enters; prune
            // (mark visited) the ones it misses.
            if (curElem.leftIdx >= 0 && !visited[curElem.leftIdx]) {
                // if (curElem.leftIdx >= 0 && !bvhElems[curElem.leftIdx].visited) {
                BVHElem &left = bvhElems[curElem.leftIdx];
                if (bounds[left.boundIdx].IntersectP(ray,
                                                     Vector3f(1 / (float) ray.direction.x, 1 / (float) ray.direction.y,
                                                              1 / (float) ray.direction.z), arr)) {
                    // if hit left bounding box
                    istack[curSize] = curElem.leftIdx;
                    curSize++;
                } else {
                    visited[left.boundIdx] = true;
                    //left.visited = true;
                }
            }
            if (curElem.rightIdx >= 0 && !visited[curElem.rightIdx]) {
                BVHElem &right = bvhElems[curElem.rightIdx];
                if (bounds[right.boundIdx].IntersectP(ray,
                                                      Vector3f(1 / (float) ray.direction.x, 1 / (float) ray.direction.y,
                                                               1 / (float) ray.direction.z), arr)) {
                    istack[curSize] = curElem.rightIdx;
                    curSize++;
                } else {
                    visited[right.boundIdx] = true;
                    //right.visited = true;
                }
            }
            if (curSize > 32) {
                // TODO: handle CUDA kernel error
                printf("stack overflow %d\n", 32);
            }
        }
    }
    return inter;
}
// Evaluates one path-traced radiance sample for pixel `pid`.
// Recursion is emulated with a flat stack because device-side recursion is
// impractical here: for each bounce d, vstack[2d] holds the direct-light
// contribution and vstack[2d+1] the coefficient that scales everything
// gathered from deeper bounces. After the forward walk, the stack is folded
// back down to the depth-0 color.
__device__ Vector3f CalcColor(int pid, int bvhElemNum, int triangleNum,
                              Ray* rays, BVHElem* bvhElems, Triangle* triangles, Bounds3* bounds, Material* materials,
                              curandState *state) {
    Vector3f backgroundColor = Vector3f(0.235294, 0.67451, 0.843137);
    Ray curRay = rays[pid];
    Vector3f pixelColor = Vector3f(0,0,0);
    int maxDepth = 3; // maxDepth cannot exceed stackSize/2
    float RussianRoulette = 0.8;  // continuation probability per bounce
    Vector3f vstack[32];
    for (int i = 0; i<32; i++) {
        vstack[i] = Vector3f(0, 0, 0);
    }
    // Stack layout (translated from the original note): slots 0,1 hold the
    // direct-light color and the indirect-bounce coefficient for depth 0;
    // slots 2,3 hold the same pair for depth 1; and so on.
    int curDepth = 0;
    for (int d = 0; d <= maxDepth; d++) {
        curDepth = d;
        Intersection intersection = SceneIntersect(pid, curRay, bvhElemNum, triangleNum, bvhElems, triangles, bounds);
        if(!intersection.happened) {
            // Ray escaped the scene: terminate with the background color.
            vstack[d*2+0] = backgroundColor;
            break;
        }
        if (intersection.m != NULL && intersection.m->cudaHasEmission()) {
            // Hit a light directly: terminate with full emission.
            vstack[d*2+0] = Vector3f(1.0,1.0,1.0);
            break;
        }
        // contribution from the light source
        Vector3f dir_color = Vector3f(0, 0, 0);
        float pdf_light;
        Intersection lightPoint;
        sampleLight(lightPoint, pdf_light, triangleNum, pid, triangles, state);
        lightPoint.normal.normalized();
        Vector3f w_dir = normalize(lightPoint.coords - intersection.coords);
        Ray shadowRay(intersection.coords, w_dir);
        Intersection shadowRayInter = SceneIntersect(pid, shadowRay, bvhElemNum, triangleNum, bvhElems, triangles, bounds);
        // if light ray not blocked in the middle
        if (!shadowRayInter.happened || shadowRayInter.m->cudaHasEmission())
        {
            if (pdf_light < FLT_EPSILON)
                pdf_light = FLT_EPSILON;  // avoid division blow-up
            Vector3f f_r1 = intersection.m->eval(-curRay.direction, w_dir, intersection.normal);
            // kk = squared distance to the sampled light point.
            float kk = dotProduct(intersection.coords - lightPoint.coords, intersection.coords - lightPoint.coords);
            dir_color = lightPoint.emit * f_r1 * dotProduct(w_dir, intersection.normal)
                        * dotProduct(-w_dir, lightPoint.normal) / kk / pdf_light;
        }
        vstack[d*2+0] = dir_color;
        // contribution from other objects
        // Russian Roulette
        bool needBreak = true;
        Vector3f indir_color = Vector3f(0,0,0);
        float testrr = cudaRandomFloat(state, pid);
        Vector3f randomDir;
        if (testrr <= RussianRoulette) {
            randomDir = intersection.m->cudaSample(-curRay.direction, intersection.normal, state, pid);
            randomDir = randomDir.normalized();
            float pdf_object = intersection.m->pdf(-curRay.direction, randomDir,intersection.normal);
            Ray ro(intersection.coords, randomDir);
            Intersection objRayInter = SceneIntersect(pid, ro, bvhElemNum, triangleNum, bvhElems, triangles, bounds);
            if (objRayInter.happened)
                if (!objRayInter.m->cudaHasEmission()) {
                    if (pdf_object < FLT_EPSILON)
                        pdf_object = FLT_EPSILON;
                    Vector3f f_r2 = intersection.m->eval(-curRay.direction, ro.direction, intersection.normal);
                    indir_color = f_r2 * dotProduct(ro.direction, intersection.normal) / pdf_object / RussianRoulette;
                    // Continue the walk along the sampled bounce direction.
                    curRay = ro;
                    needBreak = false;
                }
        }
        vstack[d * 2 + 1] = indir_color;
        if (needBreak)
            break;
    }
    // Fold the stack back down: at each depth, scale the deeper result by
    // this depth's indirect coefficient. (Translated from the original note.)
    for (int i = curDepth; i>0; i--) {
        // in_dir + dir
        Vector3f prev = vstack[i*2] + vstack[i*2+1];
        vstack[2*i-1] = vstack[2*i-1] * prev;
    }
    pixelColor = vstack[0] + vstack[1];
    return pixelColor;
}
// Seeds one curand state per pixel (flattened id = i + j*w).
// The launch grid is padded beyond the image, so out-of-range threads must
// exit: without the guard, a thread with i >= w computes the same flattened
// id as an in-range thread on the next row and the two race on state[id].
__global__ void SetKernelRand(curandState *state, int h, int w)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (i >= w || j >= h)
        return;
    int id = i + j * w;
    /* Each thread gets same seed, a different sequence
       number, no offset */
    curand_init(100*sizeof(curandState), id, 0, &state[id]);
}
__device__ float deg2rad(const float& deg) { return deg * M_PI / 180.0; }
// Generates one primary camera ray per pixel: a pinhole camera at a fixed
// eye position, looking down +z, with the x axis mirrored. Expects a 2D
// launch covering width x height; padded threads exit early.
// `fb` is accepted for launch-signature symmetry but is not written here.
__global__ void GenerateRay(int width, int height, double fov, float* fb, Ray* rays) {
    Vector3f eye_pos(278, 273, -800);
    float scale = tan(deg2rad(fov * 0.5));
    float imageAspectRatio = width / (float)height;
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    // Plain integer bounds check; the original cast the bounds to float
    // for no reason.
    if (i < width && j < height)
    {
        int pixelIdx = i + (j * width);
        // Map the pixel center to [-1, 1] NDC, scaled by fov and aspect.
        float x = (2 * (i + 0.5) / (float)width - 1) *
                  imageAspectRatio * scale;
        float y = (1 - 2 * (j + 0.5) / (float)height) * scale;
        Vector3f dir = normalize(Vector3f(-x, y, 1));
        Ray &ray = rays[pixelIdx];
        ray.origin = eye_pos;
        ray.direction = dir;
    }
}
// Traces `spp` paths per pixel and writes the averaged RGB color into fb
// (3 floats per pixel). Accumulates in registers and writes the frame
// buffer once per channel; the original read-modify-wrote unified memory
// on every sample, which is needless global-memory traffic.
__global__ void CastRay(int width, int height, float* fb, Ray* rays,
                        int bvhElemNum, int triangleNum,
                        BVHElem* bvhElems, Triangle* triangles, Bounds3* bounds, Material* materials,
                        curandState *state) {
    const int spp = 8;  // samples per pixel
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (i < width && j < height) {
        int pixelIdx = i + (j * width);
        Vector3f acc(0, 0, 0);
        for (int time = 0; time < spp; time++) {
            acc = acc + CalcColor(pixelIdx, bvhElemNum, triangleNum,
                                  rays, bvhElems, triangles, bounds, materials, state);
        }
        fb[pixelIdx*3+0] = acc.x / (float) spp;
        fb[pixelIdx*3+1] = acc.y / (float) spp;
        fb[pixelIdx*3+2] = acc.z / (float) spp;
    }
}
// Points each device triangle at its matching device-side material
// (triangles[i].m = materials + i). Work is distributed across the whole
// 2D grid with a grid-stride loop; the original had EVERY launched thread
// redundantly write the entire array — wasted bandwidth and, formally, a
// data race on each triangles[i].m.
__global__ void SetTriangleValue(int triangleNum, Triangle* triangles, Material* materials) {
    int xDim = gridDim.x * blockDim.x;
    int tid = (blockIdx.x * blockDim.x + threadIdx.x) +
              (blockIdx.y * blockDim.y + threadIdx.y) * xDim;
    int stride = xDim * gridDim.y * blockDim.y;
    for (int i = tid; i < triangleNum; i += stride) {
        triangles[i].m = materials + i;
    }
}
// Runs the full pipeline (RNG seeding, material binding, primary rays,
// path tracing), then gamma-corrects the unified-memory frame buffer on
// the host and writes it to image.ppm. Assumes InitRender() already ran.
void Render() {
    SetKernelRand<<<blocks, threads>>>(devStates, scene_host->height, scene_host->width);
    SetTriangleValue<<<blocks, threads>>>(num_triangles, triangle_dev, material_dev);
    GenerateRay<<<blocks, threads>>>(scene_host->width, scene_host->height, scene_host->fov, frameBuffer, ray_dev);
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());
    CastRay<<<blocks, threads>>>(scene_host->width, scene_host->height, frameBuffer,
                                 ray_dev,num_bvhElems, num_triangles,
                                 bvhElem_dev, triangle_dev, bound_dev, material_dev,
                                 devStates);
    checkCudaErrors(cudaGetLastError());
    // frameBuffer is managed memory: synchronize before the host reads it.
    checkCudaErrors(cudaDeviceSynchronize());
    // save color data to ppm file
    FILE* fp = fopen("image.ppm", "wb");
    if (fp == NULL) {
        // The original dereferenced fp unconditionally and crashed if the
        // file could not be created (e.g. read-only working directory).
        fprintf(stderr, "Render: failed to open image.ppm for writing\n");
        return;
    }
    (void)fprintf(fp, "P6\n%d %d\n255\n", scene_host->width, scene_host->height);
    for (auto i = 0; i < num_pixles; ++i) {
        unsigned char color[3];
        // Clamp to [0,1] then gamma-correct with exponent 0.6.
        color[0] = (unsigned char)(255 * std::pow(clamp(0, 1, frameBuffer[i*3+0]), 0.6f));
        color[1] = (unsigned char)(255 * std::pow(clamp(0, 1, frameBuffer[i*3+1]), 0.6f));
        color[2] = (unsigned char)(255 * std::pow(clamp(0, 1, frameBuffer[i*3+2]), 0.6f));
        fwrite(color, 1, 3, fp);
    }
    fclose(fp);
}
// Flattens the pointer-based BVH in `scene` into the flat host arrays
// bvhElem_host / bound_host / triangle_host (indexed by BVHBuildNode::nodeIdx)
// so the tree can be copied to the device and traversed iteratively.
// Side effects: allocates the three host arrays and accumulates into the
// global num_triangles.
// NOTE(review): num_triangles is only ever incremented here — assumes it
// starts at 0 and this function runs once; verify callers. malloc results
// are not checked.
void BuildBvhNodeList(Scene* scene) {
    bvhElem_host = (BVHElem*) malloc(num_bvhElems * sizeof(BVHElem));
    bound_host = (Bounds3*) malloc(num_bvhElems * sizeof(Bounds3));
    std::vector<int> leafIdx;
    // Count triangles across all mesh objects to size triangle_host.
    for (int i = 0; i<scene->objects.size(); i++) {
        MeshTriangle* mt = (MeshTriangle*)(scene->objects[i]);
        num_triangles += mt->triangles.size();
    }
    triangle_host = (Triangle*) malloc(num_triangles * sizeof(Triangle));
    // BFS for BVH Tree
    BVHBuildNode* root = scene->bvh->root;
    std::queue<BVHBuildNode*> nodeQueue;
    if (root != NULL)
        nodeQueue.push(root);
    int triCount = 0;  // running offset into triangle_host
    while (!nodeQueue.empty())
    {
        BVHBuildNode* nd = nodeQueue.front();
        BVHElem &curElem = bvhElem_host[nd->nodeIdx];
        curElem.boundIdx = nd->nodeIdx;
        // Children are referenced by index; -1 marks "no child".
        curElem.leftIdx = (nd->left) ? (nd->left->nodeIdx) : -1;
        curElem.rightIdx = (nd->right) ? (nd->right->nodeIdx) : -1;
        curElem.isLeaf = (nd->object);
        if (curElem.isLeaf) {
            // Leaf: copy the object's triangles into the flat array and
            // record the [triStartIdx, triStartIdx + triNum) slice.
            MeshTriangle* mt = (MeshTriangle*)(nd->object);
            curElem.triStartIdx = triCount;
            curElem.triNum = mt->triangles.size();
            for (int j = 0; j < curElem.triNum; j++) {
                triangle_host[curElem.triStartIdx + j] = mt->triangles[j];
                triangle_host[curElem.triStartIdx + j].m = mt->m;
                triCount++;
            }
            leafIdx.push_back(curElem.boundIdx);
        }
        else {
            curElem.triStartIdx = -1;
            curElem.triNum = 0;
        }
        bound_host[nd->nodeIdx] = nd->bounds;
        nodeQueue.pop();
        if (nd->left)
        {
            nodeQueue.push(nd->left);
        }
        if (nd->right)
        {
            nodeQueue.push(nd->right);
        }
    }
}
|
b179967f188e2d349a8eab8c27db490363ee1ed3.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2017-2023 by XGBoost contributors
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <xgboost/base.h>
#include <random>
#include <string>
#include <vector>
#include "../../../src/common/common.h"
#include "../../../src/data/ellpack_page.cuh" // for EllpackPageImpl
#include "../../../src/data/ellpack_page.h" // for EllpackPage
#include "../../../src/tree/param.h" // for TrainParam
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../filesystem.h" // dmlc::TemporaryDirectory
#include "../helpers.h"
#include "../histogram_helpers.h"
#include "xgboost/context.h"
#include "xgboost/json.h"
namespace xgboost::tree {
// Exercises DeviceHistogramStorage's recycling behavior: once the backing
// storage reaches `kStopGrowing` entries it stops growing and overwrites
// the oldest node slots while keeping nidx_map entries for live nodes.
TEST(GpuHist, DeviceHistogram) {
  // Ensures that node allocates correctly after reaching `kStopGrowingSize`.
  dh::safe_cuda(hipSetDevice(0));
  constexpr size_t kNBins = 128;
  constexpr int kNNodes = 4;
  constexpr size_t kStopGrowing = kNNodes * kNBins * 2u;
  DeviceHistogramStorage<kStopGrowing> histogram;
  histogram.Init(0, kNBins);
  for (int i = 0; i < kNNodes; ++i) {
    histogram.AllocateHistograms({i});
  }
  histogram.Reset();
  // Reset keeps the capacity; storage is exactly at the growth cap.
  ASSERT_EQ(histogram.Data().size(), kStopGrowing);
  // Use allocated memory but do not erase nidx_map.
  for (int i = 0; i < kNNodes; ++i) {
    histogram.AllocateHistograms({i});
  }
  for (int i = 0; i < kNNodes; ++i) {
    ASSERT_TRUE(histogram.HistogramExists(i));
  }
  // Add two new nodes
  histogram.AllocateHistograms({kNNodes});
  histogram.AllocateHistograms({kNNodes + 1});
  // Old cached nodes should still exist
  for (int i = 0; i < kNNodes; ++i) {
    ASSERT_TRUE(histogram.HistogramExists(i));
  }
  // Should be deleted
  ASSERT_FALSE(histogram.HistogramExists(kNNodes));
  // Most recent node should exist
  ASSERT_TRUE(histogram.HistogramExists(kNNodes + 1));
  // Add same node again - should fail
  EXPECT_ANY_THROW(histogram.AllocateHistograms({kNNodes + 1}););
}
// Host-side reference histogram used by TestBuildHist: 24 bins total,
// i.e. 3 bins for each of the 8 feature columns, as (grad, hess) pairs.
std::vector<GradientPairPrecise> GetHostHistGpair() {
  return {
      {0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
      {2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
      {1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
      {2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
      {1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
      {1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
      {0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
      {2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}};
}
// Builds a single-node gradient histogram on the GPU for a synthetic 16x8
// ELLPACK page with random gradients, then compares the dequantised bins
// against the precomputed host reference from GetHostHistGpair().
// `use_shared_memory_histograms` selects the shared-memory accumulation
// path (vs. global memory) in BuildGradientHistogram.
template <typename GradientSumT>
void TestBuildHist(bool use_shared_memory_histograms) {
  int const kNRows = 16, kNCols = 8;
  TrainParam param;
  Args args{
      {"max_depth", "6"},
      {"max_leaves", "0"},
  };
  param.Init(args);
  auto page = BuildEllpackPage(kNRows, kNCols);
  BatchParam batch_param{};
  Context ctx{MakeCUDACtx(0)};
  auto cs = std::make_shared<common::ColumnSampler>(0);
  GPUHistMakerDevice maker(&ctx, /*is_external_memory=*/false, {}, kNRows, param, cs, kNCols,
                           batch_param);
  // Deterministic random gradients (fixed-seed LCG) so the reference
  // histogram in GetHostHistGpair() stays valid.
  xgboost::SimpleLCG gen;
  xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
  HostDeviceVector<GradientPair> gpair(kNRows);
  for (auto &gp : gpair.HostVector()) {
    bst_float grad = dist(&gen);
    bst_float hess = dist(&gen);
    gp = GradientPair(grad, hess);
  }
  gpair.SetDevice(0);
  thrust::host_vector<common::CompressedByteT> h_gidx_buffer (page->gidx_buffer.HostVector());
  maker.row_partitioner = std::make_unique<RowPartitioner>(0, kNRows);
  maker.hist.Init(0, page->Cuts().TotalBins());
  maker.hist.AllocateHistograms({0});
  maker.gpair = gpair.DeviceSpan();
  maker.quantiser = std::make_unique<GradientQuantiser>(maker.gpair);
  maker.page = page.get();
  maker.InitFeatureGroupsOnce();
  BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0),
                         maker.feature_groups->DeviceAccessor(0), gpair.DeviceSpan(),
                         maker.row_partitioner->GetRows(0), maker.hist.GetNodeHistogram(0),
                         *maker.quantiser, !use_shared_memory_histograms);
  DeviceHistogramStorage<>& d_hist = maker.hist;
  auto node_histogram = d_hist.GetNodeHistogram(0);
  // d_hist.data stored in float, not gradient pair
  thrust::host_vector<GradientPairInt64> h_result (node_histogram.size());
  dh::safe_cuda(hipMemcpy(h_result.data(), node_histogram.data(), node_histogram.size_bytes(),
                          hipMemcpyDeviceToHost));
  std::vector<GradientPairPrecise> solution = GetHostHistGpair();
  for (size_t i = 0; i < h_result.size(); ++i) {
    // Dequantise and compare against the reference within rounding error.
    auto result = maker.quantiser->ToFloatingPoint(h_result[i]);
    ASSERT_NEAR(result.GetGrad(), solution[i].GetGrad(), 0.01f);
    ASSERT_NEAR(result.GetHess(), solution[i].GetHess(), 0.01f);
  }
}
// Instantiate the histogram-building test for both accumulation paths:
// global-memory atomics and shared-memory (privatized) histograms.
TEST(GpuHist, BuildHistGlobalMem) {
  TestBuildHist<GradientPairPrecise>(false);
}
TEST(GpuHist, BuildHistSharedMem) {
  TestBuildHist<GradientPairPrecise>(true);
}
// Builds a host-side histogram-cut container for 8 features with 3 cut
// points each; SetPtrs supplies the CSC-style offsets into the 24 values.
HistogramCutsWrapper GetHostCutMatrix () {
  HistogramCutsWrapper cmat;
  cmat.SetPtrs({0, 3, 6, 9, 12, 15, 18, 21, 24});
  cmat.SetMins({0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f});
  // 24 cut fields, 3 cut fields for each feature (column).
  // Each row of the cut represents the cuts for a data column.
  cmat.SetValues({0.30f, 0.67f, 1.64f,
                  0.32f, 0.77f, 1.95f,
                  0.29f, 0.70f, 1.80f,
                  0.32f, 0.75f, 1.85f,
                  0.18f, 0.59f, 1.69f,
                  0.25f, 0.74f, 2.00f,
                  0.26f, 0.74f, 1.98f,
                  0.26f, 0.71f, 1.83f});
  return cmat;
}
// Builds a GradientQuantiser from a single large gradient pair.
inline GradientQuantiser DummyRoundingFactor() {
  // Tests should not exceed sum of 1000
  thrust::device_vector<GradientPair> gpair(1, GradientPair{1000.f, 1000.f});
  return GradientQuantiser(dh::ToSpan(gpair));
}
// Verifies that the compressed histogram index (gidx_buffer) produced for a
// sparse DMatrix is identical whether the matrix is held in memory or
// streamed from external memory.
void TestHistogramIndexImpl() {
  // Test if the compressed histogram index matches when using a sparse
  // dmatrix with and without using external memory
  int constexpr kNRows = 1000, kNCols = 10;
  // Build 2 matrices and build a histogram maker with that
  Context ctx(MakeCUDACtx(0));
  ObjInfo task{ObjInfo::kRegression};
  tree::GPUHistMaker hist_maker{&ctx, &task}, hist_maker_ext{&ctx, &task};
  std::unique_ptr<DMatrix> hist_maker_dmat(
    CreateSparsePageDMatrixWithRC(kNRows, kNCols, 0, true));
  dmlc::TemporaryDirectory tempdir;
  // Page size 128 forces the external-memory DMatrix into multiple pages.
  std::unique_ptr<DMatrix> hist_maker_ext_dmat(
    CreateSparsePageDMatrixWithRC(kNRows, kNCols, 128UL, true, tempdir));
  Args training_params = {{"max_depth", "10"}, {"max_leaves", "0"}};
  TrainParam param;
  param.UpdateAllowUnknown(training_params);
  hist_maker.Configure(training_params);
  hist_maker.InitDataOnce(&param, hist_maker_dmat.get());
  hist_maker_ext.Configure(training_params);
  hist_maker_ext.InitDataOnce(&param, hist_maker_ext_dmat.get());
  // Extract the device maker from the histogram makers and from that its compressed
  // histogram index
  const auto &maker = hist_maker.maker;
  auto grad = GenerateRandomGradients(kNRows);
  grad.SetDevice(0);
  maker->Reset(&grad, hist_maker_dmat.get(), kNCols);
  std::vector<common::CompressedByteT> h_gidx_buffer(maker->page->gidx_buffer.HostVector());
  const auto &maker_ext = hist_maker_ext.maker;
  maker_ext->Reset(&grad, hist_maker_ext_dmat.get(), kNCols);
  std::vector<common::CompressedByteT> h_gidx_buffer_ext(maker_ext->page->gidx_buffer.HostVector());
  // Same bin count and same compressed-buffer size for both code paths.
  ASSERT_EQ(maker->page->Cuts().TotalBins(), maker_ext->page->Cuts().TotalBins());
  ASSERT_EQ(maker->page->gidx_buffer.Size(), maker_ext->page->gidx_buffer.Size());
}
TEST(GpuHist, TestHistogramIndex) {
  TestHistogramIndexImpl();
}
// Shared helper for the sampling/external-memory tests: trains one tree
// with grow_gpu_hist on `dmat` and fills `preds` from the updated
// prediction cache.
// gpu_page_size > 0 means the caller expects `dmat` to be an
// external-memory matrix split into multiple ELLPACK pages, which is
// verified before training.
void UpdateTree(Context const* ctx, HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
                size_t gpu_page_size, RegTree* tree, HostDeviceVector<bst_float>* preds,
                float subsample = 1.0f, const std::string& sampling_method = "uniform",
                int max_bin = 2) {
  if (gpu_page_size > 0) {
    // Loop over the batches and count the records
    int64_t batch_count = 0;
    int64_t row_count = 0;
    for (const auto& batch : dmat->GetBatches<EllpackPage>(
             ctx, BatchParam{max_bin, TrainParam::DftSparseThreshold()})) {
      EXPECT_LT(batch.Size(), dmat->Info().num_row_);
      batch_count++;
      row_count += batch.Size();
    }
    EXPECT_GE(batch_count, 2);
    EXPECT_EQ(row_count, dmat->Info().num_row_);
  }
  Args args{
      {"max_depth", "2"},
      {"max_bin", std::to_string(max_bin)},
      {"min_child_weight", "0.0"},
      {"reg_alpha", "0"},
      {"reg_lambda", "0"},
      {"subsample", std::to_string(subsample)},
      {"sampling_method", sampling_method},
  };
  TrainParam param;
  param.UpdateAllowUnknown(args);
  ObjInfo task{ObjInfo::kRegression};
  tree::GPUHistMaker hist_maker{ctx, &task};
  hist_maker.Configure(Args{});
  std::vector<HostDeviceVector<bst_node_t>> position(1);
  hist_maker.Update(&param, gpair, dmat, common::Span<HostDeviceVector<bst_node_t>>{position},
                    {tree});
  // Copy the cached leaf predictions for the single updated tree.
  auto cache = linalg::MakeTensorView(ctx, preds->DeviceSpan(), preds->Size(), 1);
  hist_maker.UpdatePredictionCache(dmat, cache);
}
// With subsample ~1.0, uniform row sampling must produce (numerically) the
// same tree and predictions as training on all rows.
TEST(GpuHist, UniformSampling) {
  constexpr size_t kRows = 4096;
  constexpr size_t kCols = 2;
  constexpr float kSubsample = 0.9999;  // effectively keeps every row
  common::GlobalRandom().seed(1994);
  // Create an in-memory DMatrix.
  std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
  auto gpair = GenerateRandomGradients(kRows);
  // Build a tree using the in-memory DMatrix.
  RegTree tree;
  HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
  Context ctx(MakeCUDACtx(0));
  UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
  // Build another tree using sampling.
  RegTree tree_sampling;
  HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0);
  UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample, "uniform",
             kRows);
  // Make sure the predictions are the same.
  auto preds_h = preds.ConstHostVector();
  auto preds_sampling_h = preds_sampling.ConstHostVector();
  for (size_t i = 0; i < kRows; i++) {
    EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-8);
  }
}
// With subsample ~1.0, gradient-based sampling must closely reproduce the
// unsampled predictions (looser tolerance than the uniform case because
// gradient-based sampling reweights rows).
TEST(GpuHist, GradientBasedSampling) {
  constexpr size_t kRows = 4096;
  constexpr size_t kCols = 2;
  constexpr float kSubsample = 0.9999;  // effectively keeps every row
  common::GlobalRandom().seed(1994);
  // Create an in-memory DMatrix.
  std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
  auto gpair = GenerateRandomGradients(kRows);
  // Build a tree using the in-memory DMatrix.
  RegTree tree;
  HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
  Context ctx(MakeCUDACtx(0));
  UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
  // Build another tree using sampling.
  RegTree tree_sampling;
  HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0);
  UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample,
             "gradient_based", kRows);
  // Make sure the predictions are the same.
  auto preds_h = preds.ConstHostVector();
  auto preds_sampling_h = preds_sampling.ConstHostVector();
  for (size_t i = 0; i < kRows; i++) {
    EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-3);
  }
}
// Training over multiple external-memory ELLPACK pages must match training
// on the equivalent single-batch matrix.
TEST(GpuHist, ExternalMemory) {
  constexpr size_t kRows = 4096;
  constexpr size_t kCols = 2;
  constexpr size_t kPageSize = 1024;  // rows per external-memory page
  dmlc::TemporaryDirectory tmpdir;
  // Create a DMatrix with multiple batches.
  std::unique_ptr<DMatrix> dmat_ext(
      CreateSparsePageDMatrix(kRows, kCols, kRows / kPageSize, tmpdir.path + "/cache"));
  // Create a single batch DMatrix.
  std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrix(kRows, kCols, 1, tmpdir.path + "/cache"));
  auto gpair = GenerateRandomGradients(kRows);
  // Build a tree using the in-memory DMatrix.
  RegTree tree;
  Context ctx(MakeCUDACtx(0));
  HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
  UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
  // Build another tree using multiple ELLPACK pages.
  RegTree tree_ext;
  HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0);
  UpdateTree(&ctx, &gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext, 1.0, "uniform", kRows);
  // Make sure the predictions are the same.
  auto preds_h = preds.ConstHostVector();
  auto preds_ext_h = preds_ext.ConstHostVector();
  for (size_t i = 0; i < kRows; i++) {
    EXPECT_NEAR(preds_h[i], preds_ext_h[i], 1e-6);
  }
}
// Gradient-based sampling must give matching predictions whether the data
// is one in-memory batch or multiple external-memory ELLPACK pages, given
// the same RNG state for both runs.
TEST(GpuHist, ExternalMemoryWithSampling) {
  constexpr size_t kRows = 4096;
  constexpr size_t kCols = 2;
  constexpr size_t kPageSize = 1024;  // rows per external-memory page
  constexpr float kSubsample = 0.5;
  const std::string kSamplingMethod = "gradient_based";
  common::GlobalRandom().seed(0);
  dmlc::TemporaryDirectory tmpdir;
  // Create a single batch DMatrix.
  std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrix(kRows, kCols, 1, tmpdir.path + "/cache"));
  // Create a DMatrix with multiple batches.
  std::unique_ptr<DMatrix> dmat_ext(
      CreateSparsePageDMatrix(kRows, kCols, kRows / kPageSize, tmpdir.path + "/cache"));
  auto gpair = GenerateRandomGradients(kRows);
  // Build a tree using the in-memory DMatrix.
  // Snapshot the RNG so both runs sample the same rows.
  auto rng = common::GlobalRandom();
  Context ctx(MakeCUDACtx(0));
  RegTree tree;
  HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
  UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, kSubsample, kSamplingMethod, kRows);
  // Build another tree using multiple ELLPACK pages.
  common::GlobalRandom() = rng;  // restore so the second run samples identically
  RegTree tree_ext;
  HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0);
  UpdateTree(&ctx, &gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext, kSubsample,
             kSamplingMethod, kRows);
  // Make sure the predictions are the same.
  auto preds_h = preds.ConstHostVector();
  auto preds_ext_h = preds_ext.ConstHostVector();
  for (size_t i = 0; i < kRows; i++) {
    ASSERT_NEAR(preds_h[i], preds_ext_h[i], 1e-3);
  }
}
// Saving the updater config to JSON, loading it back, and saving again
// must round-trip to an identical JSON document.
TEST(GpuHist, ConfigIO) {
  Context ctx(MakeCUDACtx(0));
  ObjInfo task{ObjInfo::kRegression};
  std::unique_ptr<TreeUpdater> updater{TreeUpdater::Create("grow_gpu_hist", &ctx, &task)};
  updater->Configure(Args{});
  Json j_updater{Object{}};
  updater->SaveConfig(&j_updater);
  ASSERT_TRUE(IsA<Object>(j_updater["hist_train_param"]));
  updater->LoadConfig(j_updater);
  Json j_updater_roundtrip{Object{}};
  updater->SaveConfig(&j_updater_roundtrip);
  ASSERT_TRUE(IsA<Object>(j_updater_roundtrip["hist_train_param"]));
  ASSERT_EQ(j_updater, j_updater_roundtrip);
}
// grow_gpu_hist rejects excessively deep trees: max_depth=32 must make the
// first training iteration throw.
TEST(GpuHist, MaxDepth) {
  Context ctx(MakeCUDACtx(0));
  size_t constexpr kRows = 16;
  size_t constexpr kCols = 4;
  auto p_mat = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();
  auto learner = std::unique_ptr<Learner>(Learner::Create({p_mat}));
  learner->SetParam("max_depth", "32");
  learner->Configure();
  ASSERT_THROW({learner->UpdateOneIter(0, p_mat);}, dmlc::Error);
}
} // namespace xgboost::tree
| b179967f188e2d349a8eab8c27db490363ee1ed3.cu | /**
* Copyright 2017-2023 by XGBoost contributors
*/
#include <gtest/gtest.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <xgboost/base.h>
#include <random>
#include <string>
#include <vector>
#include "../../../src/common/common.h"
#include "../../../src/data/ellpack_page.cuh" // for EllpackPageImpl
#include "../../../src/data/ellpack_page.h" // for EllpackPage
#include "../../../src/tree/param.h" // for TrainParam
#include "../../../src/tree/updater_gpu_hist.cu"
#include "../filesystem.h" // dmlc::TemporaryDirectory
#include "../helpers.h"
#include "../histogram_helpers.h"
#include "xgboost/context.h"
#include "xgboost/json.h"
namespace xgboost::tree {
// Exercises DeviceHistogramStorage's caching policy: once total allocation
// reaches kStopGrowing, storage is recycled instead of grown, old node
// histograms stay resident, and re-allocating an existing node throws.
TEST(GpuHist, DeviceHistogram) {
  // Ensures that node allocates correctly after reaching `kStopGrowingSize`.
  dh::safe_cuda(cudaSetDevice(0));
  constexpr size_t kNBins = 128;
  constexpr int kNNodes = 4;
  constexpr size_t kStopGrowing = kNNodes * kNBins * 2u;
  DeviceHistogramStorage<kStopGrowing> histogram;
  histogram.Init(0, kNBins);
  for (int i = 0; i < kNNodes; ++i) {
    histogram.AllocateHistograms({i});
  }
  histogram.Reset();
  ASSERT_EQ(histogram.Data().size(), kStopGrowing);
  // Use allocated memory but do not erase nidx_map.
  for (int i = 0; i < kNNodes; ++i) {
    histogram.AllocateHistograms({i});
  }
  for (int i = 0; i < kNNodes; ++i) {
    ASSERT_TRUE(histogram.HistogramExists(i));
  }
  // Add two new nodes
  histogram.AllocateHistograms({kNNodes});
  histogram.AllocateHistograms({kNNodes + 1});
  // Old cached nodes should still exist
  for (int i = 0; i < kNNodes; ++i) {
    ASSERT_TRUE(histogram.HistogramExists(i));
  }
  // Should be deleted
  ASSERT_FALSE(histogram.HistogramExists(kNNodes));
  // Most recent node should exist
  ASSERT_TRUE(histogram.HistogramExists(kNNodes + 1));
  // Add same node again - should fail
  EXPECT_ANY_THROW(histogram.AllocateHistograms({kNNodes + 1}););
}
// Reference histogram for TestBuildHist: 24 (grad, hess) bins, i.e. 3 bins
// per feature for the 8-column fixture.
std::vector<GradientPairPrecise> GetHostHistGpair() {
  return std::vector<GradientPairPrecise>{
      {0.8314f, 0.7147f}, {1.7989f, 3.7312f}, {3.3846f, 3.4598f},
      {2.9277f, 3.5886f}, {1.8429f, 2.4152f}, {1.2443f, 1.9019f},
      {1.6380f, 2.9174f}, {1.5657f, 2.5107f}, {2.8111f, 2.4776f},
      {2.1322f, 3.0651f}, {3.2927f, 3.8540f}, {0.5899f, 0.9866f},
      {1.5185f, 1.6263f}, {2.0686f, 3.1844f}, {2.4278f, 3.0950f},
      {1.5105f, 2.1403f}, {2.6922f, 4.2217f}, {1.8122f, 1.5437f},
      {0.0000f, 0.0000f}, {4.3245f, 5.7955f}, {1.6903f, 2.1103f},
      {2.4012f, 4.4754f}, {3.6136f, 3.4303f}, {0.0000f, 0.0000f}};
}
// Builds the root-node gradient histogram on device for a fixed 16x8
// fixture and compares it (after dequantisation) against the hard-coded
// reference in GetHostHistGpair().  `use_shared_memory_histograms` toggles
// the shared-memory vs. global-memory accumulation path.
template <typename GradientSumT>
void TestBuildHist(bool use_shared_memory_histograms) {
  int const kNRows = 16, kNCols = 8;
  TrainParam param;
  Args args{
      {"max_depth", "6"},
      {"max_leaves", "0"},
  };
  param.Init(args);
  auto page = BuildEllpackPage(kNRows, kNCols);
  BatchParam batch_param{};
  Context ctx{MakeCUDACtx(0)};
  auto cs = std::make_shared<common::ColumnSampler>(0);
  GPUHistMakerDevice maker(&ctx, /*is_external_memory=*/false, {}, kNRows, param, cs, kNCols,
                           batch_param);
  // Deterministic gradients from a simple LCG so the reference values match.
  xgboost::SimpleLCG gen;
  xgboost::SimpleRealUniformDistribution<bst_float> dist(0.0f, 1.0f);
  HostDeviceVector<GradientPair> gpair(kNRows);
  for (auto &gp : gpair.HostVector()) {
    bst_float grad = dist(&gen);
    bst_float hess = dist(&gen);
    gp = GradientPair(grad, hess);
  }
  gpair.SetDevice(0);
  thrust::host_vector<common::CompressedByteT> h_gidx_buffer (page->gidx_buffer.HostVector());
  maker.row_partitioner = std::make_unique<RowPartitioner>(0, kNRows);
  maker.hist.Init(0, page->Cuts().TotalBins());
  maker.hist.AllocateHistograms({0});
  maker.gpair = gpair.DeviceSpan();
  maker.quantiser = std::make_unique<GradientQuantiser>(maker.gpair);
  maker.page = page.get();
  maker.InitFeatureGroupsOnce();
  BuildGradientHistogram(ctx.CUDACtx(), page->GetDeviceAccessor(0),
                         maker.feature_groups->DeviceAccessor(0), gpair.DeviceSpan(),
                         maker.row_partitioner->GetRows(0), maker.hist.GetNodeHistogram(0),
                         *maker.quantiser, !use_shared_memory_histograms);
  DeviceHistogramStorage<>& d_hist = maker.hist;
  auto node_histogram = d_hist.GetNodeHistogram(0);
  // d_hist.data stored in float, not gradient pair
  thrust::host_vector<GradientPairInt64> h_result (node_histogram.size());
  dh::safe_cuda(cudaMemcpy(h_result.data(), node_histogram.data(), node_histogram.size_bytes(),
                           cudaMemcpyDeviceToHost));
  std::vector<GradientPairPrecise> solution = GetHostHistGpair();
  for (size_t i = 0; i < h_result.size(); ++i) {
    // Dequantise the fixed-point histogram entry before comparing.
    auto result = maker.quantiser->ToFloatingPoint(h_result[i]);
    ASSERT_NEAR(result.GetGrad(), solution[i].GetGrad(), 0.01f);
    ASSERT_NEAR(result.GetHess(), solution[i].GetHess(), 0.01f);
  }
}
// Histogram construction using the global-memory accumulation path.
TEST(GpuHist, BuildHistGlobalMem) {
  TestBuildHist<GradientPairPrecise>(false);
}
// Histogram construction using the shared-memory accumulation path.
TEST(GpuHist, BuildHistSharedMem) {
  TestBuildHist<GradientPairPrecise>(true);
}
// Fixed quantile-cut matrix for 8 features: pointer offsets, per-feature
// minimum values, and 3 cut values per feature.
HistogramCutsWrapper GetHostCutMatrix () {
  HistogramCutsWrapper cmat;
  cmat.SetPtrs({0, 3, 6, 9, 12, 15, 18, 21, 24});
  cmat.SetMins({0.1f, 0.2f, 0.3f, 0.1f, 0.2f, 0.3f, 0.2f, 0.2f});
  // 24 cut fields, 3 cut fields for each feature (column).
  // Each row of the cut represents the cuts for a data column.
  cmat.SetValues({0.30f, 0.67f, 1.64f,
                  0.32f, 0.77f, 1.95f,
                  0.29f, 0.70f, 1.80f,
                  0.32f, 0.75f, 1.85f,
                  0.18f, 0.59f, 1.69f,
                  0.25f, 0.74f, 2.00f,
                  0.26f, 0.74f, 1.98f,
                  0.26f, 0.71f, 1.83f});
  return cmat;
}
// Builds a quantiser from a single large gradient pair so test gradients
// never overflow the fixed-point range.
// NOTE(review): `gpair` is destroyed when this returns; assumes
// GradientQuantiser's constructor copies what it needs out of the span
// rather than retaining it — confirm against its definition.
inline GradientQuantiser DummyRoundingFactor() {
  thrust::device_vector<GradientPair> gpair(1);
  gpair[0] = {1000.f, 1000.f};  // Tests should not exceed sum of 1000
  return GradientQuantiser(dh::ToSpan(gpair));
}
// Verifies the compressed gradient-index (gidx) buffer is identical in size
// and bin count whether the sparse DMatrix is held in memory or streamed
// from external-memory pages.
void TestHistogramIndexImpl() {
  // Test if the compressed histogram index matches when using a sparse
  // dmatrix with and without using external memory
  int constexpr kNRows = 1000, kNCols = 10;
  // Build 2 matrices and build a histogram maker with that
  Context ctx(MakeCUDACtx(0));
  ObjInfo task{ObjInfo::kRegression};
  tree::GPUHistMaker hist_maker{&ctx, &task}, hist_maker_ext{&ctx, &task};
  std::unique_ptr<DMatrix> hist_maker_dmat(
      CreateSparsePageDMatrixWithRC(kNRows, kNCols, 0, true));
  dmlc::TemporaryDirectory tempdir;
  std::unique_ptr<DMatrix> hist_maker_ext_dmat(
      CreateSparsePageDMatrixWithRC(kNRows, kNCols, 128UL, true, tempdir));
  Args training_params = {{"max_depth", "10"}, {"max_leaves", "0"}};
  TrainParam param;
  param.UpdateAllowUnknown(training_params);
  hist_maker.Configure(training_params);
  hist_maker.InitDataOnce(&param, hist_maker_dmat.get());
  hist_maker_ext.Configure(training_params);
  hist_maker_ext.InitDataOnce(&param, hist_maker_ext_dmat.get());
  // Extract the device maker from the histogram makers and from that its compressed
  // histogram index
  const auto &maker = hist_maker.maker;
  auto grad = GenerateRandomGradients(kNRows);
  grad.SetDevice(0);
  maker->Reset(&grad, hist_maker_dmat.get(), kNCols);
  std::vector<common::CompressedByteT> h_gidx_buffer(maker->page->gidx_buffer.HostVector());
  const auto &maker_ext = hist_maker_ext.maker;
  maker_ext->Reset(&grad, hist_maker_ext_dmat.get(), kNCols);
  std::vector<common::CompressedByteT> h_gidx_buffer_ext(maker_ext->page->gidx_buffer.HostVector());
  ASSERT_EQ(maker->page->Cuts().TotalBins(), maker_ext->page->Cuts().TotalBins());
  ASSERT_EQ(maker->page->gidx_buffer.Size(), maker_ext->page->gidx_buffer.Size());
}
// Entry point for TestHistogramIndexImpl (kept separate so the impl can be
// reused without the gtest macro).
TEST(GpuHist, TestHistogramIndex) {
  TestHistogramIndexImpl();
}
// Shared test driver: runs one GPUHistMaker update on `dmat`, growing `tree`
// and filling `preds` from the updater's prediction cache.
// When gpu_page_size > 0 the DMatrix is expected to be external-memory
// backed, and the batch structure (>= 2 pages, rows summing to num_row_)
// is asserted before training.
void UpdateTree(Context const* ctx, HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
                size_t gpu_page_size, RegTree* tree, HostDeviceVector<bst_float>* preds,
                float subsample = 1.0f, const std::string& sampling_method = "uniform",
                int max_bin = 2) {
  if (gpu_page_size > 0) {
    // Loop over the batches and count the records
    int64_t batch_count = 0;
    int64_t row_count = 0;
    for (const auto& batch : dmat->GetBatches<EllpackPage>(
             ctx, BatchParam{max_bin, TrainParam::DftSparseThreshold()})) {
      EXPECT_LT(batch.Size(), dmat->Info().num_row_);
      batch_count++;
      row_count += batch.Size();
    }
    EXPECT_GE(batch_count, 2);
    EXPECT_EQ(row_count, dmat->Info().num_row_);
  }
  // Regularisation disabled so trees from different data layouts are
  // directly comparable.
  Args args{
      {"max_depth", "2"},
      {"max_bin", std::to_string(max_bin)},
      {"min_child_weight", "0.0"},
      {"reg_alpha", "0"},
      {"reg_lambda", "0"},
      {"subsample", std::to_string(subsample)},
      {"sampling_method", sampling_method},
  };
  TrainParam param;
  param.UpdateAllowUnknown(args);
  ObjInfo task{ObjInfo::kRegression};
  tree::GPUHistMaker hist_maker{ctx, &task};
  hist_maker.Configure(Args{});
  std::vector<HostDeviceVector<bst_node_t>> position(1);
  hist_maker.Update(&param, gpair, dmat, common::Span<HostDeviceVector<bst_node_t>>{position},
                    {tree});
  auto cache = linalg::MakeTensorView(ctx, preds->DeviceSpan(), preds->Size(), 1);
  hist_maker.UpdatePredictionCache(dmat, cache);
}
// With subsample ~= 1.0, uniform sampling should reproduce the unsampled
// tree's predictions to near machine precision.
TEST(GpuHist, UniformSampling) {
  constexpr size_t kRows = 4096;
  constexpr size_t kCols = 2;
  constexpr float kSubsample = 0.9999;
  common::GlobalRandom().seed(1994);
  // Create an in-memory DMatrix.
  std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
  auto gpair = GenerateRandomGradients(kRows);
  // Build a tree using the in-memory DMatrix.
  RegTree tree;
  HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
  Context ctx(MakeCUDACtx(0));
  UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
  // Build another tree using sampling.
  RegTree tree_sampling;
  HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0);
  UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample, "uniform",
             kRows);
  // Make sure the predictions are the same.
  auto preds_h = preds.ConstHostVector();
  auto preds_sampling_h = preds_sampling.ConstHostVector();
  for (size_t i = 0; i < kRows; i++) {
    EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-8);
  }
}
// With subsample ~= 1.0, gradient-based sampling should approximate the
// unsampled tree's predictions (looser 1e-3 tolerance than the uniform
// case because sampled rows are reweighted).
TEST(GpuHist, GradientBasedSampling) {
  constexpr size_t kRows = 4096;
  constexpr size_t kCols = 2;
  constexpr float kSubsample = 0.9999;
  common::GlobalRandom().seed(1994);
  // Create an in-memory DMatrix.
  std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrixWithRC(kRows, kCols, 0, true));
  auto gpair = GenerateRandomGradients(kRows);
  // Build a tree using the in-memory DMatrix.
  RegTree tree;
  HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
  Context ctx(MakeCUDACtx(0));
  UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
  // Build another tree using sampling.
  RegTree tree_sampling;
  HostDeviceVector<bst_float> preds_sampling(kRows, 0.0, 0);
  UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree_sampling, &preds_sampling, kSubsample,
             "gradient_based", kRows);
  // Make sure the predictions are the same.
  auto preds_h = preds.ConstHostVector();
  auto preds_sampling_h = preds_sampling.ConstHostVector();
  for (size_t i = 0; i < kRows; i++) {
    EXPECT_NEAR(preds_h[i], preds_sampling_h[i], 1e-3);
  }
}
// A tree trained from multiple external-memory ELLPACK pages must match the
// tree trained from the equivalent single-batch DMatrix (no sampling).
TEST(GpuHist, ExternalMemory) {
  constexpr size_t kRows = 4096;
  constexpr size_t kCols = 2;
  constexpr size_t kPageSize = 1024;
  dmlc::TemporaryDirectory tmpdir;
  // Create a DMatrix with multiple batches.
  std::unique_ptr<DMatrix> dmat_ext(
      CreateSparsePageDMatrix(kRows, kCols, kRows / kPageSize, tmpdir.path + "/cache"));
  // Create a single batch DMatrix.
  std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrix(kRows, kCols, 1, tmpdir.path + "/cache"));
  auto gpair = GenerateRandomGradients(kRows);
  // Build a tree using the in-memory DMatrix.
  RegTree tree;
  Context ctx(MakeCUDACtx(0));
  HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
  UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, 1.0, "uniform", kRows);
  // Build another tree using multiple ELLPACK pages.
  RegTree tree_ext;
  HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0);
  UpdateTree(&ctx, &gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext, 1.0, "uniform", kRows);
  // Make sure the predictions are the same.
  auto preds_h = preds.ConstHostVector();
  auto preds_ext_h = preds_ext.ConstHostVector();
  for (size_t i = 0; i < kRows; i++) {
    EXPECT_NEAR(preds_h[i], preds_ext_h[i], 1e-6);
  }
}
// Trains one tree on a single-batch DMatrix and one on a multi-batch
// (external-memory) DMatrix with gradient-based sampling, then checks the
// prediction caches agree to 1e-3.  The global RNG state is snapshotted
// before the first build and restored before the second so both runs make
// identical sampling decisions.
TEST(GpuHist, ExternalMemoryWithSampling) {
  constexpr size_t kRows = 4096;
  constexpr size_t kCols = 2;
  constexpr size_t kPageSize = 1024;
  constexpr float kSubsample = 0.5;
  const std::string kSamplingMethod = "gradient_based";
  common::GlobalRandom().seed(0);
  dmlc::TemporaryDirectory tmpdir;
  // Create a single batch DMatrix.
  std::unique_ptr<DMatrix> dmat(CreateSparsePageDMatrix(kRows, kCols, 1, tmpdir.path + "/cache"));
  // Create a DMatrix with multiple batches.
  std::unique_ptr<DMatrix> dmat_ext(
      CreateSparsePageDMatrix(kRows, kCols, kRows / kPageSize, tmpdir.path + "/cache"));
  auto gpair = GenerateRandomGradients(kRows);
  // Build a tree using the in-memory DMatrix.
  auto rng = common::GlobalRandom();  // snapshot RNG state for the second build
  Context ctx(MakeCUDACtx(0));
  RegTree tree;
  HostDeviceVector<bst_float> preds(kRows, 0.0, 0);
  UpdateTree(&ctx, &gpair, dmat.get(), 0, &tree, &preds, kSubsample, kSamplingMethod, kRows);
  // Build another tree using multiple ELLPACK pages.
  common::GlobalRandom() = rng;  // restore so sampling matches the first build
  RegTree tree_ext;
  HostDeviceVector<bst_float> preds_ext(kRows, 0.0, 0);
  UpdateTree(&ctx, &gpair, dmat_ext.get(), kPageSize, &tree_ext, &preds_ext, kSubsample,
             kSamplingMethod, kRows);
  // Make sure the predictions are the same.
  auto preds_h = preds.ConstHostVector();
  auto preds_ext_h = preds_ext.ConstHostVector();
  for (size_t i = 0; i < kRows; i++) {
    ASSERT_NEAR(preds_h[i], preds_ext_h[i], 1e-3);
  }
}
// Round-trips the updater configuration through JSON: save, load, and save
// again must yield identical documents, each carrying a populated
// "hist_train_param" object.
TEST(GpuHist, ConfigIO) {
  Context ctx(MakeCUDACtx(0));
  ObjInfo task{ObjInfo::kRegression};
  std::unique_ptr<TreeUpdater> updater{TreeUpdater::Create("grow_gpu_hist", &ctx, &task)};
  updater->Configure(Args{});

  Json config_first{Object{}};
  updater->SaveConfig(&config_first);
  ASSERT_TRUE(IsA<Object>(config_first["hist_train_param"]));

  updater->LoadConfig(config_first);
  Json config_second{Object{}};
  updater->SaveConfig(&config_second);
  ASSERT_TRUE(IsA<Object>(config_second["hist_train_param"]));

  ASSERT_EQ(config_first, config_second);
}
// Expects one boosting iteration with max_depth=32 to raise dmlc::Error
// (presumably a depth limit of the GPU builder — the test only pins the
// throw, not the message).
TEST(GpuHist, MaxDepth) {
  Context ctx(MakeCUDACtx(0));
  constexpr size_t kRows = 16;
  constexpr size_t kCols = 4;
  auto dmat = RandomDataGenerator{kRows, kCols, 0}.GenerateDMatrix();

  auto learner = std::unique_ptr<Learner>(Learner::Create({dmat}));
  learner->SetParam("max_depth", "32");
  learner->Configure();

  ASSERT_THROW({ learner->UpdateOneIter(0, dmat); }, dmlc::Error);
}
} // namespace xgboost::tree
|
ctoprim.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "thrust/reduce.h"
#include "thrust/device_ptr.h"
#include "header.h"
#include "util.h"
#define BLOCK_DIM 512
// Device-resident Courant number slot.
// NOTE(review): not referenced in this translation unit's visible code.
__device__ double d_courno;
// Ideal-gas constants mirrored in constant memory; the host ctoprim()
// below redeclares the same values locally.
__constant__ double GAMMA = 1.4E0;
__constant__ double CV = 8.3333333333E6;
#undef SQR
// Square via explicit round-to-nearest multiply (__dmul_rn prevents FMA
// contraction, keeping device results reproducible).
#define SQR(x) (__dmul_rn((x),(x)))
// Convert conserved variables u -> primitive variables q for one cell of the
// padded (dim_g) domain, and record the cell's local Courant number in
// courno[].  Interior cells get MAX(courx, coury, courz); ghost-layer cells
// get a -1.0 sentinel so the subsequent max-reduce ignores them.
//
// Launch: 1-D grid, BLOCK_DIM threads/block, one thread per padded cell;
// tail threads with idx >= comp_offset_g_padded do nothing.
// FIX: removed the unused local `numthreads`.
__global__ void gpu_ctoprim_kernel(
	global_const_t *g,	// i: application parameters (device copy)
	double *u,			// i: conserved variables, 5 components, padded layout
	double *q,			// o: primitive variables, 6 components, padded layout
	double *courno		// o: per-cell Courant scratch, one slot per thread id
){
	int i, j, k, idx, cour_idx, loffset;
	double rhoinv, eint, c, courx, coury, courz;

	// Decompose the flat thread id into (i,j,k) over the padded domain.
	cour_idx = blockIdx.x * blockDim.x + threadIdx.x;
	k = cour_idx / (g->dim_g[0] * g->dim_g[1]);
	j = (cour_idx / g->dim_g[0]) % g->dim_g[1];
	i = cour_idx % g->dim_g[0];
	idx = k*g->plane_offset_g_padded + j*g->pitch_g[0] + i;
	loffset = g->comp_offset_g_padded;	// stride between variable components

	// Calculate Q
	if( idx < loffset ){
		rhoinv = 1.0E0/u[idx];							// u(i,j,k,1) = u[0][i][j][k]
		q[idx] = u[idx];								// density
		q[idx+loffset] = u[idx+loffset]*rhoinv;			// x velocity
		q[idx+2*loffset] = u[idx+2*loffset]*rhoinv;		// y velocity
		q[idx+3*loffset] = u[idx+3*loffset]*rhoinv;		// z velocity
		// internal energy = total energy minus kinetic energy
		eint = u[idx+4*loffset]*rhoinv - 0.5E0*(SQR(q[idx+loffset]) + SQR(q[idx+2*loffset]) + SQR(q[idx+3*loffset]));
		q[idx+4*loffset] = (GAMMA-1.0E0)*eint*u[idx];	// ideal-gas pressure
		q[idx+5*loffset] = eint/CV;						// temperature

		// Calculate new courno (excluding ng ghost layers)
		if( g->ng <= i && i <= g->hi[0]+g->ng &&
			g->ng <= j && j <= g->hi[1]+g->ng &&
			g->ng <= k && k <= g->hi[2]+g->ng ){

			c = sqrt(GAMMA*q[idx+4*loffset]/q[idx]);	// speed of sound
			courx = (c+fabs(q[idx+loffset])) /g->dx[0];
			coury = (c+fabs(q[idx+2*loffset]))/g->dx[1];
			courz = (c+fabs(q[idx+3*loffset]))/g->dx[2];
			courno[cour_idx] = MAX(courx, MAX(coury, courz));
		}
		else
			courno[cour_idx] = -1.0;	// sentinel; TODO: make it minus infinity
	}
}
// Overload without Courant-number computation: conserved -> primitive
// conversion only, over the whole padded domain.
// FIX: removed unused locals `numthreads`, `c`, `courx`, `coury`, `courz`.
__global__ void gpu_ctoprim_kernel(
	global_const_t *g,	// i: application parameters (device copy)
	double *u,			// i: conserved variables, 5 components, padded layout
	double *q			// o: primitive variables, 6 components, padded layout
){
	int i, j, k, idx, loffset;
	double rhoinv, eint;

	// Decompose the flat thread id into (i,j,k) over the padded domain.
	idx = blockIdx.x * blockDim.x + threadIdx.x;
	k = idx / (g->dim_g[0] * g->dim_g[1]);
	j = (idx / g->dim_g[0]) % g->dim_g[1];
	i = idx % g->dim_g[0];
	idx = k*g->plane_offset_g_padded + j*g->pitch_g[0] + i;
	loffset = g->comp_offset_g_padded;	// stride between variable components

	// Calculate Q
	if( idx < loffset ){
		rhoinv = 1.0E0/u[idx];							// u(i,j,k,1) = u[0][i][j][k]
		q[idx] = u[idx];								// density
		q[idx+loffset] = u[idx+loffset]*rhoinv;			// x velocity
		q[idx+2*loffset] = u[idx+2*loffset]*rhoinv;		// y velocity
		q[idx+3*loffset] = u[idx+3*loffset]*rhoinv;		// z velocity
		// internal energy = total energy minus kinetic energy
		eint = u[idx+4*loffset]*rhoinv - 0.5E0*(SQR(q[idx+loffset]) + SQR(q[idx+2*loffset]) + SQR(q[idx+3*loffset]));
		q[idx+4*loffset] = (GAMMA-1.0E0)*eint*u[idx];	// ideal-gas pressure
		q[idx+5*loffset] = eint/CV;						// temperature
	}
}
// Host wrapper: launch the conversion kernel over the padded domain, then
// max-reduce the per-cell Courant scratch buffer (h_const.temp[0]) into
// `courno`.  Ghost cells wrote -1.0, which cannot win against interior
// values, and the reduction identity is -INFINITY.
// FIX: removed the unused local `i`.
void gpu_ctoprim(
	global_const_t h_const,		// i: global struct containing application parameters
	global_const_t *d_const,	// i: device pointer to the same struct
	double *u_d,				// i: conserved variables (device)
	double *q_d,				// o: primitive variables (device)
	double &courno				// i/o: overwritten with the domain-wide max Courant number
){
	int len = h_const.dim_g[0] * h_const.dim_g[1] * h_const.dim_g[2];
	int grid_dim = (len + BLOCK_DIM-1) / BLOCK_DIM;
	int block_dim = BLOCK_DIM;

	hipLaunchKernelGGL(( gpu_ctoprim_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, d_const, u_d, q_d, h_const.temp[0]);

	// Find max & update courno.
	thrust::device_ptr<double> dev_ptr(h_const.temp[0]);
	courno = thrust::reduce(dev_ptr, dev_ptr + len, (double) -INFINITY, thrust::maximum<double>());
}
// Host wrapper overload: conversion only, no Courant-number reduction.
// FIX: removed the unused local `i`.
void gpu_ctoprim(
	global_const_t h_const,		// i: global struct containing application parameters
	global_const_t *d_const,	// i: device pointer to the same struct
	double *u_d,				// i: conserved variables (device)
	double *q_d					// o: primitive variables (device)
){
	int len = h_const.dim_g[0] * h_const.dim_g[1] * h_const.dim_g[2];
	int grid_dim = (len + BLOCK_DIM-1) / BLOCK_DIM;
	int block_dim = BLOCK_DIM;

	hipLaunchKernelGGL(( gpu_ctoprim_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, d_const, u_d, q_d);
}
#undef SQR
#define SQR(x) ((x)*(x))
#define u(i,j,k,l) u[l-1][i][j][k]
#define q(i,j,k,l) q[l-1][i][j][k]
#define dx(i) h.dx[i-1]
#define dxinv(i) h.dxinv[i-1]
// CPU reference: convert conserved -> primitive variables over the padded
// domain (including ng ghost layers), then fold the interior cells' Courant
// numbers into `courno` (running max across grids).
void ctoprim (
	global_const_t h,
	double ****u,	// i: conserved variables, 5 components, padded layout
	double ****q,	// o: primitive variables, 6 components, padded layout
	double &courno	// i/o: running max Courant number
){
	int i, j, k;
	double c, eint, courx, coury, courz, rhoinv;
	// FIX: courmx/courmy/courmz were read uninitialized by the MAX()
	// reduction below (undefined behavior); seed them with the reduction's
	// identity element instead.
	double courmx = -INFINITY, courmy = -INFINITY, courmz = -INFINITY;
	const double GAMMA = 1.4E0;
	const double CV = 8.3333333333E6;

	// #pragma omp parallel for private(i, j, k, eint, rhoinv)
	DO(k, h.lo[2]-h.ng, h.hi[2]+h.ng){
		DO(j, h.lo[1]-h.ng, h.hi[1]+h.ng){
			DO(i, h.lo[0]-h.ng, h.hi[0]+h.ng){
				rhoinv = 1.0E0/u(i,j,k,1);
				q(i,j,k,1) = u(i,j,k,1);
				q(i,j,k,2) = u(i,j,k,2)*rhoinv;
				q(i,j,k,3) = u(i,j,k,3)*rhoinv;
				q(i,j,k,4) = u(i,j,k,4)*rhoinv;
				eint = u(i,j,k,5)*rhoinv - 0.5E0*(SQR(q(i,j,k,2)) + SQR(q(i,j,k,3)) + SQR(q(i,j,k,4)));
				q(i,j,k,5) = (GAMMA-1.0E0)*eint*u(i,j,k,1);
				q(i,j,k,6) = eint/CV;
			}
		}
	}

	// #pragma omp parallel for private(i, j, k, c, courx, coury, courz) reduction(max: courmx, courmy, courmz)
	DO(k, h.lo[2], h.hi[2]){
		DO(j, h.lo[1], h.hi[1]){
			DO(i, h.lo[0], h.hi[0]){
				c = sqrt(GAMMA*q(i,j,k,5)/q(i,j,k,1));
				courx = ( c+fabs(q(i,j,k,2)) ) / dx(1); // I tried to change to * dxinv(1) but the results diverge.. (max diff = 5E-8)
				coury = ( c+fabs(q(i,j,k,3)) ) / dx(2);
				courz = ( c+fabs(q(i,j,k,4)) ) / dx(3);
				courmx = MAX( courmx, courx );
				courmy = MAX( courmy, coury );
				courmz = MAX( courmz, courz );
			}
		}
	}

	//
	// Compute running max of Courant number over grids.
	//
	courno = MAX(MAX(courmx, courmy), MAX(courmz, courno));
}
// CPU reference overload: conversion only, no Courant-number computation.
// FIX: removed unused locals `c`, `courx/y/z`, `courmx/y/z`.
void ctoprim (
	global_const_t h,
	double ****u,	// i: conserved variables, 5 components, padded layout
	double ****q	// o: primitive variables, 6 components, padded layout
){
	int i, j, k;
	double eint, rhoinv;
	const double GAMMA = 1.4E0;
	const double CV = 8.3333333333E6;

	// #pragma omp parallel for private(i, j, k, eint, rhoinv)
	DO(k, h.lo[2]-h.ng, h.hi[2]+h.ng){
		DO(j, h.lo[1]-h.ng, h.hi[1]+h.ng){
			DO(i, h.lo[0]-h.ng, h.hi[0]+h.ng){
				rhoinv = 1.0E0/u(i,j,k,1);
				q(i,j,k,1) = u(i,j,k,1);
				q(i,j,k,2) = u(i,j,k,2)*rhoinv;
				q(i,j,k,3) = u(i,j,k,3)*rhoinv;
				q(i,j,k,4) = u(i,j,k,4)*rhoinv;
				eint = u(i,j,k,5)*rhoinv - 0.5E0*(SQR(q(i,j,k,2)) + SQR(q(i,j,k,3)) + SQR(q(i,j,k,4)));
				q(i,j,k,5) = (GAMMA-1.0E0)*eint*u(i,j,k,1);
				q(i,j,k,6) = eint/CV;
			}
		}
	}
}
#undef u
#undef q
#undef dx
| ctoprim.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "thrust/reduce.h"
#include "thrust/device_ptr.h"
#include "header.h"
#include "util.h"
#define BLOCK_DIM 512
// Device-resident Courant number slot.
// NOTE(review): not referenced in this translation unit's visible code.
__device__ double d_courno;
// Ideal-gas constants mirrored in constant memory; the host ctoprim()
// below redeclares the same values locally.
__constant__ double GAMMA = 1.4E0;
__constant__ double CV = 8.3333333333E6;
#undef SQR
// Square via explicit round-to-nearest multiply (__dmul_rn prevents FMA
// contraction, keeping device results reproducible).
#define SQR(x) (__dmul_rn((x),(x)))
// Convert conserved variables u -> primitive variables q for one cell of the
// padded (dim_g) domain, and record the cell's local Courant number in
// courno[].  Interior cells get MAX(courx, coury, courz); ghost-layer cells
// get a -1.0 sentinel so the subsequent max-reduce ignores them.
//
// Launch: 1-D grid, BLOCK_DIM threads/block, one thread per padded cell;
// tail threads with idx >= comp_offset_g_padded do nothing.
// FIX: removed the unused local `numthreads`.
__global__ void gpu_ctoprim_kernel(
	global_const_t *g,	// i: application parameters (device copy)
	double *u,			// i: conserved variables, 5 components, padded layout
	double *q,			// o: primitive variables, 6 components, padded layout
	double *courno		// o: per-cell Courant scratch, one slot per thread id
){
	int i, j, k, idx, cour_idx, loffset;
	double rhoinv, eint, c, courx, coury, courz;

	// Decompose the flat thread id into (i,j,k) over the padded domain.
	cour_idx = blockIdx.x * blockDim.x + threadIdx.x;
	k = cour_idx / (g->dim_g[0] * g->dim_g[1]);
	j = (cour_idx / g->dim_g[0]) % g->dim_g[1];
	i = cour_idx % g->dim_g[0];
	idx = k*g->plane_offset_g_padded + j*g->pitch_g[0] + i;
	loffset = g->comp_offset_g_padded;	// stride between variable components

	// Calculate Q
	if( idx < loffset ){
		rhoinv = 1.0E0/u[idx];							// u(i,j,k,1) = u[0][i][j][k]
		q[idx] = u[idx];								// density
		q[idx+loffset] = u[idx+loffset]*rhoinv;			// x velocity
		q[idx+2*loffset] = u[idx+2*loffset]*rhoinv;		// y velocity
		q[idx+3*loffset] = u[idx+3*loffset]*rhoinv;		// z velocity
		// internal energy = total energy minus kinetic energy
		eint = u[idx+4*loffset]*rhoinv - 0.5E0*(SQR(q[idx+loffset]) + SQR(q[idx+2*loffset]) + SQR(q[idx+3*loffset]));
		q[idx+4*loffset] = (GAMMA-1.0E0)*eint*u[idx];	// ideal-gas pressure
		q[idx+5*loffset] = eint/CV;						// temperature

		// Calculate new courno (excluding ng ghost layers)
		if( g->ng <= i && i <= g->hi[0]+g->ng &&
			g->ng <= j && j <= g->hi[1]+g->ng &&
			g->ng <= k && k <= g->hi[2]+g->ng ){

			c = sqrt(GAMMA*q[idx+4*loffset]/q[idx]);	// speed of sound
			courx = (c+fabs(q[idx+loffset])) /g->dx[0];
			coury = (c+fabs(q[idx+2*loffset]))/g->dx[1];
			courz = (c+fabs(q[idx+3*loffset]))/g->dx[2];
			courno[cour_idx] = MAX(courx, MAX(coury, courz));
		}
		else
			courno[cour_idx] = -1.0;	// sentinel; TODO: make it minus infinity
	}
}
// Overload without Courant-number computation: conserved -> primitive
// conversion only, over the whole padded domain.
// FIX: removed unused locals `numthreads`, `c`, `courx`, `coury`, `courz`.
__global__ void gpu_ctoprim_kernel(
	global_const_t *g,	// i: application parameters (device copy)
	double *u,			// i: conserved variables, 5 components, padded layout
	double *q			// o: primitive variables, 6 components, padded layout
){
	int i, j, k, idx, loffset;
	double rhoinv, eint;

	// Decompose the flat thread id into (i,j,k) over the padded domain.
	idx = blockIdx.x * blockDim.x + threadIdx.x;
	k = idx / (g->dim_g[0] * g->dim_g[1]);
	j = (idx / g->dim_g[0]) % g->dim_g[1];
	i = idx % g->dim_g[0];
	idx = k*g->plane_offset_g_padded + j*g->pitch_g[0] + i;
	loffset = g->comp_offset_g_padded;	// stride between variable components

	// Calculate Q
	if( idx < loffset ){
		rhoinv = 1.0E0/u[idx];							// u(i,j,k,1) = u[0][i][j][k]
		q[idx] = u[idx];								// density
		q[idx+loffset] = u[idx+loffset]*rhoinv;			// x velocity
		q[idx+2*loffset] = u[idx+2*loffset]*rhoinv;		// y velocity
		q[idx+3*loffset] = u[idx+3*loffset]*rhoinv;		// z velocity
		// internal energy = total energy minus kinetic energy
		eint = u[idx+4*loffset]*rhoinv - 0.5E0*(SQR(q[idx+loffset]) + SQR(q[idx+2*loffset]) + SQR(q[idx+3*loffset]));
		q[idx+4*loffset] = (GAMMA-1.0E0)*eint*u[idx];	// ideal-gas pressure
		q[idx+5*loffset] = eint/CV;						// temperature
	}
}
// Host wrapper: launch the conversion kernel over the padded domain, then
// max-reduce the per-cell Courant scratch buffer (h_const.temp[0]) into
// `courno`.  Ghost cells wrote -1.0, which cannot win against interior
// values, and the reduction identity is -INFINITY.
// FIX: removed the unused local `i`.
void gpu_ctoprim(
	global_const_t h_const,		// i: global struct containing application parameters
	global_const_t *d_const,	// i: device pointer to the same struct
	double *u_d,				// i: conserved variables (device)
	double *q_d,				// o: primitive variables (device)
	double &courno				// i/o: overwritten with the domain-wide max Courant number
){
	int len = h_const.dim_g[0] * h_const.dim_g[1] * h_const.dim_g[2];
	int grid_dim = (len + BLOCK_DIM-1) / BLOCK_DIM;
	int block_dim = BLOCK_DIM;

	gpu_ctoprim_kernel<<<grid_dim, block_dim>>>(d_const, u_d, q_d, h_const.temp[0]);

	// Find max & update courno.
	thrust::device_ptr<double> dev_ptr(h_const.temp[0]);
	courno = thrust::reduce(dev_ptr, dev_ptr + len, (double) -INFINITY, thrust::maximum<double>());
}
// Host wrapper overload: conversion only, no Courant-number reduction.
// FIX: removed the unused local `i`.
void gpu_ctoprim(
	global_const_t h_const,		// i: global struct containing application parameters
	global_const_t *d_const,	// i: device pointer to the same struct
	double *u_d,				// i: conserved variables (device)
	double *q_d					// o: primitive variables (device)
){
	int len = h_const.dim_g[0] * h_const.dim_g[1] * h_const.dim_g[2];
	int grid_dim = (len + BLOCK_DIM-1) / BLOCK_DIM;
	int block_dim = BLOCK_DIM;

	gpu_ctoprim_kernel<<<grid_dim, block_dim>>>(d_const, u_d, q_d);
}
#undef SQR
#define SQR(x) ((x)*(x))
#define u(i,j,k,l) u[l-1][i][j][k]
#define q(i,j,k,l) q[l-1][i][j][k]
#define dx(i) h.dx[i-1]
#define dxinv(i) h.dxinv[i-1]
// CPU reference: convert conserved -> primitive variables over the padded
// domain (including ng ghost layers), then fold the interior cells' Courant
// numbers into `courno` (running max across grids).
void ctoprim (
	global_const_t h,
	double ****u,	// i: conserved variables, 5 components, padded layout
	double ****q,	// o: primitive variables, 6 components, padded layout
	double &courno	// i/o: running max Courant number
){
	int i, j, k;
	double c, eint, courx, coury, courz, rhoinv;
	// FIX: courmx/courmy/courmz were read uninitialized by the MAX()
	// reduction below (undefined behavior); seed them with the reduction's
	// identity element instead.
	double courmx = -INFINITY, courmy = -INFINITY, courmz = -INFINITY;
	const double GAMMA = 1.4E0;
	const double CV = 8.3333333333E6;

	// #pragma omp parallel for private(i, j, k, eint, rhoinv)
	DO(k, h.lo[2]-h.ng, h.hi[2]+h.ng){
		DO(j, h.lo[1]-h.ng, h.hi[1]+h.ng){
			DO(i, h.lo[0]-h.ng, h.hi[0]+h.ng){
				rhoinv = 1.0E0/u(i,j,k,1);
				q(i,j,k,1) = u(i,j,k,1);
				q(i,j,k,2) = u(i,j,k,2)*rhoinv;
				q(i,j,k,3) = u(i,j,k,3)*rhoinv;
				q(i,j,k,4) = u(i,j,k,4)*rhoinv;
				eint = u(i,j,k,5)*rhoinv - 0.5E0*(SQR(q(i,j,k,2)) + SQR(q(i,j,k,3)) + SQR(q(i,j,k,4)));
				q(i,j,k,5) = (GAMMA-1.0E0)*eint*u(i,j,k,1);
				q(i,j,k,6) = eint/CV;
			}
		}
	}

	// #pragma omp parallel for private(i, j, k, c, courx, coury, courz) reduction(max: courmx, courmy, courmz)
	DO(k, h.lo[2], h.hi[2]){
		DO(j, h.lo[1], h.hi[1]){
			DO(i, h.lo[0], h.hi[0]){
				c = sqrt(GAMMA*q(i,j,k,5)/q(i,j,k,1));
				courx = ( c+fabs(q(i,j,k,2)) ) / dx(1); // I tried to change to * dxinv(1) but the results diverge.. (max diff = 5E-8)
				coury = ( c+fabs(q(i,j,k,3)) ) / dx(2);
				courz = ( c+fabs(q(i,j,k,4)) ) / dx(3);
				courmx = MAX( courmx, courx );
				courmy = MAX( courmy, coury );
				courmz = MAX( courmz, courz );
			}
		}
	}

	//
	// Compute running max of Courant number over grids.
	//
	courno = MAX(MAX(courmx, courmy), MAX(courmz, courno));
}
// CPU reference overload: conversion only, no Courant-number computation.
// FIX: removed unused locals `c`, `courx/y/z`, `courmx/y/z`.
void ctoprim (
	global_const_t h,
	double ****u,	// i: conserved variables, 5 components, padded layout
	double ****q	// o: primitive variables, 6 components, padded layout
){
	int i, j, k;
	double eint, rhoinv;
	const double GAMMA = 1.4E0;
	const double CV = 8.3333333333E6;

	// #pragma omp parallel for private(i, j, k, eint, rhoinv)
	DO(k, h.lo[2]-h.ng, h.hi[2]+h.ng){
		DO(j, h.lo[1]-h.ng, h.hi[1]+h.ng){
			DO(i, h.lo[0]-h.ng, h.hi[0]+h.ng){
				rhoinv = 1.0E0/u(i,j,k,1);
				q(i,j,k,1) = u(i,j,k,1);
				q(i,j,k,2) = u(i,j,k,2)*rhoinv;
				q(i,j,k,3) = u(i,j,k,3)*rhoinv;
				q(i,j,k,4) = u(i,j,k,4)*rhoinv;
				eint = u(i,j,k,5)*rhoinv - 0.5E0*(SQR(q(i,j,k,2)) + SQR(q(i,j,k,3)) + SQR(q(i,j,k,4)));
				q(i,j,k,5) = (GAMMA-1.0E0)*eint*u(i,j,k,1);
				q(i,j,k,6) = eint/CV;
			}
		}
	}
}
#undef u
#undef q
#undef dx
|
c46cfac39cf999ee999d97945547e84ad8806ab0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <numeric>
using namespace std;
// In-place tree reduction of 2*blockDim.x ints in global memory; the total
// ends up in d[0].  Launch with a single block of N/2 threads for N inputs,
// N a power of two.
__global__ void sumSingleBlock(int* d)
{
	int tid = threadIdx.x;

	// number of participating threads halves on each iteration
	for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>= 1, stepSize <<= 1)
	{
		// thread must be allowed to write
		if (tid < tc)
		{
			int pa = tid * stepSize * 2;
			int pb = pa + stepSize;
			d[pa] += d[pb];
		}
		// FIX: barrier so every partial sum from this round is visible
		// before the next round reads it — without it this is a data race
		// for any block wider than one warp.  Placed outside the divergent
		// `if` so all threads in the block reach it.
		__syncthreads();
	}
}
// Shared-memory variant of sumSingleBlock: stages the 2*blockDim.x inputs
// into dynamic shared memory (size passed at launch), reduces there, and
// writes the total back to d[0].  Launch with one block of N/2 threads and
// N*sizeof(int) dynamic shared bytes for N inputs, N a power of two.
__global__ void sumSingleBlock2(int* d)
{
	extern __shared__ int dcopy[];
	int tid = threadIdx.x;

	// Each thread stages two elements.
	dcopy[tid*2] = d[tid*2];
	dcopy[tid*2+1] = d[tid*2+1];
	// FIX: all staging loads must complete before any thread starts
	// reducing neighbours' elements.
	__syncthreads();

	// number of participating threads halves on each iteration
	for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>= 1, stepSize <<= 1)
	{
		// thread must be allowed to write
		if (tid < tc)
		{
			int pa = tid * stepSize * 2;
			int pb = pa + stepSize;
			dcopy[pa] += dcopy[pb];
		}
		// FIX: publish this round's partials before the next round reads
		// them (data race beyond warp width otherwise).  Outside the
		// divergent `if` so every thread reaches the barrier.
		__syncthreads();
	}

	if (tid == 0)
	{
		d[0] = dcopy[0];
	}
}
// Sums the integers 1..256 on the GPU using the shared-memory block
// reduction and prints the result (expected: 32896).
int main()
{
	hipError_t status;
	const int count = 256;                 // power of two, as the kernel requires
	const int size = count * sizeof(int);
	int* h = new int[count];
	for (int i = 0; i < count; ++i)
		h[i] = i+1;

	int* d;
	status = hipMalloc(&d, size);
	// FIX: `status` was assigned but never examined; report failures.
	if (status != hipSuccess)
	{
		cout << "hipMalloc failed: " << hipGetErrorString(status) << endl;
		delete [] h;
		return 1;
	}
	status = hipMemcpy(d,h,size, hipMemcpyHostToDevice);
	if (status != hipSuccess)
	{
		cout << "hipMemcpy H2D failed: " << hipGetErrorString(status) << endl;
		hipFree(d);
		delete [] h;
		return 1;
	}

	// One block of count/2 threads; dynamic shared memory holds all count ints.
	hipLaunchKernelGGL(( sumSingleBlock2), dim3(1),dim3(count/2),size, 0, d);

	int result;
	// Blocking copy; also synchronizes with the kernel launch above.
	status = hipMemcpy(&result,d,sizeof(int),hipMemcpyDeviceToHost);
	if (status != hipSuccess)
	{
		cout << "hipMemcpy D2H failed: " << hipGetErrorString(status) << endl;
		hipFree(d);
		delete [] h;
		return 1;
	}
	cout << "Sum is " << result << endl;

	getchar();

	hipFree(d);
	delete [] h;
	return 0;
} | c46cfac39cf999ee999d97945547e84ad8806ab0.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <numeric>
using namespace std;
// In-place tree reduction of 2*blockDim.x ints in global memory; the total
// ends up in d[0].  Launch with a single block of N/2 threads for N inputs,
// N a power of two.
__global__ void sumSingleBlock(int* d)
{
	int tid = threadIdx.x;

	// number of participating threads halves on each iteration
	for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>= 1, stepSize <<= 1)
	{
		// thread must be allowed to write
		if (tid < tc)
		{
			int pa = tid * stepSize * 2;
			int pb = pa + stepSize;
			d[pa] += d[pb];
		}
		// FIX: barrier so every partial sum from this round is visible
		// before the next round reads it — without it this is a data race
		// for any block wider than one warp.  Placed outside the divergent
		// `if` so all threads in the block reach it.
		__syncthreads();
	}
}
// Shared-memory variant of sumSingleBlock: stages the 2*blockDim.x inputs
// into dynamic shared memory (size passed at launch), reduces there, and
// writes the total back to d[0].  Launch with one block of N/2 threads and
// N*sizeof(int) dynamic shared bytes for N inputs, N a power of two.
__global__ void sumSingleBlock2(int* d)
{
	extern __shared__ int dcopy[];
	int tid = threadIdx.x;

	// Each thread stages two elements.
	dcopy[tid*2] = d[tid*2];
	dcopy[tid*2+1] = d[tid*2+1];
	// FIX: all staging loads must complete before any thread starts
	// reducing neighbours' elements.
	__syncthreads();

	// number of participating threads halves on each iteration
	for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>= 1, stepSize <<= 1)
	{
		// thread must be allowed to write
		if (tid < tc)
		{
			int pa = tid * stepSize * 2;
			int pb = pa + stepSize;
			dcopy[pa] += dcopy[pb];
		}
		// FIX: publish this round's partials before the next round reads
		// them (data race beyond warp width otherwise).  Outside the
		// divergent `if` so every thread reaches the barrier.
		__syncthreads();
	}

	if (tid == 0)
	{
		d[0] = dcopy[0];
	}
}
// Sums the integers 1..256 on the GPU using the shared-memory block
// reduction and prints the result (expected: 32896).
int main()
{
	cudaError_t status;
	const int count = 256;                 // power of two, as the kernel requires
	const int size = count * sizeof(int);
	int* h = new int[count];
	for (int i = 0; i < count; ++i)
		h[i] = i+1;

	int* d;
	status = cudaMalloc(&d, size);
	// FIX: `status` was assigned but never examined; report failures.
	if (status != cudaSuccess)
	{
		cout << "cudaMalloc failed: " << cudaGetErrorString(status) << endl;
		delete [] h;
		return 1;
	}
	status = cudaMemcpy(d,h,size, cudaMemcpyHostToDevice);
	if (status != cudaSuccess)
	{
		cout << "cudaMemcpy H2D failed: " << cudaGetErrorString(status) << endl;
		cudaFree(d);
		delete [] h;
		return 1;
	}

	// One block of count/2 threads; dynamic shared memory holds all count ints.
	sumSingleBlock2<<<1,count/2,size>>>(d);

	int result;
	// Blocking copy; also synchronizes with the kernel launch above.
	status = cudaMemcpy(&result,d,sizeof(int),cudaMemcpyDeviceToHost);
	if (status != cudaSuccess)
	{
		cout << "cudaMemcpy D2H failed: " << cudaGetErrorString(status) << endl;
		cudaFree(d);
		delete [] h;
		return 1;
	}
	cout << "Sum is " << result << endl;

	getchar();

	cudaFree(d);
	delete [] h;
	return 0;
} |
b1dc5abf00b5687aba1151e94d6f151afa222c8d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Launch configurations swept by the benchmark: {BLOCKX, BLOCKY} pairs.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Matrix shapes swept by the benchmark: {XSIZE, YSIZE} pairs.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *D = NULL;
hipMalloc(&D, XSIZE*YSIZE);
int *q = NULL;
hipMalloc(&q, XSIZE*YSIZE);
int b = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kernel2), dim3(gridBlock),dim3(threadBlock), 0, 0, D,q,b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernel2), dim3(gridBlock),dim3(threadBlock), 0, 0, D,q,b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernel2), dim3(gridBlock),dim3(threadBlock), 0, 0, D,q,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | b1dc5abf00b5687aba1151e94d6f151afa222c8d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *D = NULL;
cudaMalloc(&D, XSIZE*YSIZE);
int *q = NULL;
cudaMalloc(&q, XSIZE*YSIZE);
int b = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel2<<<gridBlock,threadBlock>>>(D,q,b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel2<<<gridBlock,threadBlock>>>(D,q,b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel2<<<gridBlock,threadBlock>>>(D,q,b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ee19137ef9a6cde05eddb9919e6bb3eff08ddb38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_CLAMP_OPERATOR_INSTANTIATE
#include "lbann/operators/math/clamp.hpp"
#include "lbann/utils/gpu/sync_info_helpers.hpp"
namespace lbann {
namespace {
/** CUDA kernel for forward prop computation. */
template <typename DataT>
__global__ void fp_kernel(DataT min,
DataT max,
El::Int height,
El::Int width,
const DataT* __restrict__ input,
El::Int input_ldim,
DataT* __restrict__ output,
El::Int output_ldim) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int size = height * width;
const El::Int num_threads = blockDim.x * gridDim.x;
for (El::Int pos = gid; pos < size; pos += num_threads) {
const auto& row = pos % height;
const auto& col = pos / height;
const auto& x = input[row + col * input_ldim];
auto& y = output[row + col * output_ldim];
if (x <= min) { y = min; }
else if (x >= max) { y = max; }
else { y = x; }
}
}
/** GPU kernel for backprop computation. */
template <typename DataT>
__global__ void bp_kernel(DataT min,
DataT max,
El::Int height,
El::Int width,
const DataT* __restrict__ input,
El::Int input_ldim,
const DataT* __restrict__ gradient_wrt_output,
El::Int gradient_wrt_output_ldim,
DataT* __restrict__ gradient_wrt_input,
El::Int gradient_wrt_input_ldim) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int size = height * width;
const El::Int num_threads = blockDim.x * gridDim.x;
for (El::Int pos = gid; pos < size; pos += num_threads) {
const auto& row = pos % height;
const auto& col = pos / height;
const auto& x = input[row + col * input_ldim];
const auto& dy = gradient_wrt_output[row + col * gradient_wrt_output_ldim];
auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim];
dx = (x <= min || x >= max) ? DataT(0.f) : dy;
}
}
/** Local forward prop computation. */
template <typename DataT>
void local_fp(DataT min,
DataT max,
El::Matrix<DataT, El::Device::GPU> const& input,
El::Matrix<DataT, El::Device::GPU>& output) {
// Get CUDA grid dimensions
// Note: Maximum CUDA grid dimension is 2^32-1
// (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications).
// TODO: HIP/ROCM notes
const El::Int height = input.Height();
const El::Int width = input.Width();
const El::Int block_dim = 256;
El::Int grid_dim = (height * width + block_dim - 1) / block_dim;
if (sizeof(El::Int) > sizeof(unsigned int)
&& grid_dim > std::numeric_limits<uint32_t>::max()) {
grid_dim = std::numeric_limits<uint32_t>::max();
}
// Launch GPU kernel
if (grid_dim > 0) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(input),
gpu::get_sync_info(output));
hydrogen::gpu::LaunchKernel(
fp_kernel<DataT>,
grid_dim, block_dim, 0, multisync,
min, max, height, width,
input.LockedBuffer(), input.LDim(),
output.Buffer(), output.LDim());
}
}
/** Local backprop computation. */
template <typename DataT>
void local_bp(DataT min,
DataT max,
El::Matrix<DataT, El::Device::GPU> const& input,
El::Matrix<DataT, El::Device::GPU> const& gradient_wrt_output,
El::Matrix<DataT, El::Device::GPU>& gradient_wrt_input) {
// Get CUDA grid dimensions
// Note: Maximum CUDA grid dimension is 2^32-1
// (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications).
// TODO: HIP/ROCM notes
const El::Int height = input.Height();
const El::Int width = input.Width();
const El::Int block_dim = 256;
El::Int grid_dim = (height * width + block_dim - 1) / block_dim;
if (sizeof(El::Int) > sizeof(unsigned int)
&& grid_dim > std::numeric_limits<uint32_t>::max()) {
grid_dim = std::numeric_limits<uint32_t>::max();
}
// Launch GPU kernel
if (grid_dim > 0) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(gradient_wrt_output),
gpu::get_sync_info(gradient_wrt_input));
hydrogen::gpu::LaunchKernel(
bp_kernel<DataT>,
grid_dim, block_dim, 0, multisync,
min, max, height, width,
input.LockedBuffer(), input.LDim(),
gradient_wrt_output.LockedBuffer(), gradient_wrt_output.LDim(),
gradient_wrt_input.Buffer(), gradient_wrt_input.LDim());
}
}
} // namespace
template <typename DataT, El::Device D>
void ClampOperator<DataT, D>::fp_compute_local(
std::vector<ConstLocalInputTensorType> inputs,
std::vector<LocalOutputTensorType> outputs) const
{
LBANN_ASSERT(inputs.size() == 1 && outputs.size() == 1);
local_fp(this->m_min,
this->m_max,
inputs[0].data(),
outputs[0].data());
}
template <typename DataT, El::Device D>
void ClampOperator<DataT, D>::bp_compute_local(
std::vector<ConstLocalInputTensorType> inputs,
std::vector<ConstLocalOutputTensorType> gradient_wrt_outputs,
std::vector<LocalInputTensorType> gradient_wrt_inputs) const
{
LBANN_ASSERT(inputs.size() == 1 && gradient_wrt_outputs.size() == 1 &&
gradient_wrt_inputs.size() == 1);
local_bp(this->m_min,
this->m_max,
inputs[0].data(),
gradient_wrt_outputs[0].data(),
gradient_wrt_inputs[0].data());
}
#define PROTO(T) template class ClampOperator<T, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| ee19137ef9a6cde05eddb9919e6bb3eff08ddb38.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2022, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <lbann-dev@llnl.gov>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_CLAMP_OPERATOR_INSTANTIATE
#include "lbann/operators/math/clamp.hpp"
#include "lbann/utils/gpu/sync_info_helpers.hpp"
namespace lbann {
namespace {
/** CUDA kernel for forward prop computation. */
template <typename DataT>
__global__ void fp_kernel(DataT min,
DataT max,
El::Int height,
El::Int width,
const DataT* __restrict__ input,
El::Int input_ldim,
DataT* __restrict__ output,
El::Int output_ldim) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int size = height * width;
const El::Int num_threads = blockDim.x * gridDim.x;
for (El::Int pos = gid; pos < size; pos += num_threads) {
const auto& row = pos % height;
const auto& col = pos / height;
const auto& x = input[row + col * input_ldim];
auto& y = output[row + col * output_ldim];
if (x <= min) { y = min; }
else if (x >= max) { y = max; }
else { y = x; }
}
}
/** GPU kernel for backprop computation. */
template <typename DataT>
__global__ void bp_kernel(DataT min,
DataT max,
El::Int height,
El::Int width,
const DataT* __restrict__ input,
El::Int input_ldim,
const DataT* __restrict__ gradient_wrt_output,
El::Int gradient_wrt_output_ldim,
DataT* __restrict__ gradient_wrt_input,
El::Int gradient_wrt_input_ldim) {
const El::Int gid = threadIdx.x + blockIdx.x * blockDim.x;
const El::Int size = height * width;
const El::Int num_threads = blockDim.x * gridDim.x;
for (El::Int pos = gid; pos < size; pos += num_threads) {
const auto& row = pos % height;
const auto& col = pos / height;
const auto& x = input[row + col * input_ldim];
const auto& dy = gradient_wrt_output[row + col * gradient_wrt_output_ldim];
auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim];
dx = (x <= min || x >= max) ? DataT(0.f) : dy;
}
}
/** Local forward prop computation. */
template <typename DataT>
void local_fp(DataT min,
DataT max,
El::Matrix<DataT, El::Device::GPU> const& input,
El::Matrix<DataT, El::Device::GPU>& output) {
// Get CUDA grid dimensions
// Note: Maximum CUDA grid dimension is 2^32-1
// (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications).
// TODO: HIP/ROCM notes
const El::Int height = input.Height();
const El::Int width = input.Width();
const El::Int block_dim = 256;
El::Int grid_dim = (height * width + block_dim - 1) / block_dim;
if (sizeof(El::Int) > sizeof(unsigned int)
&& grid_dim > std::numeric_limits<uint32_t>::max()) {
grid_dim = std::numeric_limits<uint32_t>::max();
}
// Launch GPU kernel
if (grid_dim > 0) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(input),
gpu::get_sync_info(output));
hydrogen::gpu::LaunchKernel(
fp_kernel<DataT>,
grid_dim, block_dim, 0, multisync,
min, max, height, width,
input.LockedBuffer(), input.LDim(),
output.Buffer(), output.LDim());
}
}
/** Local backprop computation. */
template <typename DataT>
void local_bp(DataT min,
DataT max,
El::Matrix<DataT, El::Device::GPU> const& input,
El::Matrix<DataT, El::Device::GPU> const& gradient_wrt_output,
El::Matrix<DataT, El::Device::GPU>& gradient_wrt_input) {
// Get CUDA grid dimensions
// Note: Maximum CUDA grid dimension is 2^32-1
// (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications).
// TODO: HIP/ROCM notes
const El::Int height = input.Height();
const El::Int width = input.Width();
const El::Int block_dim = 256;
El::Int grid_dim = (height * width + block_dim - 1) / block_dim;
if (sizeof(El::Int) > sizeof(unsigned int)
&& grid_dim > std::numeric_limits<uint32_t>::max()) {
grid_dim = std::numeric_limits<uint32_t>::max();
}
// Launch GPU kernel
if (grid_dim > 0) {
auto multisync = El::MakeMultiSync(gpu::get_sync_info(gradient_wrt_output),
gpu::get_sync_info(gradient_wrt_input));
hydrogen::gpu::LaunchKernel(
bp_kernel<DataT>,
grid_dim, block_dim, 0, multisync,
min, max, height, width,
input.LockedBuffer(), input.LDim(),
gradient_wrt_output.LockedBuffer(), gradient_wrt_output.LDim(),
gradient_wrt_input.Buffer(), gradient_wrt_input.LDim());
}
}
} // namespace
template <typename DataT, El::Device D>
void ClampOperator<DataT, D>::fp_compute_local(
std::vector<ConstLocalInputTensorType> inputs,
std::vector<LocalOutputTensorType> outputs) const
{
LBANN_ASSERT(inputs.size() == 1 && outputs.size() == 1);
local_fp(this->m_min,
this->m_max,
inputs[0].data(),
outputs[0].data());
}
template <typename DataT, El::Device D>
void ClampOperator<DataT, D>::bp_compute_local(
std::vector<ConstLocalInputTensorType> inputs,
std::vector<ConstLocalOutputTensorType> gradient_wrt_outputs,
std::vector<LocalInputTensorType> gradient_wrt_inputs) const
{
LBANN_ASSERT(inputs.size() == 1 && gradient_wrt_outputs.size() == 1 &&
gradient_wrt_inputs.size() == 1);
local_bp(this->m_min,
this->m_max,
inputs[0].data(),
gradient_wrt_outputs[0].data(),
gradient_wrt_inputs[0].data());
}
#define PROTO(T) template class ClampOperator<T, El::Device::GPU>
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
cd1bb264d9dc6b8f07bf8a42ec5fb2a1d3ed0bb4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
hipError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
// host code, entry point
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for
// profiling and tracing tools such as Nsight and Visual
// Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "cudaMaloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// hipDeviceSynchronize waits for the kernel to finish, and
// returns any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
| cd1bb264d9dc6b8f07bf8a42ec5fb2a1d3ed0bb4.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
// host code, entry point
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaThreadExit must be called before exiting in order for
// profiling and tracing tools such as Nsight and Visual
// Profiler to show complete traces.
cudaStatus = cudaThreadExit();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaThreadExit failed!");
return 1;
}
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMaloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// cudaThreadSynchronize waits for the kernel to finish, and
// returns any errors encountered during the launch.
cudaStatus = cudaThreadSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaThreadSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
d8e0b645d7fa6df65e07adcd3f90f35bbea721a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void
velocity_one(float2* psi1, float2* psi2, int resy, int resz, int num, float hbar, float pi, float* vx, float* vy, float* vz)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float2 c1 = make_float2(psi1[i].x, -psi1[i].y);
float2 c2 = make_float2(psi2[i].x, -psi2[i].y);
float2 mul1 = psi1[(i / resz) * resz + (i + 1) % resz];
float2 mul2 = psi2[(i / resz) * resz + (i + 1) % resz];
float2 summ = make_float2((c1.x * mul1.x - c1.y * mul1.y + c2.x * mul2.x - c2.y * mul2.y),
(c1.x * mul1.y + c1.y * mul1.x + c2.x * mul2.y + c2.y * mul2.x));
float result = (float)atan2(summ.y, summ.x);
if (abs(summ.y) < 0.00001) {
result *= -1;
}
vz[i] = (float)result * hbar;
mul1 = psi1[i - ((i / resz) % resy) * resz + (((i + resz) / resz) % resy) * resz];
mul2 = psi2[i - ((i / resz) % resy) * resz + (((i + resz) / resz) % resy) * resz];
summ = make_float2((c1.x * mul1.x - c1.y * mul1.y + c2.x * mul2.x - c2.y * mul2.y),
(c1.x * mul1.y + c1.y * mul1.x + c2.x * mul2.y + c2.y * mul2.x));
result = (float)atan2(summ.y, summ.x);
if (abs(summ.y) < 0.00001) {
result *= -1;
}
vy[i] = (float)result * hbar;
mul1 = psi1[(i + resz * resy) % num];
mul2 = psi2[(i + resz * resy) % num];
summ = make_float2((c1.x * mul1.x - c1.y * mul1.y + c2.x * mul2.x - c2.y * mul2.y),
(c1.x * mul1.y + c1.y * mul1.x + c2.x * mul2.y + c2.y * mul2.x));
result = (float)atan2(summ.y, summ.x);
if (abs(summ.y) < 0.00001) {
result *= -1;
}
vx[i] = result * hbar;
} | d8e0b645d7fa6df65e07adcd3f90f35bbea721a4.cu | extern "C" __global__ void
velocity_one(float2* psi1, float2* psi2, int resy, int resz, int num, float hbar, float pi, float* vx, float* vy, float* vz)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
float2 c1 = make_float2(psi1[i].x, -psi1[i].y);
float2 c2 = make_float2(psi2[i].x, -psi2[i].y);
float2 mul1 = psi1[(i / resz) * resz + (i + 1) % resz];
float2 mul2 = psi2[(i / resz) * resz + (i + 1) % resz];
float2 summ = make_float2((c1.x * mul1.x - c1.y * mul1.y + c2.x * mul2.x - c2.y * mul2.y),
(c1.x * mul1.y + c1.y * mul1.x + c2.x * mul2.y + c2.y * mul2.x));
float result = (float)atan2(summ.y, summ.x);
if (abs(summ.y) < 0.00001) {
result *= -1;
}
vz[i] = (float)result * hbar;
mul1 = psi1[i - ((i / resz) % resy) * resz + (((i + resz) / resz) % resy) * resz];
mul2 = psi2[i - ((i / resz) % resy) * resz + (((i + resz) / resz) % resy) * resz];
summ = make_float2((c1.x * mul1.x - c1.y * mul1.y + c2.x * mul2.x - c2.y * mul2.y),
(c1.x * mul1.y + c1.y * mul1.x + c2.x * mul2.y + c2.y * mul2.x));
result = (float)atan2(summ.y, summ.x);
if (abs(summ.y) < 0.00001) {
result *= -1;
}
vy[i] = (float)result * hbar;
mul1 = psi1[(i + resz * resy) % num];
mul2 = psi2[(i + resz * resy) % num];
summ = make_float2((c1.x * mul1.x - c1.y * mul1.y + c2.x * mul2.x - c2.y * mul2.y),
(c1.x * mul1.y + c1.y * mul1.x + c2.x * mul2.y + c2.y * mul2.x));
result = (float)atan2(summ.y, summ.x);
if (abs(summ.y) < 0.00001) {
result *= -1;
}
vx[i] = result * hbar;
} |
bd64e045ed9f1f6a331204c1dee07b8f87eced65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#define N 20
#define BLOCK_DIM 10
using namespace std;
#define TILE_WIDTH 4
/*
// EJERCICIO DE LA SECCION 4.7
__global__
void matMultKernel_tile_siete_siente(int *d_M, int *d_N, int *d_P, int Width){
extern __shared__ int Mds[][];
extern __shared__ int Nds[][];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by*TILE_WIDTH + ty;
int Col = bx*TILE_WIDTH + tx;
float Pvalue = 0;
int ph,k;
for(ph = 0; ph < Width/TILE_WIDTH; ++ph){
// Collaborative loading of M and N tiles into shared memory
if ( (Row<Width) && (ph*TILE_WIDTH+tx)<Width )
Mds[ty][tx] = d_M[Row*Width + ph*TILE_WIDTH + tx];
if ( (ph*TILE_WIDTH+ty)<Width && Col<Width )
Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty) + Col];
__syncthreads();
for(k = 0; k < TILE_WIDTH; ++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if ( (Row<Width) && (Col<Width))
d_P[Row*Width + Col] = Pvalue;
}
*/
// EJERCICIO DE LA SECCION 4.6
__global__
void matMultKernel_tile_seis(int *d_M, int *d_N, int *d_P, int Width){
__shared__ int Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ int Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by*TILE_WIDTH + ty;
int Col = bx*TILE_WIDTH + tx;
float Pvalue = 0;
int ph,k;
for(ph = 0; ph < Width/TILE_WIDTH; ++ph){
// Collaborative loading of M and N tiles into shared memory
if ( (Row<Width) && (ph*TILE_WIDTH+tx)<Width )
Mds[ty][tx] = d_M[Row*Width + ph*TILE_WIDTH + tx];
if ( (ph*TILE_WIDTH+ty)<Width && Col<Width )
Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty) + Col];
__syncthreads();
for(k = 0; k < TILE_WIDTH; ++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if ( (Row<Width) && (Col<Width))
d_P[Row*Width + Col] = Pvalue;
}
// EJERCICIO DE LA SECCION 4.4
__global__
void matMultKernel_tile(int *d_M, int *d_N, int *d_P, int Width){
__shared__ int Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ int Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by*TILE_WIDTH + ty;
int Col = bx*TILE_WIDTH + tx;
float Pvalue = 0;
int ph,k;
for(ph = 0; ph < Width/TILE_WIDTH; ++ph){
Mds[ty][tx] = d_M[Row*Width+ph*TILE_WIDTH + tx];
Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty) * Width + Col];
__syncthreads();
for(k = 0; k < TILE_WIDTH; ++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
d_P[Row*Width + Col] = Pvalue;
}
// EJERCICIO DE LA SECCION 4.2
__global__
void matMultKernel(int *d_M, int *d_N, int *d_P, int Width){
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
int k = 0;
if(Row < Width && Col < Width){
float Pvalue = 0;
for(k = 0; k < Width; ++k){
Pvalue += d_M[Row*Width + k] * d_N[k*Width+Col];
}
d_P[Row*Width+Col] = Pvalue;
}
}
__global__
void sum_Matrices_Normal (int *a, int *b, int *c) {
int columna = blockIdx.x * blockDim.x + threadIdx.x;
int fila = blockIdx.y * blockDim.y + threadIdx.y;
int id = columna + fila * N;
if (columna < N && fila < N) {
c[id] = a[id] + b[id];
}
}
void imprimir_Matriz(int matrix[N][N]){
for(int i=0;i<N;i++){
for(int j=0; j<N; j++){
cout<<matrix[i][j]<<' ';
}
cout<<endl;
}
}
void inicio_matrices(){
int a[N][N], b[N][N], c[N][N];
int *dev_a, *dev_b, *dev_c;
int size = N * N * sizeof(int);
srand(time(NULL));
for(int i=0; i<N; i++)
for (int j=0; j<N; j++){
a[i][j] = 1;
b[i][j] = 1;
}
imprimir_Matriz(a);
cout<<endl;
imprimir_Matriz(b);
hipMalloc((void**)&dev_a, size);
hipMalloc((void**)&dev_b, size);
hipMalloc((void**)&dev_c, size);
hipMemcpy(dev_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size, hipMemcpyHostToDevice);
//dim3 dimBlock(N,N); // cuantos threads se ejecutaran juntos y que compartiran memoria en un sigle proccessor
//dim3 dimGrid(1,1); // un grupo de thread block que se ejecutan en un sigle cuda program logically in parallel
//dim3 dimGrid(ceil(N/1024.0),ceil(N/1024.0),1);
//dim3 dimBlock(1024,1024,1); /*Dimensinde 2 X 2, eje Z desactivado con 1*/
dim3 dimGrid(ceil(N/4.0),ceil(N/4.0),1);
dim3 dimBlock(TILE_WIDTH,TILE_WIDTH,1);
hipLaunchKernelGGL(( matMultKernel_tile_seis), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_a,dev_b,dev_c, N);
//for siete
//size_t size= calculate_appropriate_SM_usage(dev_prop.sharedMemPerBlock,...);
//matMultKernel_tile_siete_siente<<<dimGrid, dimBlock, size>>>(Md, Nd, Pd, Width);
hipDeviceSynchronize();
hipMemcpy(c, dev_c, size, hipMemcpyDeviceToHost);
cout<<endl;
for(int i=0; i<N; i++){
for (int j=0; j<N; j++){
printf("%d ", c[i][j] );
}
printf("\n");
}
hipFree(dev_a); hipFree(dev_b); hipFree(dev_c);
}
int main() {
inicio_matrices();
return 0;
} | bd64e045ed9f1f6a331204c1dee07b8f87eced65.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#define N 20
#define BLOCK_DIM 10
using namespace std;
#define TILE_WIDTH 4
/*
// EJERCICIO DE LA SECCION 4.7
__global__
void matMultKernel_tile_siete_siente(int *d_M, int *d_N, int *d_P, int Width){
extern __shared__ int Mds[][];
extern __shared__ int Nds[][];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by*TILE_WIDTH + ty;
int Col = bx*TILE_WIDTH + tx;
float Pvalue = 0;
int ph,k;
for(ph = 0; ph < Width/TILE_WIDTH; ++ph){
// Collaborative loading of M and N tiles into shared memory
if ( (Row<Width) && (ph*TILE_WIDTH+tx)<Width )
Mds[ty][tx] = d_M[Row*Width + ph*TILE_WIDTH + tx];
if ( (ph*TILE_WIDTH+ty)<Width && Col<Width )
Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty) + Col];
__syncthreads();
for(k = 0; k < TILE_WIDTH; ++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if ( (Row<Width) && (Col<Width))
d_P[Row*Width + Col] = Pvalue;
}
*/
// EJERCICIO DE LA SECCION 4.6
__global__
void matMultKernel_tile_seis(int *d_M, int *d_N, int *d_P, int Width){
__shared__ int Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ int Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by*TILE_WIDTH + ty;
int Col = bx*TILE_WIDTH + tx;
float Pvalue = 0;
int ph,k;
for(ph = 0; ph < Width/TILE_WIDTH; ++ph){
// Collaborative loading of M and N tiles into shared memory
if ( (Row<Width) && (ph*TILE_WIDTH+tx)<Width )
Mds[ty][tx] = d_M[Row*Width + ph*TILE_WIDTH + tx];
if ( (ph*TILE_WIDTH+ty)<Width && Col<Width )
Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty) + Col];
__syncthreads();
for(k = 0; k < TILE_WIDTH; ++k){
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
if ( (Row<Width) && (Col<Width))
d_P[Row*Width + Col] = Pvalue;
}
// EJERCICIO DE LA SECCION 4.4
// Tiled matrix multiplication without boundary guards: d_P = d_M * d_N for
// square Width x Width int matrices. Assumes Width is an exact multiple of
// TILE_WIDTH and blockDim = (TILE_WIDTH, TILE_WIDTH).
__global__
void matMultKernel_tile(int *d_M, int *d_N, int *d_P, int Width){
    // Per-block shared-memory tiles of the two operands.
    __shared__ int Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ int Nds[TILE_WIDTH][TILE_WIDTH];
    int tx = threadIdx.x, ty = threadIdx.y;
    int Row = blockIdx.y * TILE_WIDTH + ty;
    int Col = blockIdx.x * TILE_WIDTH + tx;
    float Pvalue = 0;
    // March the tile window across the shared dimension, one strip per phase.
    for(int ph = 0; ph < Width/TILE_WIDTH; ++ph){
        Mds[ty][tx] = d_M[Row*Width + ph*TILE_WIDTH + tx];
        Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty) * Width + Col];
        __syncthreads();   // barrier between tile write and tile read
        for(int k = 0; k < TILE_WIDTH; ++k)
            Pvalue += Mds[ty][k] * Nds[k][tx];
        __syncthreads();   // barrier before the tiles are overwritten
    }
    d_P[Row*Width + Col] = Pvalue;
}
// EJERCICIO DE LA SECCION 4.2
// Naive matrix multiplication: d_P = d_M * d_N for square Width x Width
// int matrices (row-major), one thread per output element. Works for any
// Width thanks to the bounds guard.
__global__
void matMultKernel(int *d_M, int *d_N, int *d_P, int Width){
    int Row = blockIdx.y*blockDim.y + threadIdx.y;
    int Col = blockIdx.x*blockDim.x + threadIdx.x;
    if(Row < Width && Col < Width){
        // Accumulate in int: operands and destination are int, and the
        // original float accumulator silently rounded once the running sum
        // exceeded 2^24 before being truncated into d_P.
        int Pvalue = 0;
        for(int k = 0; k < Width; ++k){
            Pvalue += d_M[Row*Width + k] * d_N[k*Width + Col];
        }
        d_P[Row*Width + Col] = Pvalue;
    }
}
// Element-wise addition of two N x N int matrices stored row-major:
// c = a + b. One thread per element; threads mapped outside the matrix
// (when the grid overshoots) do nothing.
__global__
void sum_Matrices_Normal (int *a, int *b, int *c) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= N || row >= N)
        return;                      // guard the grid tail
    int idx = row * N + col;
    c[idx] = a[idx] + b[idx];
}
// Print an N x N int matrix to stdout, one row per line, values separated
// by single spaces.
void imprimir_Matriz(int matrix[N][N]){
    for(int fila = 0; fila < N; ++fila){
        for(int col = 0; col < N; ++col)
            cout << matrix[fila][col] << ' ';
        cout << endl;
    }
}
// Host driver: fills two N x N matrices with ones, multiplies them on the
// GPU with the boundary-guarded tiled kernel, and prints the operands and
// the result before releasing device memory.
void inicio_matrices(){
    int a[N][N], b[N][N], c[N][N];
    int *dev_a, *dev_b, *dev_c;
    int size = N * N * sizeof(int);
    srand(time(NULL));  // kept for parity with the original; the fill below is constant
    for(int i=0; i<N; i++)
        for (int j=0; j<N; j++){
            a[i][j] = 1;
            b[i][j] = 1;
        }
    imprimir_Matriz(a);
    cout<<endl;
    imprimir_Matriz(b);
    cudaMalloc((void**)&dev_a, size);
    cudaMalloc((void**)&dev_b, size);
    cudaMalloc((void**)&dev_c, size);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
    // Grid sized from TILE_WIDTH (was hard-coded as ceil(N/4.0), which
    // silently breaks if TILE_WIDTH is ever changed); integer ceil-division
    // replaces the float ceil() call.
    dim3 dimGrid((N + TILE_WIDTH - 1) / TILE_WIDTH,
                 (N + TILE_WIDTH - 1) / TILE_WIDTH, 1);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
    matMultKernel_tile_seis<<<dimGrid,dimBlock>>>(dev_a, dev_b, dev_c, N);
    // Launch-configuration errors surface only via cudaGetLastError;
    // in-kernel execution errors surface at the synchronize.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        printf("kernel execution failed: %s\n", cudaGetErrorString(err));
    cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
    cout<<endl;
    for(int i=0; i<N; i++){
        for (int j=0; j<N; j++){
            printf("%d ", c[i][j] );
        }
        printf("\n");
    }
    cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
}
// Program entry point: runs the GPU matrix-multiplication demo and exits.
int main() {
inicio_matrices();
return 0;
}
box3d1r-32x32-3-128_kernel.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "box3d1r-32x32-3-128_kernel.hu"
// Helper behind the generated __SBREF macros: reads one element of a
// shared-memory buffer through a function call so the long index
// expressions in the macro bodies stay a single self-contained argument.
__device__ float __sbref_wrap(float *sb, size_t index)
{
    return sb[index];
}
// Auto-generated (AN5D-style, per the AN5D_TYPE macro) time-tiled 27-point
// box stencil kernel of radius 1 on a dimsize^3 grid. This variant fuses
// __side0Len == 3 timesteps per launch: values flow through a register
// pipeline (__reg_1_* -> __reg_2_* -> __reg_3_*) along dimension c1, while
// the (c2, c3) plane of the current c1 slice lives in a double-buffered
// shared-memory tile. A holds two time copies selected by (c0 % 2); the
// result of step c0 is written into the (c0 + 1) % 2 half.
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Iteration-space extents: c0 is time; c1/c2/c3 are the three spatial
// dimensions, each with a 1-cell pad (halo) on both sides.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
// Tile shape: 3 fused timesteps, 128-long strips along c1, 26x26 output
// tiles in (c2, c3) grown by the time-skewing overlap (__OlLen*) to 32x32.
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
// Flatten the thread block onto the (c2, c3) plane and decode which
// c1-strip / (c2, c3)-tile this block covers from the 1-D grid index.
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
// Register pipeline: the freshly loaded plane plus a rotating 3-slot set
// per fused timestep (one slot per c1 offset of the radius-1 stencil).
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
// Double-buffered shared-memory plane of the current c1 slice.
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
// Validity masks: load within the padded tile; update within the interior;
// each __writeValidN shrinks the valid region by one more halo ring, since
// each fused timestep consumes a ring of its input.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// Generated helper macros: __LOAD pulls one c1-plane element into a
// register; __CALCEXPR_{0,1,2} are the three per-plane partial stencils
// (one per c1 offset) whose coefficient sets differ; __CALCn advances
// pipeline stage n; __STORE commits a finished plane. Note __CALCEXPR_1/2
// accumulate (+=) onto a value started by __CALCEXPR_0.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((0.0375f * (__REGREF(__a, 0, 0))) + (0.0371f * (__SBREF(__a_sb, -1, -1)))) + (0.0372f * (__SBREF(__a_sb, -1, 0)))) + (0.0373f * (__SBREF(__a_sb, -1, 1)))) + (0.0374f * (__SBREF(__a_sb, 0, -1)))) + (0.0376f * (__SBREF(__a_sb, 0, 1)))) + (0.0377f * (__SBREF(__a_sb, 1, -1)))) + (0.0378f * (__SBREF(__a_sb, 1, 0)))) + (0.0379f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.0355f * (__REGREF(__a, 0, 0)))) + (0.0351f * (__SBREF(__a_sb, -1, -1)))) + (0.0352f * (__SBREF(__a_sb, -1, 0)))) + (0.0353f * (__SBREF(__a_sb, -1, 1)))) + (0.0354f * (__SBREF(__a_sb, 0, -1)))) + (0.0356f * (__SBREF(__a_sb, 0, 1)))) + (0.0357f * (__SBREF(__a_sb, 1, -1)))) + (0.0358f * (__SBREF(__a_sb, 1, 0)))) + (0.0359f * (__SBREF(__a_sb, 1, 1))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((0.0365f * (__REGREF(__a, 0, 0)))) + (0.0361f * (__SBREF(__a_sb, -1, -1)))) + (0.0362f * (__SBREF(__a_sb, -1, 0)))) + (0.0363f * (__SBREF(__a_sb, -1, 1)))) + (0.0364f * (__SBREF(__a_sb, 0, -1)))) + (0.0366f * (__SBREF(__a_sb, 0, 1)))) + (0.0367f * (__SBREF(__a_sb, 1, -1)))) + (0.0368f * (__SBREF(__a_sb, 1, 0)))) + (0.0369f * (__SBREF(__a_sb, 1, 1)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
// Prologue: prime the 3-stage pipeline. The first c1 strip (__c1Id == 0)
// replays the boundary plane into the deeper stages; interior strips just
// stream the first 7 planes in without storing until the pipe is full.
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(1, __reg_3_1);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(2, __reg_3_2);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
__DB_SWITCH(); __syncthreads();
}
// Reset the shared-memory buffer pointer before the steady-state loop.
__a_sb = __a_sb_double + __blockSize * 0;
// Last strip along c1: steady-state loop plus a drain epilogue whose
// variant depends on how many planes remain (__h + 1/2/3 == remaining).
if (__c1Id == __side1Num - 1)
{
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0);
__STORE(__h + 1, __reg_3_2);
}
}
// Interior strip: same steady-state loop, then up to three trailing planes
// bounded by the overlapped strip length __side1LenOl.
else
{
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
}
}
// Same auto-generated box-stencil family as kernel0_3, but fusing only
// __side0Len == 2 timesteps per launch (two pipeline stages __reg_1_* and
// __reg_2_*), with a correspondingly larger 28x28 (c2, c3) output tile.
// Used when fewer than 3 timesteps remain to be advanced.
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Iteration-space extents with 1-cell halo padding per spatial dimension.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
// Thread-to-plane and block-to-tile decoding (same scheme as kernel0_3).
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
// Two-stage register pipeline along c1.
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
// Double-buffered shared-memory plane of the current c1 slice.
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
// Validity masks; each fused timestep shrinks the writable region by one
// halo ring, so stores are gated on __writeValid2.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// Generated helper macros (identical stencil coefficients to kernel0_3):
// __LOAD reads one plane element; __CALCEXPR_{0,1,2} are the three c1-offset
// partial stencils; __CALCn advances pipeline stage n; __STORE commits.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((0.0375f * (__REGREF(__a, 0, 0))) + (0.0371f * (__SBREF(__a_sb, -1, -1)))) + (0.0372f * (__SBREF(__a_sb, -1, 0)))) + (0.0373f * (__SBREF(__a_sb, -1, 1)))) + (0.0374f * (__SBREF(__a_sb, 0, -1)))) + (0.0376f * (__SBREF(__a_sb, 0, 1)))) + (0.0377f * (__SBREF(__a_sb, 1, -1)))) + (0.0378f * (__SBREF(__a_sb, 1, 0)))) + (0.0379f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.0355f * (__REGREF(__a, 0, 0)))) + (0.0351f * (__SBREF(__a_sb, -1, -1)))) + (0.0352f * (__SBREF(__a_sb, -1, 0)))) + (0.0353f * (__SBREF(__a_sb, -1, 1)))) + (0.0354f * (__SBREF(__a_sb, 0, -1)))) + (0.0356f * (__SBREF(__a_sb, 0, 1)))) + (0.0357f * (__SBREF(__a_sb, 1, -1)))) + (0.0358f * (__SBREF(__a_sb, 1, 0)))) + (0.0359f * (__SBREF(__a_sb, 1, 1))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((0.0365f * (__REGREF(__a, 0, 0)))) + (0.0361f * (__SBREF(__a_sb, -1, -1)))) + (0.0362f * (__SBREF(__a_sb, -1, 0)))) + (0.0363f * (__SBREF(__a_sb, -1, 1)))) + (0.0364f * (__SBREF(__a_sb, 0, -1)))) + (0.0366f * (__SBREF(__a_sb, 0, 1)))) + (0.0367f * (__SBREF(__a_sb, 1, -1)))) + (0.0368f * (__SBREF(__a_sb, 1, 0)))) + (0.0369f * (__SBREF(__a_sb, 1, 1)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
// Prologue: prime the 2-stage pipeline (boundary replay for the first
// c1 strip, plain streaming for interior strips).
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(1, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
__DB_SWITCH(); __syncthreads();
}
// Position the shared-memory buffer for the steady-state loop.
__a_sb = __a_sb_double + __blockSize * 1;
// Last strip along c1: steady-state loop plus a drain epilogue whose
// variant depends on how many planes remain.
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__STORE(__h + 1, __reg_2_0);
}
}
// Interior strip: steady-state loop, then up to three trailing planes
// bounded by the overlapped strip length __side1LenOl.
else
{
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
}
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((0.0375f * (__REGREF(__a, 0, 0))) + (0.0371f * (__SBREF(__a_sb, -1, -1)))) + (0.0372f * (__SBREF(__a_sb, -1, 0)))) + (0.0373f * (__SBREF(__a_sb, -1, 1)))) + (0.0374f * (__SBREF(__a_sb, 0, -1)))) + (0.0376f * (__SBREF(__a_sb, 0, 1)))) + (0.0377f * (__SBREF(__a_sb, 1, -1)))) + (0.0378f * (__SBREF(__a_sb, 1, 0)))) + (0.0379f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.0355f * (__REGREF(__a, 0, 0)))) + (0.0351f * (__SBREF(__a_sb, -1, -1)))) + (0.0352f * (__SBREF(__a_sb, -1, 0)))) + (0.0353f * (__SBREF(__a_sb, -1, 1)))) + (0.0354f * (__SBREF(__a_sb, 0, -1)))) + (0.0356f * (__SBREF(__a_sb, 0, 1)))) + (0.0357f * (__SBREF(__a_sb, 1, -1)))) + (0.0358f * (__SBREF(__a_sb, 1, 0)))) + (0.0359f * (__SBREF(__a_sb, 1, 1))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((0.0365f * (__REGREF(__a, 0, 0)))) + (0.0361f * (__SBREF(__a_sb, -1, -1)))) + (0.0362f * (__SBREF(__a_sb, -1, 0)))) + (0.0363f * (__SBREF(__a_sb, -1, 1)))) + (0.0364f * (__SBREF(__a_sb, 0, -1)))) + (0.0366f * (__SBREF(__a_sb, 0, 1)))) + (0.0367f * (__SBREF(__a_sb, 1, -1)))) + (0.0368f * (__SBREF(__a_sb, 1, 0)))) + (0.0369f * (__SBREF(__a_sb, 1, 1)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
}
else
{
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
}
}
#include "box3d1r-32x32-3-128_kernel.hu"
// Shared-memory read helper: returns sb[index].  The generated __SBREF macro
// funnels every shared-tile neighbor access through this single typed load
// site after applying a signed (i2, i3) offset to the flattened thread index.
__device__ float __sbref_wrap(float *sb, size_t index)
{
    const float value = sb[index];
    return value;
}
// Three-step time-fused 3-D stencil kernel (__side0Len == 3): each block
// streams a tile of A along the c1 axis and applies the stencil three times
// in one pass, using three rotating register pipelines (__reg_1_*, __reg_2_*,
// __reg_3_*), one per fused time step.  A holds two time copies selected by
// (c0 % 2): planes for step c0 are read from one half (__LOAD) and results
// for the fused steps land in the other half (__DEST via __STORE).
// Launch: 1-D grid; blockIdx.x encodes the (c1, c2, c3) tile via
// __side{1,2,3}Num; each thread owns one (c2, c3) column of the overlapped
// 32x32 plane (__side2LenOl == __side3LenOl == 26 + 2*3 == 32).
// NOTE(review): this is machine-generated (AN5D-style) code; comments below
// are for navigation only — do not hand-edit the macro bodies.
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Problem extents and padding: interior points only, one-cell halo per axis.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
// Tiling: 3 fused time steps => 3 overlap (halo) cells on each tile edge.
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
// Decode this thread's (c2, c3) point and this block's c1-tile id.
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
// __reg_0 holds the freshly loaded input plane value; __reg_k_{0,1,2} form a
// 3-deep rotating accumulator set for fused time step k (k = 1..3).
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
// Double-buffered shared-memory copy of the current plane (one float/thread);
// __CALCSETUP swaps buffers (__DB_SWITCH), publishes, and barriers.
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
// __loadValid: thread maps inside the padded input; __writeValidK: thread is
// at least K halo cells from the tile edge, so K stencil applications are
// valid there; __storeValid: all three steps valid => safe to commit output.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// Generated helpers: __LOAD streams plane h of the current time copy into a
// register; __CALCEXPR_{0,1,2} are 9-point (c2 x c3) weighted sums — summed
// over three consecutive c1 planes they form the full 27-point box stencil
// (CALCEXPR_0 initializes an output, _1/_2 accumulate into in-flight ones);
// __CALCk runs pipeline stage k, passing the input through unchanged where
// stage k is not valid; __STORE commits one finished plane via __DEST.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((0.0375f * (__REGREF(__a, 0, 0))) + (0.0371f * (__SBREF(__a_sb, -1, -1)))) + (0.0372f * (__SBREF(__a_sb, -1, 0)))) + (0.0373f * (__SBREF(__a_sb, -1, 1)))) + (0.0374f * (__SBREF(__a_sb, 0, -1)))) + (0.0376f * (__SBREF(__a_sb, 0, 1)))) + (0.0377f * (__SBREF(__a_sb, 1, -1)))) + (0.0378f * (__SBREF(__a_sb, 1, 0)))) + (0.0379f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.0355f * (__REGREF(__a, 0, 0)))) + (0.0351f * (__SBREF(__a_sb, -1, -1)))) + (0.0352f * (__SBREF(__a_sb, -1, 0)))) + (0.0353f * (__SBREF(__a_sb, -1, 1)))) + (0.0354f * (__SBREF(__a_sb, 0, -1)))) + (0.0356f * (__SBREF(__a_sb, 0, 1)))) + (0.0357f * (__SBREF(__a_sb, 1, -1)))) + (0.0358f * (__SBREF(__a_sb, 1, 0)))) + (0.0359f * (__SBREF(__a_sb, 1, 1))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((0.0365f * (__REGREF(__a, 0, 0)))) + (0.0361f * (__SBREF(__a_sb, -1, -1)))) + (0.0362f * (__SBREF(__a_sb, -1, 0)))) + (0.0363f * (__SBREF(__a_sb, -1, 1)))) + (0.0364f * (__SBREF(__a_sb, 0, -1)))) + (0.0366f * (__SBREF(__a_sb, 0, 1)))) + (0.0367f * (__SBREF(__a_sb, 1, -1)))) + (0.0368f * (__SBREF(__a_sb, 1, 0)))) + (0.0369f * (__SBREF(__a_sb, 1, 1)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
// Prologue: prime the three pipelines over planes 0..6.
if (__c1Id == 0)
{
// First c1 tile: plane 0 is the lower boundary plane, so it also seeds the
// "previous" slot of every stage directly (no real halo planes below it).
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(1, __reg_3_1);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(2, __reg_3_2);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
}
else
{
// Interior c1 tile: the pipelines fill naturally from the overlapped halo
// planes; stages 2/3 start once their inputs become available.
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
__DB_SWITCH(); __syncthreads();
}
// Re-align the shared-memory double-buffer pointer for the main loop.
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
// Last c1 tile: run full 3-plane iterations while at least 4 planes remain,
// then drain the pipelines with a remainder-specific epilogue that feeds
// pass-through values at the upper boundary.
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
// Steady state: each iteration consumes 3 input planes and stores 3
// finished planes; the register roles rotate with period 3.
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
// Epilogue: dispatch on how many planes remain (1, 2, or 3).
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0);
__STORE(__h + 1, __reg_3_2);
}
}
else
{
// Interior c1 tile: fixed trip count over the overlapped extent; early
// returns stop the block once all __side1LenOl planes are consumed.
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
}
}
// Two-step time-fused variant of the same generated stencil (__side0Len == 2):
// two rotating register pipelines (__reg_1_*, __reg_2_*) apply the stencil
// twice per pass over a c1-streamed tile of A.  Same double-buffered time
// layout (read (c0 % 2) half, write the other half via __DEST).
// Launch: 1-D grid; each thread owns one (c2, c3) point of the overlapped
// 32x32 plane (__side2LenOl == __side3LenOl == 28 + 2*2 == 32).
// NOTE(review): machine-generated (AN5D-style) code; comments are for
// navigation only — do not hand-edit the macro bodies.
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
// Problem extents and padding: interior points only, one-cell halo per axis.
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
// Tiling: 2 fused time steps => 2 overlap (halo) cells on each tile edge.
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
// Decode this thread's (c2, c3) point and this block's c1-tile id.
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
// __reg_0 holds the freshly loaded input plane value; __reg_k_{0,1,2} form a
// 3-deep rotating accumulator set for fused time step k (k = 1..2).
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
// Double-buffered shared-memory copy of the current plane (one float/thread).
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
// __loadValid: thread maps inside the padded input; __writeValidK: thread is
// at least K halo cells from the tile edge; __storeValid: both steps valid.
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
// Generated helpers: __LOAD streams plane h into a register; __CALCEXPR_{0,1,2}
// are 9-point (c2 x c3) weighted sums, accumulated over three consecutive c1
// planes to form the 27-point box stencil; __CALCk runs pipeline stage k
// (pass-through where not valid); __STORE commits one finished plane.
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((0.0375f * (__REGREF(__a, 0, 0))) + (0.0371f * (__SBREF(__a_sb, -1, -1)))) + (0.0372f * (__SBREF(__a_sb, -1, 0)))) + (0.0373f * (__SBREF(__a_sb, -1, 1)))) + (0.0374f * (__SBREF(__a_sb, 0, -1)))) + (0.0376f * (__SBREF(__a_sb, 0, 1)))) + (0.0377f * (__SBREF(__a_sb, 1, -1)))) + (0.0378f * (__SBREF(__a_sb, 1, 0)))) + (0.0379f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.0355f * (__REGREF(__a, 0, 0)))) + (0.0351f * (__SBREF(__a_sb, -1, -1)))) + (0.0352f * (__SBREF(__a_sb, -1, 0)))) + (0.0353f * (__SBREF(__a_sb, -1, 1)))) + (0.0354f * (__SBREF(__a_sb, 0, -1)))) + (0.0356f * (__SBREF(__a_sb, 0, 1)))) + (0.0357f * (__SBREF(__a_sb, 1, -1)))) + (0.0358f * (__SBREF(__a_sb, 1, 0)))) + (0.0359f * (__SBREF(__a_sb, 1, 1))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((0.0365f * (__REGREF(__a, 0, 0)))) + (0.0361f * (__SBREF(__a_sb, -1, -1)))) + (0.0362f * (__SBREF(__a_sb, -1, 0)))) + (0.0363f * (__SBREF(__a_sb, -1, 1)))) + (0.0364f * (__SBREF(__a_sb, 0, -1)))) + (0.0366f * (__SBREF(__a_sb, 0, 1)))) + (0.0367f * (__SBREF(__a_sb, 1, -1)))) + (0.0368f * (__SBREF(__a_sb, 1, 0)))) + (0.0369f * (__SBREF(__a_sb, 1, 1)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
// Prologue: prime both pipelines over planes 0..4.
if (__c1Id == 0)
{
// First c1 tile: plane 0 is the lower boundary plane and seeds both stages.
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(1, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
}
else
{
// Interior c1 tile: pipelines fill from the overlapped halo planes.
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
__DB_SWITCH(); __syncthreads();
}
// Re-align the shared-memory double-buffer pointer for the main loop.
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
// Last c1 tile: full 3-plane iterations, then a remainder-specific epilogue
// that feeds pass-through values at the upper boundary.
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
// Steady state: 3 planes in, 3 finished planes out per iteration.
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
// Epilogue: dispatch on how many planes remain (1, 2, or 3).
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__STORE(__h + 1, __reg_2_0);
}
}
else
{
// Interior c1 tile: fixed trip count over the overlapped extent; early
// returns stop the block once all __side1LenOl planes are consumed.
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
}
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((0.0375f * (__REGREF(__a, 0, 0))) + (0.0371f * (__SBREF(__a_sb, -1, -1)))) + (0.0372f * (__SBREF(__a_sb, -1, 0)))) + (0.0373f * (__SBREF(__a_sb, -1, 1)))) + (0.0374f * (__SBREF(__a_sb, 0, -1)))) + (0.0376f * (__SBREF(__a_sb, 0, 1)))) + (0.0377f * (__SBREF(__a_sb, 1, -1)))) + (0.0378f * (__SBREF(__a_sb, 1, 0)))) + (0.0379f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((0.0355f * (__REGREF(__a, 0, 0)))) + (0.0351f * (__SBREF(__a_sb, -1, -1)))) + (0.0352f * (__SBREF(__a_sb, -1, 0)))) + (0.0353f * (__SBREF(__a_sb, -1, 1)))) + (0.0354f * (__SBREF(__a_sb, 0, -1)))) + (0.0356f * (__SBREF(__a_sb, 0, 1)))) + (0.0357f * (__SBREF(__a_sb, 1, -1)))) + (0.0358f * (__SBREF(__a_sb, 1, 0)))) + (0.0359f * (__SBREF(__a_sb, 1, 1))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((0.0365f * (__REGREF(__a, 0, 0)))) + (0.0361f * (__SBREF(__a_sb, -1, -1)))) + (0.0362f * (__SBREF(__a_sb, -1, 0)))) + (0.0363f * (__SBREF(__a_sb, -1, 1)))) + (0.0364f * (__SBREF(__a_sb, 0, -1)))) + (0.0366f * (__SBREF(__a_sb, 0, 1)))) + (0.0367f * (__SBREF(__a_sb, 1, -1)))) + (0.0368f * (__SBREF(__a_sb, 1, 0)))) + (0.0369f * (__SBREF(__a_sb, 1, 1)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
}
else
{
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
}
}
|
7ded7126021a585757ba335e430ac92ec4a8e7fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Mark Gates
@generated from magmablas/zlanhe.cu normal z -> c, Tue Feb 9 16:05:29 2016
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define inf_bs 32
#define max_bs 64
#define PRECISION_c
#define COMPLEX
/* ====================================================================== */
/* inf-norm */
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n is any size and A is stored lower.
* Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32).
* z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). */
__global__ void
clanhe_inf_kernel_lower(
    int n,
    const magmaFloatComplex * __restrict__ A, int lda,
    float * __restrict__ dwork,
    int n_full_block, int n_mod_bs )
{
#if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200)
    // Thread layout: blockDim = (inf_bs, 4); block blockIdx.x owns one inf_bs-wide row band.
    int tx = threadIdx.x;                  // row within the band (0..inf_bs-1)
    int ty = threadIdx.y;                  // column-group index (0..3)
    int diag = blockIdx.x*inf_bs;          // column where this band's diagonal block starts
    int ind = blockIdx.x*inf_bs + tx;      // global row handled by this thread
    float res = 0.;                        // running sum of |A(ind, :)| for this thread's column groups
    __shared__ magmaFloatComplex la[inf_bs][inf_bs+1];  // +1 pad avoids bank conflicts on transposed accesses
    if ( blockIdx.x < n_full_block ) {
        // ------------------------------
        // All full block rows
        A += ind;
        A += ty * lda;
        // ----------
        // loop over all blocks left of the diagonal block
        for (int i=0; i < diag; i += inf_bs ) {
            // 32x4 threads cooperatively load 32x32 block
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                la[tx][ty+j] = A[j*lda];
            }
            A += lda*inf_bs;
            __syncthreads();
            // compute 4 partial sums of each row, i.e.,
            // for ty=0: res = sum( la[tx, 0: 7] )
            // for ty=1: res = sum( la[tx, 8:15] )
            // for ty=2: res = sum( la[tx,16:23] )
            // for ty=3: res = sum( la[tx,24:31] )
            #pragma unroll 8
            for (int j=ty*8; j < ty*8 + 8; j++) {
                res += MAGMA_C_ABS( la[tx][j] );
            }
            __syncthreads();
        }
        // ----------
        // load diagonal block
        #pragma unroll 8
        for (int j=0; j < inf_bs; j += 4) {
            la[tx][ty+j] = A[j*lda];
        }
        __syncthreads();
        // copy lower triangle to upper triangle, and
        // make diagonal real (zero imaginary part)
        #pragma unroll 8
        for (int i=ty*8; i < ty*8 + 8; i++) {
            if ( i < tx ) {
                la[i][tx] = la[tx][i];
            }
            #ifdef COMPLEX
            else if ( i == tx ) {
                la[i][i] = MAGMA_C_MAKE( MAGMA_C_REAL( la[i][i] ), 0 );
            }
            #endif
        }
        __syncthreads();
        // partial row sums
        #pragma unroll 8
        for (int j=ty*8; j < ty*8 + 8; j++) {
            res += MAGMA_C_ABS( la[tx][j] );
        }
        __syncthreads();
        // ----------
        // loop over all 32x32 blocks below diagonal block
        A += inf_bs;
        for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) {
            // load block (transposed)
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                la[ty+j][tx] = A[j*lda];
            }
            A += inf_bs;
            __syncthreads();
            // partial row sums
            #pragma unroll 8
            for (int j=ty*8; j < ty*8 + 8; j++) {
                res += MAGMA_C_ABS( la[tx][j] );
            }
            __syncthreads();
        }
        // ----------
        // last partial block, which is (n_mod_bs by inf_bs)
        if ( n_mod_bs > 0 ) {
            // load block (transposed), with zeros for rows outside matrix
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                if ( tx < n_mod_bs ) {
                    la[ty+j][tx] = A[j*lda];
                }
                else {
                    la[ty+j][tx] = MAGMA_C_ZERO;
                }
            }
            __syncthreads();
            // partial row sums
            #pragma unroll 8
            for (int j=ty*8; j < ty*8 + 8; j++) {
                res += MAGMA_C_ABS( la[tx][j] );
            }
            __syncthreads();
        }
        // ----------
        // 32x4 threads store partial sums into shared memory
        la[tx][ty] = MAGMA_C_MAKE( res, 0. );
        __syncthreads();
        // first column of 32x1 threads computes final sum of each row
        if ( ty == 0 ) {
            res = res
                + MAGMA_C_REAL( la[tx][1] )
                + MAGMA_C_REAL( la[tx][2] )
                + MAGMA_C_REAL( la[tx][3] );
            dwork[ind] = res;
        }
    }
    else {
        // ------------------------------
        // Last, partial block row
        // Threads past end of matrix (i.e., ind >= n) are redundantly assigned
        // the last row (n-1). At the end, those results are ignored -- only
        // results for ind < n are saved into dwork.
        if ( tx < n_mod_bs ) {
            A += ind;
        }
        else {
            A += (blockIdx.x*inf_bs + n_mod_bs - 1);  // redundantly do last row
        }
        A += ty * lda;
        // ----------
        // loop over all blocks left of the diagonal block
        // each is (n_mod_bs by inf_bs)
        for (int i=0; i < diag; i += inf_bs ) {
            // load block
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                la[tx][ty+j] = A[j*lda];
            }
            A += lda*inf_bs;
            __syncthreads();
            // partial row sums
            #pragma unroll 8
            for (int j=0; j < 8; j++) {
                res += MAGMA_C_ABS( la[tx][j+ty*8] );
            }
            __syncthreads();
        }
        // ----------
        // partial diagonal block; done serially by the ty == 0 threads
        if ( ty == 0 && tx < n_mod_bs ) {
            // sum rows left of diagonal
            for (int j=0; j < tx; j++) {
                res += MAGMA_C_ABS( *A );
                A += lda;
            }
            // sum diagonal (ignoring imaginary part)
            res += MAGMA_D_ABS( MAGMA_C_REAL( *A ));
            A += 1;
            // sum column below diagonal
            for (int j=tx+1; j < n_mod_bs; j++) {
                res += MAGMA_C_ABS( *A );
                A += 1;
            }
        }
        __syncthreads();
        // ----------
        // 32x4 threads store partial sums into shared memory
        la[tx][ty]= MAGMA_C_MAKE( res, 0. );
        __syncthreads();
        // first column of 32x1 threads computes final sum of each row
        // rows outside matrix are ignored
        if ( ty == 0 && tx < n_mod_bs ) {
            res = res
                + MAGMA_C_REAL( la[tx][1] )
                + MAGMA_C_REAL( la[tx][2] )
                + MAGMA_C_REAL( la[tx][3] );
            dwork[ind] = res;
        }
    }
#endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */
}
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n is any size and A is stored upper.
* Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32).
* z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200).
* The upper implementation is similar to lower, but processes blocks
* in the transposed order:
* lower goes from left over to diagonal, then down to bottom;
* upper goes from top down to diagonal, then over to right.
* Differences are noted with # in comments. */
__global__ void
clanhe_inf_kernel_upper(
    int n,
    const magmaFloatComplex * __restrict__ A, int lda,
    float * __restrict__ dwork,
    int n_full_block, int n_mod_bs )
{
#if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200)
    // Thread layout: blockDim = (inf_bs, 4); block blockIdx.x owns one inf_bs-wide band.
    int tx = threadIdx.x;                  // row within the band (0..inf_bs-1)
    int ty = threadIdx.y;                  // column-group index (0..3)
    int diag = blockIdx.x*inf_bs;          // row where this band's diagonal block starts
    int ind = blockIdx.x*inf_bs + tx;      // global row handled by this thread
    float res = 0.;                        // running sum of |A(ind, :)| for this thread's column groups
    __shared__ magmaFloatComplex la[inf_bs][inf_bs+1];  // +1 pad avoids bank conflicts on transposed accesses
    if ( blockIdx.x < n_full_block ) {
        // ------------------------------
        // All full block #columns
        A += blockIdx.x*inf_bs*lda + tx;  //#
        A += ty * lda;
        // ----------
        // loop over all blocks #above the diagonal block
        for (int i=0; i < diag; i += inf_bs ) {
            // 32x4 threads cooperatively load 32x32 block (#transposed)
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                la[ty+j][tx] = A[j*lda];  //#
            }
            A += inf_bs;  //#
            __syncthreads();
            // compute 4 partial sums of each row, i.e.,
            // for ty=0: res = sum( la[tx, 0: 7] )
            // for ty=1: res = sum( la[tx, 8:15] )
            // for ty=2: res = sum( la[tx,16:23] )
            // for ty=3: res = sum( la[tx,24:31] )
            #pragma unroll 8
            for (int j=ty*8; j < ty*8 + 8; j++) {
                res += MAGMA_C_ABS( la[tx][j] );
            }
            __syncthreads();
        }
        // ----------
        // load diagonal block
        #pragma unroll 8
        for (int j=0; j < inf_bs; j += 4) {
            la[tx][ty+j] = A[j*lda];
        }
        __syncthreads();
        // copy #upper triangle to #lower triangle, and
        // make diagonal real (zero imaginary part)
        #pragma unroll 8
        for (int i=ty*8; i < ty*8 + 8; i++) {
            if ( i > tx ) {  //#
                la[i][tx] = la[tx][i];
            }
            #ifdef COMPLEX
            else if ( i == tx ) {
                la[i][i] = MAGMA_C_MAKE( MAGMA_C_REAL( la[i][i] ), 0 );
            }
            #endif
        }
        __syncthreads();
        // partial row sums
        #pragma unroll 8
        for (int j=ty*8; j < ty*8 + 8; j++) {
            res += MAGMA_C_ABS( la[tx][j] );
        }
        __syncthreads();
        // ----------
        // loop over all 32x32 blocks #right of diagonal block
        A += inf_bs*lda;  //#
        for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) {
            // load block (#non-transposed)
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                la[tx][ty+j] = A[j*lda];  //#
            }
            A += inf_bs*lda;  //#
            __syncthreads();
            // partial row sums
            #pragma unroll 8
            for (int j=ty*8; j < ty*8 + 8; j++) {
                res += MAGMA_C_ABS( la[tx][j] );
            }
            __syncthreads();
        }
        // ----------
        // last partial block, which is #(inf_bs by n_mod_bs)
        if ( n_mod_bs > 0 ) {
            // load block (#non-transposed), with zeros for #cols outside matrix
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                if ( ty+j < n_mod_bs ) {  //#
                    la[tx][ty+j] = A[j*lda];  //#
                }
                else {
                    la[tx][ty+j] = MAGMA_C_ZERO;  //#
                }
            }
            __syncthreads();
            // partial row sums
            #pragma unroll 8
            for (int j=ty*8; j < ty*8 + 8; j++) {
                res += MAGMA_C_ABS( la[tx][j] );
            }
            __syncthreads();
        }
        // ----------
        // 32x4 threads store partial sums into shared memory
        la[tx][ty] = MAGMA_C_MAKE( res, 0. );
        __syncthreads();
        // first column of 32x1 threads computes final sum of each row
        if ( ty == 0 ) {
            res = res
                + MAGMA_C_REAL( la[tx][1] )
                + MAGMA_C_REAL( la[tx][2] )
                + MAGMA_C_REAL( la[tx][3] );
            dwork[ind] = res;
        }
    }
    else {
        // ------------------------------
        // Last, partial block #column
        // Instead of assigning threads ind >= n to the last row (n-1), as in Lower,
        // Upper simply adjusts loop bounds to avoid loading columns outside the matrix.
        // Again, at the end, those results are ignored -- only
        // results for ind < n are saved into dwork.
        A += blockIdx.x*inf_bs*lda + tx;  //#
        A += ty * lda;
        // ----------
        // loop over all blocks #above the diagonal block
        // each is #(inf_bs by n_mod_bs)
        for (int i=0; i < diag; i += inf_bs ) {
            // load block (#transposed), #ignoring columns outside matrix
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                if ( ty+j < n_mod_bs ) {
                    la[ty+j][tx] = A[j*lda];
                }
            }
            A += inf_bs;  //#
            __syncthreads();
            // partial row sums
            // NOTE: columns skipped above leave la entries stale for j+ty*8 >= n_mod_bs;
            // those contribute only to threads with ind >= n, whose results are discarded.
            #pragma unroll 8
            for (int j=0; j < 8; j++) {
                res += MAGMA_C_ABS( la[tx][j+ty*8] );
            }
            __syncthreads();
        }
        // ----------
        // partial diagonal block; done serially by the ty == 0 threads
        if ( ty == 0 && tx < n_mod_bs ) {
            // #transpose pointer within diagonal block
            // #i.e., from A = A(tx,ty), transpose to A = A(ty,tx).
            A = A - tx - ty*lda + tx*lda + ty;
            // sum #column above diagonal
            for (int j=0; j < tx; j++) {
                res += MAGMA_C_ABS( *A );
                A += 1;  //#
            }
            // sum diagonal (ignoring imaginary part)
            res += MAGMA_D_ABS( MAGMA_C_REAL( *A ));
            A += lda;  //#
            // sum #row right of diagonal
            for (int j=tx+1; j < n_mod_bs; j++) {
                res += MAGMA_C_ABS( *A );
                A += lda;  //#
            }
        }
        __syncthreads();
        // ----------
        // 32x4 threads store partial sums into shared memory
        la[tx][ty]= MAGMA_C_MAKE( res, 0. );
        __syncthreads();
        // first column of 32x1 threads computes final sum of each row
        // rows outside matrix are ignored
        if ( ty == 0 && tx < n_mod_bs ) {
            res = res
                + MAGMA_C_REAL( la[tx][1] )
                + MAGMA_C_REAL( la[tx][2] )
                + MAGMA_C_REAL( la[tx][3] );
            dwork[ind] = res;
        }
    }
#endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */
}
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */
/* Host wrapper: computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1,
 * for || A ||_inf, dispatching on the storage (lower/upper). */
extern "C" void
clanhe_inf(
    magma_uplo_t uplo, magma_int_t n,
    magmaFloatComplex_const_ptr A, magma_int_t lda,
    magmaFloat_ptr dwork,
    magma_queue_t queue )
{
    // One inf_bs-wide band per block; each block runs inf_bs x 4 threads.
    dim3 block( inf_bs, 4 );
    dim3 bands( magma_ceildiv( n, inf_bs ), 1 );
    const magma_int_t tail     = n % inf_bs;           // rows in the final partial band
    const magma_int_t n_full   = (n - tail) / inf_bs;  // number of complete bands
    const bool lower = (uplo == MagmaLower);
    if ( lower ) {
        hipLaunchKernelGGL(( clanhe_inf_kernel_lower), dim3(bands), dim3(block), 0, queue->cuda_stream() ,
            n, A, lda, dwork, n_full, tail );
    }
    else {
        hipLaunchKernelGGL(( clanhe_inf_kernel_upper), dim3(bands), dim3(block), 0, queue->cuda_stream() ,
            n, A, lda, dwork, n_full, tail );
    }
}
/* ====================================================================== */
/* max-norm */
/* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */
/* One thread per row: dwork[row] = max( abs( A(row,0:row) )), for ||A||_max
 * with A stored lower. NaNs propagate through max_nan. */
__global__ void
clanhe_max_kernel_lower(
    int n,
    const magmaFloatComplex * __restrict__ A, int lda,
    float * __restrict__ dwork )
{
    const int row = blockIdx.x*max_bs + threadIdx.x;
    if ( row >= n )
        return;  // grid tail past the matrix
    // Walk along row `row`: consecutive entries A(row,j) are lda apart.
    const magmaFloatComplex *entry = A + row;
    float biggest = 0;
    int col = 0;
    while ( col < row ) {
        biggest = max_nan( biggest, MAGMA_C_ABS( *entry ));
        entry += lda;
        ++col;
    }
    // diagonal element: Hermitian, so only its real part is referenced
    biggest = max_nan( biggest, MAGMA_D_ABS( MAGMA_C_REAL( *entry )));
    dwork[row] = biggest;
}
/* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper. */
/* One thread per row: dwork[row] = max( abs( A(row,row:n) )), for ||A||_max
 * with A stored upper. NaNs propagate through max_nan. */
__global__ void
clanhe_max_kernel_upper(
    int n,
    const magmaFloatComplex * __restrict__ A, int lda,
    float * __restrict__ dwork )
{
    const int row = blockIdx.x*max_bs + threadIdx.x;
    if ( row >= n )
        return;  // grid tail past the matrix
    // Start at A(row, n-1) and walk left toward the diagonal.
    const magmaFloatComplex *entry = A + row + (n-1)*lda;
    float biggest = 0;
    int col = n-1;
    while ( col > row ) {
        biggest = max_nan( biggest, MAGMA_C_ABS( *entry ));
        entry -= lda;
        --col;
    }
    // diagonal element: Hermitian, so only its real part is referenced
    biggest = max_nan( biggest, MAGMA_D_ABS( MAGMA_C_REAL( *entry )));
    dwork[row] = biggest;
}
/* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */
/* Host wrapper: computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max,
 * dispatching on the storage (lower/upper). */
extern "C" void
clanhe_max(
    magma_uplo_t uplo, magma_int_t n,
    magmaFloatComplex_const_ptr A, magma_int_t lda,
    magmaFloat_ptr dwork,
    magma_queue_t queue )
{
    // One thread per row, max_bs threads per block.
    dim3 block( max_bs );
    dim3 blocks( magma_ceildiv( n, max_bs ) );
    const bool lower = (uplo == MagmaLower);
    if ( lower ) {
        hipLaunchKernelGGL(( clanhe_max_kernel_lower), dim3(blocks), dim3(block), 0, queue->cuda_stream() ,
            n, A, lda, dwork );
    }
    else {
        hipLaunchKernelGGL(( clanhe_max_kernel_upper), dim3(blocks), dim3(block), 0, queue->cuda_stream() ,
            n, A, lda, dwork );
    }
}
/* ====================================================================== */
/**
Purpose
-------
CLANHE returns the value of the one norm, or the Frobenius norm, or
the infinity norm, or the element of largest absolute value of a
complex Hermitian matrix A.
CLANHE = ( max(abs(A(i,j))), NORM = MagmaMaxNorm
(
( norm1(A), NORM = MagmaOneNorm
(
( normI(A), NORM = MagmaInfNorm
(
( normF(A), NORM = MagmaFrobeniusNorm ** not yet supported
where norm1 denotes the one norm of a matrix (maximum column sum),
normI denotes the infinity norm of a matrix (maximum row sum) and
normF denotes the Frobenius norm of a matrix (square root of sum of squares).
Note that max(abs(A(i,j))) is not a consistent matrix norm.
On error, returns CLANHE < 0: if CLANHE = -i, the i-th argument had an illegal value.
Arguments:
----------
@param[in]
norm magma_norm_t
Specifies the value to be returned in CLANHE as described above.
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
Hermitian matrix A is to be referenced.
- = MagmaUpper: Upper triangular part of A is referenced
- = MagmaLower: Lower triangular part of A is referenced
@param[in]
n INTEGER
The order of the matrix A. N >= 0. When N = 0, CLANHE is
set to zero.
@param[in]
dA COMPLEX array on the GPU, dimension (LDDA,N)
The Hermitian matrix A. If UPLO = MagmaUpper, the leading n by n
upper triangular part of A contains the upper triangular part
of the matrix A, and the strictly lower triangular part of A
is not referenced. If UPLO = MagmaLower, the leading n by n lower
triangular part of A contains the lower triangular part of
the matrix A, and the strictly upper triangular part of A is
not referenced. Note that the imaginary parts of the diagonal
elements need not be set and are assumed to be zero.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(N,1).
@param
dwork (workspace) REAL array on the GPU, dimension (MAX(1,LWORK)),
where LWORK >= N.
NOTE: this is different than LAPACK, where WORK is required
only for norm1 and normI. Here max-norm also requires WORK.
@param[in]
lwork INTEGER
The dimension of the array DWORK. LWORK >= max( 1, N ).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" float
magmablas_clanhe_q(
    magma_norm_t norm, magma_uplo_t uplo, magma_int_t n,
    magmaFloatComplex_const_ptr dA, magma_int_t ldda,
    magmaFloat_ptr dwork, magma_int_t lwork,
    magma_queue_t queue )
{
    // 1-norm == inf-norm since A is Hermitian
    const bool want_inf = (norm == MagmaInfNorm || norm == MagmaOneNorm);
    const bool want_max = (norm == MagmaMaxNorm);
    // inf_norm Double-Complex requires > 16 KB shared data (arch >= 200)
    #if defined(PRECISION_z)
    const bool inf_ok = (magma_getdevice_arch() >= 200);
    #else
    const bool inf_ok = true;
    #endif
    // Argument validation; error code -i names the i-th argument, per the docs above.
    magma_int_t info = 0;
    if ( ! (want_max || (want_inf && inf_ok)) )
        info = -1;
    else if ( uplo != MagmaUpper && uplo != MagmaLower )
        info = -2;
    else if ( n < 0 )
        info = -3;
    else if ( ldda < n )
        info = -5;
    else if ( lwork < n )
        info = -7;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return info;
    }
    /* Quick return for an empty matrix */
    if ( n == 0 )
        return 0;
    // Stage 1: per-row sums (inf/one norm) or per-row maxima (max norm) into dwork[0:n).
    if ( want_inf )
        clanhe_inf( uplo, n, dA, ldda, dwork, queue );
    else
        clanhe_max( uplo, n, dA, ldda, dwork, queue );
    // Stage 2: NaN-propagating max-reduce of dwork into dwork[0], then fetch the scalar.
    hipLaunchKernelGGL(( magma_max_nan_kernel), dim3(1), dim3(512), 0, queue->cuda_stream() , n, dwork );
    float res = 0;
    magma_sgetvector( 1, &dwork[0], 1, &res, 1, queue );
    return res;
}
| 7ded7126021a585757ba335e430ac92ec4a8e7fa.cu | /*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Mark Gates
@generated from magmablas/zlanhe.cu normal z -> c, Tue Feb 9 16:05:29 2016
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define inf_bs 32
#define max_bs 64
#define PRECISION_c
#define COMPLEX
/* ====================================================================== */
/* inf-norm */
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n is any size and A is stored lower.
* Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32).
* z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200). */
__global__ void
clanhe_inf_kernel_lower(
    int n,
    const magmaFloatComplex * __restrict__ A, int lda,
    float * __restrict__ dwork,
    int n_full_block, int n_mod_bs )
{
#if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200)
    // Thread layout: blockDim = (inf_bs, 4); block blockIdx.x owns one inf_bs-wide row band.
    int tx = threadIdx.x;                  // row within the band (0..inf_bs-1)
    int ty = threadIdx.y;                  // column-group index (0..3)
    int diag = blockIdx.x*inf_bs;          // column where this band's diagonal block starts
    int ind = blockIdx.x*inf_bs + tx;      // global row handled by this thread
    float res = 0.;                        // running sum of |A(ind, :)| for this thread's column groups
    __shared__ magmaFloatComplex la[inf_bs][inf_bs+1];  // +1 pad avoids bank conflicts on transposed accesses
    if ( blockIdx.x < n_full_block ) {
        // ------------------------------
        // All full block rows
        A += ind;
        A += ty * lda;
        // ----------
        // loop over all blocks left of the diagonal block
        for (int i=0; i < diag; i += inf_bs ) {
            // 32x4 threads cooperatively load 32x32 block
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                la[tx][ty+j] = A[j*lda];
            }
            A += lda*inf_bs;
            __syncthreads();
            // compute 4 partial sums of each row, i.e.,
            // for ty=0: res = sum( la[tx, 0: 7] )
            // for ty=1: res = sum( la[tx, 8:15] )
            // for ty=2: res = sum( la[tx,16:23] )
            // for ty=3: res = sum( la[tx,24:31] )
            #pragma unroll 8
            for (int j=ty*8; j < ty*8 + 8; j++) {
                res += MAGMA_C_ABS( la[tx][j] );
            }
            __syncthreads();
        }
        // ----------
        // load diagonal block
        #pragma unroll 8
        for (int j=0; j < inf_bs; j += 4) {
            la[tx][ty+j] = A[j*lda];
        }
        __syncthreads();
        // copy lower triangle to upper triangle, and
        // make diagonal real (zero imaginary part)
        #pragma unroll 8
        for (int i=ty*8; i < ty*8 + 8; i++) {
            if ( i < tx ) {
                la[i][tx] = la[tx][i];
            }
            #ifdef COMPLEX
            else if ( i == tx ) {
                la[i][i] = MAGMA_C_MAKE( MAGMA_C_REAL( la[i][i] ), 0 );
            }
            #endif
        }
        __syncthreads();
        // partial row sums
        #pragma unroll 8
        for (int j=ty*8; j < ty*8 + 8; j++) {
            res += MAGMA_C_ABS( la[tx][j] );
        }
        __syncthreads();
        // ----------
        // loop over all 32x32 blocks below diagonal block
        A += inf_bs;
        for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) {
            // load block (transposed)
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                la[ty+j][tx] = A[j*lda];
            }
            A += inf_bs;
            __syncthreads();
            // partial row sums
            #pragma unroll 8
            for (int j=ty*8; j < ty*8 + 8; j++) {
                res += MAGMA_C_ABS( la[tx][j] );
            }
            __syncthreads();
        }
        // ----------
        // last partial block, which is (n_mod_bs by inf_bs)
        if ( n_mod_bs > 0 ) {
            // load block (transposed), with zeros for rows outside matrix
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                if ( tx < n_mod_bs ) {
                    la[ty+j][tx] = A[j*lda];
                }
                else {
                    la[ty+j][tx] = MAGMA_C_ZERO;
                }
            }
            __syncthreads();
            // partial row sums
            #pragma unroll 8
            for (int j=ty*8; j < ty*8 + 8; j++) {
                res += MAGMA_C_ABS( la[tx][j] );
            }
            __syncthreads();
        }
        // ----------
        // 32x4 threads store partial sums into shared memory
        la[tx][ty] = MAGMA_C_MAKE( res, 0. );
        __syncthreads();
        // first column of 32x1 threads computes final sum of each row
        if ( ty == 0 ) {
            res = res
                + MAGMA_C_REAL( la[tx][1] )
                + MAGMA_C_REAL( la[tx][2] )
                + MAGMA_C_REAL( la[tx][3] );
            dwork[ind] = res;
        }
    }
    else {
        // ------------------------------
        // Last, partial block row
        // Threads past end of matrix (i.e., ind >= n) are redundantly assigned
        // the last row (n-1). At the end, those results are ignored -- only
        // results for ind < n are saved into dwork.
        if ( tx < n_mod_bs ) {
            A += ind;
        }
        else {
            A += (blockIdx.x*inf_bs + n_mod_bs - 1);  // redundantly do last row
        }
        A += ty * lda;
        // ----------
        // loop over all blocks left of the diagonal block
        // each is (n_mod_bs by inf_bs)
        for (int i=0; i < diag; i += inf_bs ) {
            // load block
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                la[tx][ty+j] = A[j*lda];
            }
            A += lda*inf_bs;
            __syncthreads();
            // partial row sums
            #pragma unroll 8
            for (int j=0; j < 8; j++) {
                res += MAGMA_C_ABS( la[tx][j+ty*8] );
            }
            __syncthreads();
        }
        // ----------
        // partial diagonal block; done serially by the ty == 0 threads
        if ( ty == 0 && tx < n_mod_bs ) {
            // sum rows left of diagonal
            for (int j=0; j < tx; j++) {
                res += MAGMA_C_ABS( *A );
                A += lda;
            }
            // sum diagonal (ignoring imaginary part)
            res += MAGMA_D_ABS( MAGMA_C_REAL( *A ));
            A += 1;
            // sum column below diagonal
            for (int j=tx+1; j < n_mod_bs; j++) {
                res += MAGMA_C_ABS( *A );
                A += 1;
            }
        }
        __syncthreads();
        // ----------
        // 32x4 threads store partial sums into shared memory
        la[tx][ty]= MAGMA_C_MAKE( res, 0. );
        __syncthreads();
        // first column of 32x1 threads computes final sum of each row
        // rows outside matrix are ignored
        if ( ty == 0 && tx < n_mod_bs ) {
            res = res
                + MAGMA_C_REAL( la[tx][1] )
                + MAGMA_C_REAL( la[tx][2] )
                + MAGMA_C_REAL( la[tx][3] );
            dwork[ind] = res;
        }
    }
#endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */
}
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf,
* where n is any size and A is stored upper.
* Has ceil( n / inf_bs ) blocks of (inf_bs x 4) threads each (inf_bs=32).
* z precision uses > 16 KB shared memory, so requires Fermi (arch >= 200).
* The upper implementation is similar to lower, but processes blocks
* in the transposed order:
* lower goes from left over to diagonal, then down to bottom;
* upper goes from top down to diagonal, then over to right.
* Differences are noted with # in comments. */
__global__ void
clanhe_inf_kernel_upper(
    int n,
    const magmaFloatComplex * __restrict__ A, int lda,
    float * __restrict__ dwork,
    int n_full_block, int n_mod_bs )
{
#if (defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || __CUDA_ARCH__ >= 200)
    // Thread layout: blockDim = (inf_bs, 4); block blockIdx.x owns one inf_bs-wide band.
    int tx = threadIdx.x;                  // row within the band (0..inf_bs-1)
    int ty = threadIdx.y;                  // column-group index (0..3)
    int diag = blockIdx.x*inf_bs;          // row where this band's diagonal block starts
    int ind = blockIdx.x*inf_bs + tx;      // global row handled by this thread
    float res = 0.;                        // running sum of |A(ind, :)| for this thread's column groups
    __shared__ magmaFloatComplex la[inf_bs][inf_bs+1];  // +1 pad avoids bank conflicts on transposed accesses
    if ( blockIdx.x < n_full_block ) {
        // ------------------------------
        // All full block #columns
        A += blockIdx.x*inf_bs*lda + tx;  //#
        A += ty * lda;
        // ----------
        // loop over all blocks #above the diagonal block
        for (int i=0; i < diag; i += inf_bs ) {
            // 32x4 threads cooperatively load 32x32 block (#transposed)
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                la[ty+j][tx] = A[j*lda];  //#
            }
            A += inf_bs;  //#
            __syncthreads();
            // compute 4 partial sums of each row, i.e.,
            // for ty=0: res = sum( la[tx, 0: 7] )
            // for ty=1: res = sum( la[tx, 8:15] )
            // for ty=2: res = sum( la[tx,16:23] )
            // for ty=3: res = sum( la[tx,24:31] )
            #pragma unroll 8
            for (int j=ty*8; j < ty*8 + 8; j++) {
                res += MAGMA_C_ABS( la[tx][j] );
            }
            __syncthreads();
        }
        // ----------
        // load diagonal block
        #pragma unroll 8
        for (int j=0; j < inf_bs; j += 4) {
            la[tx][ty+j] = A[j*lda];
        }
        __syncthreads();
        // copy #upper triangle to #lower triangle, and
        // make diagonal real (zero imaginary part)
        #pragma unroll 8
        for (int i=ty*8; i < ty*8 + 8; i++) {
            if ( i > tx ) {  //#
                la[i][tx] = la[tx][i];
            }
            #ifdef COMPLEX
            else if ( i == tx ) {
                la[i][i] = MAGMA_C_MAKE( MAGMA_C_REAL( la[i][i] ), 0 );
            }
            #endif
        }
        __syncthreads();
        // partial row sums
        #pragma unroll 8
        for (int j=ty*8; j < ty*8 + 8; j++) {
            res += MAGMA_C_ABS( la[tx][j] );
        }
        __syncthreads();
        // ----------
        // loop over all 32x32 blocks #right of diagonal block
        A += inf_bs*lda;  //#
        for (int i=diag + inf_bs; i < n - n_mod_bs; i += inf_bs ) {
            // load block (#non-transposed)
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                la[tx][ty+j] = A[j*lda];  //#
            }
            A += inf_bs*lda;  //#
            __syncthreads();
            // partial row sums
            #pragma unroll 8
            for (int j=ty*8; j < ty*8 + 8; j++) {
                res += MAGMA_C_ABS( la[tx][j] );
            }
            __syncthreads();
        }
        // ----------
        // last partial block, which is #(inf_bs by n_mod_bs)
        if ( n_mod_bs > 0 ) {
            // load block (#non-transposed), with zeros for #cols outside matrix
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                if ( ty+j < n_mod_bs ) {  //#
                    la[tx][ty+j] = A[j*lda];  //#
                }
                else {
                    la[tx][ty+j] = MAGMA_C_ZERO;  //#
                }
            }
            __syncthreads();
            // partial row sums
            #pragma unroll 8
            for (int j=ty*8; j < ty*8 + 8; j++) {
                res += MAGMA_C_ABS( la[tx][j] );
            }
            __syncthreads();
        }
        // ----------
        // 32x4 threads store partial sums into shared memory
        la[tx][ty] = MAGMA_C_MAKE( res, 0. );
        __syncthreads();
        // first column of 32x1 threads computes final sum of each row
        if ( ty == 0 ) {
            res = res
                + MAGMA_C_REAL( la[tx][1] )
                + MAGMA_C_REAL( la[tx][2] )
                + MAGMA_C_REAL( la[tx][3] );
            dwork[ind] = res;
        }
    }
    else {
        // ------------------------------
        // Last, partial block #column
        // Instead of assigning threads ind >= n to the last row (n-1), as in Lower,
        // Upper simply adjusts loop bounds to avoid loading columns outside the matrix.
        // Again, at the end, those results are ignored -- only
        // results for ind < n are saved into dwork.
        A += blockIdx.x*inf_bs*lda + tx;  //#
        A += ty * lda;
        // ----------
        // loop over all blocks #above the diagonal block
        // each is #(inf_bs by n_mod_bs)
        for (int i=0; i < diag; i += inf_bs ) {
            // load block (#transposed), #ignoring columns outside matrix
            #pragma unroll 8
            for (int j=0; j < inf_bs; j += 4) {
                if ( ty+j < n_mod_bs ) {
                    la[ty+j][tx] = A[j*lda];
                }
            }
            A += inf_bs;  //#
            __syncthreads();
            // partial row sums
            // NOTE: columns skipped above leave la entries stale for j+ty*8 >= n_mod_bs;
            // those contribute only to threads with ind >= n, whose results are discarded.
            #pragma unroll 8
            for (int j=0; j < 8; j++) {
                res += MAGMA_C_ABS( la[tx][j+ty*8] );
            }
            __syncthreads();
        }
        // ----------
        // partial diagonal block; done serially by the ty == 0 threads
        if ( ty == 0 && tx < n_mod_bs ) {
            // #transpose pointer within diagonal block
            // #i.e., from A = A(tx,ty), transpose to A = A(ty,tx).
            A = A - tx - ty*lda + tx*lda + ty;
            // sum #column above diagonal
            for (int j=0; j < tx; j++) {
                res += MAGMA_C_ABS( *A );
                A += 1;  //#
            }
            // sum diagonal (ignoring imaginary part)
            res += MAGMA_D_ABS( MAGMA_C_REAL( *A ));
            A += lda;  //#
            // sum #row right of diagonal
            for (int j=tx+1; j < n_mod_bs; j++) {
                res += MAGMA_C_ABS( *A );
                A += lda;  //#
            }
        }
        __syncthreads();
        // ----------
        // 32x4 threads store partial sums into shared memory
        la[tx][ty]= MAGMA_C_MAKE( res, 0. );
        __syncthreads();
        // first column of 32x1 threads computes final sum of each row
        // rows outside matrix are ignored
        if ( ty == 0 && tx < n_mod_bs ) {
            res = res
                + MAGMA_C_REAL( la[tx][1] )
                + MAGMA_C_REAL( la[tx][2] )
                + MAGMA_C_REAL( la[tx][3] );
            dwork[ind] = res;
        }
    }
#endif /* (PRECISION_s || PRECISION_d || PRECISION_c || __CUDA_ARCH__ >= 200) */
}
/* Computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1, for || A ||_inf */
/* Host wrapper: computes row sums dwork[i] = sum( abs( A(i,:) )), i=0:n-1,
 * for || A ||_inf, dispatching on the storage (lower/upper). */
extern "C" void
clanhe_inf(
    magma_uplo_t uplo, magma_int_t n,
    magmaFloatComplex_const_ptr A, magma_int_t lda,
    magmaFloat_ptr dwork,
    magma_queue_t queue )
{
    // One inf_bs-wide band per block; each block runs inf_bs x 4 threads.
    dim3 block( inf_bs, 4 );
    dim3 bands( magma_ceildiv( n, inf_bs ), 1 );
    const magma_int_t tail   = n % inf_bs;           // rows in the final partial band
    const magma_int_t n_full = (n - tail) / inf_bs;  // number of complete bands
    const bool lower = (uplo == MagmaLower);
    if ( lower ) {
        clanhe_inf_kernel_lower<<< bands, block, 0, queue->cuda_stream() >>>
            ( n, A, lda, dwork, n_full, tail );
    }
    else {
        clanhe_inf_kernel_upper<<< bands, block, 0, queue->cuda_stream() >>>
            ( n, A, lda, dwork, n_full, tail );
    }
}
/* ====================================================================== */
/* max-norm */
/* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored lower */
/* One thread per row: dwork[row] = max( abs( A(row,0:row) )), for ||A||_max
 * with A stored lower. NaNs propagate through max_nan. */
__global__ void
clanhe_max_kernel_lower(
    int n,
    const magmaFloatComplex * __restrict__ A, int lda,
    float * __restrict__ dwork )
{
    const int row = blockIdx.x*max_bs + threadIdx.x;
    if ( row >= n )
        return;  // grid tail past the matrix
    // Walk along row `row`: consecutive entries A(row,j) are lda apart.
    const magmaFloatComplex *entry = A + row;
    float biggest = 0;
    int col = 0;
    while ( col < row ) {
        biggest = max_nan( biggest, MAGMA_C_ABS( *entry ));
        entry += lda;
        ++col;
    }
    // diagonal element: Hermitian, so only its real part is referenced
    biggest = max_nan( biggest, MAGMA_D_ABS( MAGMA_C_REAL( *entry )));
    dwork[row] = biggest;
}
/* Computes dwork[i] = max( abs( A(i,0:i) )), i=0:n-1, for ||A||_max, where A is stored upper. */
__global__ void
clanhe_max_kernel_upper(
    int n,
    const magmaFloatComplex * __restrict__ A, int lda,
    float * __restrict__ dwork )
{
    // Each thread owns one row and scans its stored (upper) entries from the
    // last column inward, then folds in the diagonal.
    const int row = blockIdx.x*max_bs + threadIdx.x;
    if (row < n) {
        float rmax = 0;
        const magmaFloatComplex *Arow = A + row + (n-1)*lda;
        for (int col = n-1; col > row; --col, Arow -= lda) {
            rmax = max_nan( rmax, MAGMA_C_ABS( *Arow ));
        }
        // diagonal entry: imaginary part is ignored for a Hermitian matrix
        rmax = max_nan( rmax, MAGMA_D_ABS( MAGMA_C_REAL( *Arow )));
        dwork[row] = rmax;
    }
}
/* Computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max */
/* Launcher: computes dwork[i] = max( abs( A(i,:) )), i=0:n-1, for ||A||_max. */
extern "C" void
clanhe_max(
    magma_uplo_t uplo, magma_int_t n,
    magmaFloatComplex_const_ptr A, magma_int_t lda,
    magmaFloat_ptr dwork,
    magma_queue_t queue )
{
    // 1-D launch: one max_bs-thread block per chunk of rows.
    dim3 threads( max_bs );
    dim3 grid( magma_ceildiv( n, max_bs ) );

    if ( uplo != MagmaLower ) {
        clanhe_max_kernel_upper<<< grid, threads, 0, queue->cuda_stream() >>>
            ( n, A, lda, dwork );
    }
    else {
        clanhe_max_kernel_lower<<< grid, threads, 0, queue->cuda_stream() >>>
            ( n, A, lda, dwork );
    }
}
/* ====================================================================== */
/**
Purpose
-------
CLANHE returns the value of the one norm, or the Frobenius norm, or
the infinity norm, or the element of largest absolute value of a
complex Hermitian matrix A.
CLANHE = ( max(abs(A(i,j))), NORM = MagmaMaxNorm
(
( norm1(A), NORM = MagmaOneNorm
(
( normI(A), NORM = MagmaInfNorm
(
( normF(A), NORM = MagmaFrobeniusNorm ** not yet supported
where norm1 denotes the one norm of a matrix (maximum column sum),
normI denotes the infinity norm of a matrix (maximum row sum) and
normF denotes the Frobenius norm of a matrix (square root of sum of squares).
Note that max(abs(A(i,j))) is not a consistent matrix norm.
On error, returns CLANHE < 0: if CLANHE = -i, the i-th argument had an illegal value.
Arguments:
----------
@param[in]
norm magma_norm_t
Specifies the value to be returned in CLANHE as described above.
@param[in]
uplo magma_uplo_t
Specifies whether the upper or lower triangular part of the
Hermitian matrix A is to be referenced.
- = MagmaUpper: Upper triangular part of A is referenced
- = MagmaLower: Lower triangular part of A is referenced
@param[in]
n INTEGER
The order of the matrix A. N >= 0. When N = 0, CLANHE is
set to zero.
@param[in]
dA COMPLEX array on the GPU, dimension (LDDA,N)
The Hermitian matrix A. If UPLO = MagmaUpper, the leading n by n
upper triangular part of A contains the upper triangular part
of the matrix A, and the strictly lower triangular part of A
is not referenced. If UPLO = MagmaLower, the leading n by n lower
triangular part of A contains the lower triangular part of
the matrix A, and the strictly upper triangular part of A is
not referenced. Note that the imaginary parts of the diagonal
elements need not be set and are assumed to be zero.
@param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(N,1).
@param
dwork (workspace) REAL array on the GPU, dimension (MAX(1,LWORK)),
where LWORK >= N.
NOTE: this is different than LAPACK, where WORK is required
only for norm1 and normI. Here max-norm also requires WORK.
@param[in]
lwork INTEGER
The dimension of the array DWORK. LWORK >= max( 1, N ).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_caux2
********************************************************************/
extern "C" float
magmablas_clanhe_q(
    magma_norm_t norm, magma_uplo_t uplo, magma_int_t n,
    magmaFloatComplex_const_ptr dA, magma_int_t ldda,
    magmaFloat_ptr dwork, magma_int_t lwork,
    magma_queue_t queue )
{
    magma_int_t info = 0;
    // 1-norm == inf-norm since A is Hermitian
    bool inf_norm = (norm == MagmaInfNorm || norm == MagmaOneNorm);
    bool max_norm = (norm == MagmaMaxNorm);
    // inf_norm Double-Complex requires > 16 KB shared data (arch >= 200)
    #if defined(PRECISION_z)
    const bool inf_implemented = (magma_getdevice_arch() >= 200);
    #else
    const bool inf_implemented = true;
    #endif
    // LAPACK-style argument checking: a negative info encodes which
    // argument (by position) was illegal.
    if ( ! (max_norm || (inf_norm && inf_implemented)) )
        info = -1;
    else if ( uplo != MagmaUpper && uplo != MagmaLower )
        info = -2;
    else if ( n < 0 )
        info = -3;
    else if ( ldda < n )
        info = -5;
    else if ( lwork < n )
        info = -7;
    if ( info != 0 ) {
        magma_xerbla( __func__, -(info) );
        return info;  // negative error code, per the CLANHE contract documented above
    }
    /* Quick return */
    if ( n == 0 )
        return 0;
    float res = 0;
    if ( inf_norm ) {
        // per-row absolute sums into dwork[0..n-1]
        clanhe_inf( uplo, n, dA, ldda, dwork, queue );
    }
    else {
        // per-row absolute maxima into dwork[0..n-1]
        clanhe_max( uplo, n, dA, ldda, dwork, queue );
    }
    // Reduce dwork[0..n-1] into dwork[0] on the device, then copy the single
    // scalar result back to the host.
    magma_max_nan_kernel<<< 1, 512, 0, queue->cuda_stream() >>>( n, dwork );
    magma_sgetvector( 1, &dwork[0], 1, &res, 1, queue );
    return res;
}
|
bdb9da28af38fb7e83ed7094f2df768cff71059b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "THHAtomics.cuh"
#include "common.h"
#include <THH/THHApply.cuh>
#include <thrust/functional.h>
// Forward pass of the spatially-weighted class NLL criterion.  Each group of
// `blocks_per_sample` blocks serves one batch sample; every block reduces its
// partial loss/weight sums and thread 0 folds them into *output and
// *total_weight with atomics.
template <typename T, typename AccumT>
__global__ void cunn_SpatialWeightedClassNLLCriterion_updateOutput_kernel(
          T *output,
          T *total_weight,
          T *input,
          THCIndex_t *target,
          T *weights,          // optional per-class weights (may be NULL)
          T *spatialWeights,   // optional per-position weights (may be NULL)
          int size_average,
          int batch_size,
          int n_classes,
          int map_nelem,       // spatial positions per sample
          int blocks_per_sample)
{
  // Scratch buffer for the two block-wide reductions below.
  __shared__ AccumT partial_sums[CUDA_NUM_THREADS];
  int i, t;
  T cur_weight;
  T cur_spWeight;
  AccumT input_sum = 0;
  AccumT acc_weight = 0;
  int sample = blockIdx.x / blocks_per_sample;
  int toffset = sample * map_nelem;              // offset into target for this sample
  int ioffset = sample * map_nelem * n_classes;  // offset into input for this sample
  int step = blockDim.x * blocks_per_sample;
  // Strided walk over this sample's spatial positions, partitioned among the
  // blocks assigned to the sample.
  for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
       i < map_nelem;
       i += step) {
    t = target[toffset + i] - TH_INDEX_BASE;
    assert(t >= 0 && t < n_classes);
    cur_weight = weights ? weights[t] : ScalarConvert<int, T>::to(1);
    cur_spWeight = spatialWeights ? spatialWeights[toffset + i] : ScalarConvert<int, T>::to(1);
    // contribution: -input at the target class, scaled by both weights
    input_sum -= input[ioffset + i + map_nelem * t] * cur_weight * cur_spWeight;
    acc_weight += cur_weight * cur_spWeight;
  }
  __syncthreads();
  // Block-wide sums of the per-thread partials (partial_sums is reused; the
  // reduction itself is expected to synchronize internally between the calls).
  input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<AccumT>(), AccumT(0));
  acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<AccumT>(), AccumT(0));
  if (threadIdx.x == 0) {
    atomicAdd(total_weight, ScalarConvert<AccumT, T>::to(acc_weight));
    if (size_average && acc_weight > 0)
      // NOTE(review): dividing by gridDim.x averages the per-block means;
      // assumes comparable weight mass per block — matches upstream THCUNN.
      atomicAdd(output, ScalarConvert<AccumT, T>::to(input_sum / acc_weight / gridDim.x));
    else
      atomicAdd(output, ScalarConvert<AccumT, T>::to(input_sum));
  }
}
// Backward pass of the spatially-weighted class NLL criterion: for each
// spatial position, writes -w_class * w_spatial * norm into the gradient slot
// of that position's target class.  Block-to-sample mapping mirrors the
// forward kernel.
template<typename T>
__global__ void cunn_SpatialWeightedClassNLLCriterion_updateGradInput_kernel(
          T *gradInput,
          THCIndex_t *target,
          T *weights,
          T *spatialWeights,
          T *total_weight,
          int size_average,
          int batch_size,
          int n_classes,
          int map_nelem,
          int blocks_per_sample)
{
  // Nothing to propagate when the forward pass accumulated no weight.
  if (*total_weight <= 0)
    return;

  const T one = ScalarConvert<int, T>::to(1);
  const T norm = size_average ? (one / *total_weight) : one;

  const int sample  = blockIdx.x / blocks_per_sample;
  const int stride  = blockDim.x * blocks_per_sample;
  const int toffset = sample * map_nelem;
  const int ioffset = sample * map_nelem * n_classes;

  int pos = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
  for ( ; pos < map_nelem; pos += stride) {
    const int cls = (int)target[toffset + pos] - TH_INDEX_BASE;
    assert(cls >= 0 && cls < n_classes);
    const T w   = weights        ? weights[cls]                  : one;
    const T spw = spatialWeights ? spatialWeights[toffset + pos] : one;
    gradInput[ioffset + pos + map_nelem * cls] = -w * spw * norm;
  }
}
#include "generic/SpatialWeightedClassNLLCriterion.cu"
#include "THHGenerateFloatTypes.h"
| bdb9da28af38fb7e83ed7094f2df768cff71059b.cu | #include "THCUNN.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "THCAtomics.cuh"
#include "common.h"
#include <THC/THCApply.cuh>
#include <thrust/functional.h>
// Forward pass of the spatially-weighted class NLL criterion (CUDA variant).
// Each group of `blocks_per_sample` blocks serves one batch sample; every
// block reduces its partial loss/weight sums and thread 0 folds them into
// *output and *total_weight with atomics.
template <typename T, typename AccumT>
__global__ void cunn_SpatialWeightedClassNLLCriterion_updateOutput_kernel(
          T *output,
          T *total_weight,
          T *input,
          THCIndex_t *target,
          T *weights,          // optional per-class weights (may be NULL)
          T *spatialWeights,   // optional per-position weights (may be NULL)
          int size_average,
          int batch_size,
          int n_classes,
          int map_nelem,       // spatial positions per sample
          int blocks_per_sample)
{
  // Scratch buffer for the two block-wide reductions below.
  __shared__ AccumT partial_sums[CUDA_NUM_THREADS];
  int i, t;
  T cur_weight;
  T cur_spWeight;
  AccumT input_sum = 0;
  AccumT acc_weight = 0;
  int sample = blockIdx.x / blocks_per_sample;
  int toffset = sample * map_nelem;              // offset into target for this sample
  int ioffset = sample * map_nelem * n_classes;  // offset into input for this sample
  int step = blockDim.x * blocks_per_sample;
  // Strided walk over this sample's spatial positions, partitioned among the
  // blocks assigned to the sample.
  for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
       i < map_nelem;
       i += step) {
    t = target[toffset + i] - TH_INDEX_BASE;
    assert(t >= 0 && t < n_classes);
    cur_weight = weights ? weights[t] : ScalarConvert<int, T>::to(1);
    cur_spWeight = spatialWeights ? spatialWeights[toffset + i] : ScalarConvert<int, T>::to(1);
    // contribution: -input at the target class, scaled by both weights
    input_sum -= input[ioffset + i + map_nelem * t] * cur_weight * cur_spWeight;
    acc_weight += cur_weight * cur_spWeight;
  }
  __syncthreads();
  // Block-wide sums of the per-thread partials (partial_sums is reused; the
  // reduction itself is expected to synchronize internally between the calls).
  input_sum = reduceBlock(partial_sums, blockDim.x, input_sum, thrust::plus<AccumT>(), AccumT(0));
  acc_weight = reduceBlock(partial_sums, blockDim.x, acc_weight, thrust::plus<AccumT>(), AccumT(0));
  if (threadIdx.x == 0) {
    atomicAdd(total_weight, ScalarConvert<AccumT, T>::to(acc_weight));
    if (size_average && acc_weight > 0)
      // NOTE(review): dividing by gridDim.x averages the per-block means;
      // assumes comparable weight mass per block — matches upstream THCUNN.
      atomicAdd(output, ScalarConvert<AccumT, T>::to(input_sum / acc_weight / gridDim.x));
    else
      atomicAdd(output, ScalarConvert<AccumT, T>::to(input_sum));
  }
}
// Backward pass of the spatially-weighted class NLL criterion (CUDA variant):
// for each spatial position, writes -w_class * w_spatial * norm into the
// gradient slot of that position's target class.
template<typename T>
__global__ void cunn_SpatialWeightedClassNLLCriterion_updateGradInput_kernel(
          T *gradInput,
          THCIndex_t *target,
          T *weights,          // optional per-class weights (may be NULL)
          T *spatialWeights,   // optional per-position weights (may be NULL)
          T *total_weight,
          int size_average,
          int batch_size,
          int n_classes,
          int map_nelem,       // spatial positions per sample
          int blocks_per_sample)
{
  // Nothing to propagate when the forward pass accumulated no weight.
  if (*total_weight <= 0)
    return;
  int i, t;
  // 1/total_weight when averaging, otherwise 1.
  T norm = size_average ? (ScalarConvert<int, T>::to(1) / *total_weight) : ScalarConvert<int, T>::to(1);
  int sample = blockIdx.x / blocks_per_sample;
  int step = blockDim.x * blocks_per_sample;
  int toffset = sample * map_nelem;              // offset into target for this sample
  int ioffset = sample * map_nelem * n_classes;  // offset into gradInput for this sample
  // Strided walk over this sample's spatial positions.
  for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x;
       i < map_nelem;
       i += step) {
    t = (int)target[toffset + i] - TH_INDEX_BASE;
    assert(t >= 0 && t < n_classes);
    gradInput[ioffset + i + map_nelem * t] = -(weights ? weights[t] : ScalarConvert<int, T>::to(1))* (spatialWeights ? spatialWeights[toffset + i] : ScalarConvert<int, T>::to(1)) * norm;
  }
}
#include "generic/SpatialWeightedClassNLLCriterion.cu"
#include "THCGenerateFloatTypes.h"
|
337ea4c588220aa6ebd2fe73f16f702487ae3c82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/MultiMarginCriterion.cu"
#else
// TODO: improve error messages
// Forward pass of the multi-class margin loss (HIP build).
// Accepts a 1-D input (single sample) or 2-D input (batch of nframe samples);
// p selects the L1 (p==1) or squared (p==2) margin penalty.  `reduction`
// follows Reduction::{None,Mean,Sum}: None keeps per-sample losses, otherwise
// the per-sample losses are summed (and averaged inside the kernel for Mean).
// The signature is fixed by the THNN generic dispatch and must not change.
void THNN_(MultiMarginCriterion_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCIndexTensor *target,
           THCTensor *output,
           int64_t reduction,
           int p,
           THCTensor *weights,
           accreal margin_)
{
  scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_);
  THCUNN_assertSameGPU(state, 2, input, target);
  input = THCTensor_(newContiguous)(state, input);
  if (weights)
    weights = THCTensor_(newContiguous)(state, weights);
  if (THTensor_nDimensionLegacyNoScalars(input) == 1)
  {
    // Single sample: one block reduces the whole vector.
    dim3 blocks(1);
    dim3 threads(MULTIMARGIN_THREADS);
    THCTensor_(resize1d)(state, output, 1);
    if (p == 1)
    {
      hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
          THCTensor_(data)(state, output),
          THCTensor_(data)(state, input),
          THCIndexTensor_(data)(state, target),
          weights ? THCTensor_(data)(state, weights) : NULL,
          1, THTensor_sizeLegacyNoScalars(input, 0),
          reduction == Reduction::Mean,
          margin
      );
    }
    else if (p == 2)
    {
      hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
          THCTensor_(data)(state, output),
          THCTensor_(data)(state, input),
          THCIndexTensor_(data)(state, target),
          weights ? THCTensor_(data)(state, weights) : NULL,
          1, THTensor_sizeLegacyNoScalars(input, 0),
          reduction == Reduction::Mean,
          margin
      );
    }
    THCudaCheck(hipGetLastError());
  }
  else if (input->dim() == 2)
  {
    int nframe = input->size(0);
    THArgCheck(!target->is_empty() && (THTensor_nDimensionLegacyNoScalars(target) == 1) && (THTensor_sizeLegacyNoScalars(target, 0) == nframe), 3,
               "inconsistent target size");
    // One block per sample.
    dim3 blocks(nframe);
    dim3 threads(MULTIMARGIN_THREADS);
    if (reduction == Reduction::None)
    {
      // Per-sample losses: no final reduction.
      THCTensor_(resize1d)(state, output, nframe);
      if (p == 1)
      {
        hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
            THCTensor_(data)(state, output),
            THCTensor_(data)(state, input),
            THCIndexTensor_(data)(state, target),
            weights ? THCTensor_(data)(state, weights) : NULL,
            nframe, input->size(1),
            false,
            margin
        );
      }
      else if (p == 2)
      {
        hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
            THCTensor_(data)(state, output),
            THCTensor_(data)(state, input),
            THCIndexTensor_(data)(state, target),
            weights ? THCTensor_(data)(state, weights) : NULL,
            nframe, input->size(1),
            false,
            margin
        );
      }
      THCudaCheck(hipGetLastError());
    }
    else
    {
      // Mean/Sum: compute per-sample losses into a temp buffer, then sum.
      THCTensor_(resize1d)(state, output, 1);
      THCTensor *output_ = THCTensor_(newWithSize1d)(state, nframe); // tmp output buffer
      if (p == 1)
      {
        hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
            THCTensor_(data)(state, output_),
            THCTensor_(data)(state, input),
            THCIndexTensor_(data)(state, target),
            weights ? THCTensor_(data)(state, weights) : NULL,
            nframe, input->size(1),
            reduction == Reduction::Mean,
            margin
        );
      }
      else if (p == 2)
      {
        hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
            THCTensor_(data)(state, output_),
            THCTensor_(data)(state, input),
            THCIndexTensor_(data)(state, target),
            weights ? THCTensor_(data)(state, weights) : NULL,
            nframe, input->size(1),
            reduction == Reduction::Mean,
            margin
        );
      }
      THCudaCheck(hipGetLastError());
      // BUG FIX: sumall returns accreal; the original stored it in a float,
      // truncating the result for double tensors.  Keep full precision until
      // the final cast to scalar_t.
      accreal sum = THCTensor_(sumall)(state, output_);
      THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(sum));
      THCTensor_(free)(state, output_);
    }
  }
  else
  {
    AT_ERROR("non-empty vector or matrix expected, got sizes: ", input->sizes());
  }
  // Release the contiguous copies taken above.
  THCTensor_(free)(state, input);
  if (weights)
    THCTensor_(free)(state, weights);
}
// Backward pass of the multi-class margin loss (HIP build).  Mirrors the
// forward dispatch: 1-D input uses a single block, 2-D input uses one block
// per sample.  gradInput is resized to match input.
void THNN_(MultiMarginCriterion_updateGradInput)(
           THCState *state,
           THCTensor *input,
           THCIndexTensor *target,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           int64_t reduction,
           int p,
           THCTensor *weights,
           accreal margin_)
{
  scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_);
  THCUNN_assertSameGPU(state, 3, input, gradInput, target);
  input = THCTensor_(newContiguous)(state, input);
  gradOutput = THCTensor_(newContiguous)(state, gradOutput);
  THCTensor_(resizeAs)(state, gradInput, input);
  if(weights)
    weights = THCTensor_(newContiguous)(state, weights);
  if (THTensor_nDimensionLegacyNoScalars(input) == 1)
  {
    // Single sample: one block handles the whole vector.
    dim3 blocks(1);
    dim3 threads(MULTIMARGIN_THREADS);
    if (p == 1)
    {
      hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<1, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
        THCTensor_(data)(state, gradInput),
        THCTensor_(data)(state, gradOutput),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        weights ? THCTensor_(data)(state, weights) : NULL,
        1, THTensor_sizeLegacyNoScalars(gradInput, 0),
        reduction == Reduction::Mean,
        margin,
        reduction != Reduction::None
      );
    }
    else if (p == 2)
    {
      hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<2, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
        THCTensor_(data)(state, gradInput),
        THCTensor_(data)(state, gradOutput),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        weights ? THCTensor_(data)(state, weights) : NULL,
        1, THTensor_sizeLegacyNoScalars(gradInput, 0),
        reduction == Reduction::Mean,
        margin,
        reduction != Reduction::None
      );
    }
    THCudaCheck(hipGetLastError());
  }
  else if (input->dim() == 2)
  {
    int nframe = gradInput->size(0);
    THArgCheck(!target->is_empty() && (THTensor_nDimensionLegacyNoScalars(target) == 1) && (THTensor_sizeLegacyNoScalars(target, 0) == nframe), 3,
               "inconsistent target size");
    // One block per sample.
    dim3 blocks(gradInput->size(0));
    dim3 threads(MULTIMARGIN_THREADS);
    if (p == 1)
    {
      hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<1, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
        THCTensor_(data)(state, gradInput),
        THCTensor_(data)(state, gradOutput),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        weights ? THCTensor_(data)(state, weights) : NULL,
        nframe, gradInput->size(1),
        reduction == Reduction::Mean,
        margin,
        reduction != Reduction::None
      );
    }
    else if (p == 2)
    {
      hipLaunchKernelGGL(( cunn_MultiMarginCriterion_updateGradInput_kernel<2, scalar_t, accreal>) , dim3(blocks),dim3(threads), 0, THCState_getCurrentStream(state),
        THCTensor_(data)(state, gradInput),
        THCTensor_(data)(state, gradOutput),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        weights ? THCTensor_(data)(state, weights) : NULL,
        nframe, gradInput->size(1),
        reduction == Reduction::Mean,
        margin,
        reduction != Reduction::None
      );
    }
    THCudaCheck(hipGetLastError());
  }
  else
  {
    AT_ERROR("non-empty vector or matrix expected, got ", input->sizes());
  }
  // Release the contiguous copies taken above.
  THCTensor_(free)(state, input);
  THCTensor_(free)(state, gradOutput);
  if(weights)
    THCTensor_(free)(state, weights);
}
| 337ea4c588220aa6ebd2fe73f16f702487ae3c82.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/MultiMarginCriterion.cu"
#else
// TODO: improve error messages
// Forward pass of the multi-class margin loss (CUDA build).
// Accepts a 1-D input (single sample) or 2-D input (batch of nframe samples);
// p selects the L1 (p==1) or squared (p==2) margin penalty.  `reduction`
// follows Reduction::{None,Mean,Sum}: None keeps per-sample losses, otherwise
// the per-sample losses are summed (and averaged inside the kernel for Mean).
// The signature is fixed by the THNN generic dispatch and must not change.
void THNN_(MultiMarginCriterion_updateOutput)(
           THCState *state,
           THCTensor *input,
           THCIndexTensor *target,
           THCTensor *output,
           int64_t reduction,
           int p,
           THCTensor *weights,
           accreal margin_)
{
  scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_);
  THCUNN_assertSameGPU(state, 2, input, target);
  input = THCTensor_(newContiguous)(state, input);
  if (weights)
    weights = THCTensor_(newContiguous)(state, weights);
  if (THTensor_nDimensionLegacyNoScalars(input) == 1)
  {
    // Single sample: one block reduces the whole vector.
    dim3 blocks(1);
    dim3 threads(MULTIMARGIN_THREADS);
    THCTensor_(resize1d)(state, output, 1);
    if (p == 1)
    {
      cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
          THCTensor_(data)(state, output),
          THCTensor_(data)(state, input),
          THCIndexTensor_(data)(state, target),
          weights ? THCTensor_(data)(state, weights) : NULL,
          1, THTensor_sizeLegacyNoScalars(input, 0),
          reduction == Reduction::Mean,
          margin
      );
    }
    else if (p == 2)
    {
      cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
          THCTensor_(data)(state, output),
          THCTensor_(data)(state, input),
          THCIndexTensor_(data)(state, target),
          weights ? THCTensor_(data)(state, weights) : NULL,
          1, THTensor_sizeLegacyNoScalars(input, 0),
          reduction == Reduction::Mean,
          margin
      );
    }
    THCudaCheck(cudaGetLastError());
  }
  else if (input->dim() == 2)
  {
    int nframe = input->size(0);
    THArgCheck(!target->is_empty() && (THTensor_nDimensionLegacyNoScalars(target) == 1) && (THTensor_sizeLegacyNoScalars(target, 0) == nframe), 3,
               "inconsistent target size");
    // One block per sample.
    dim3 blocks(nframe);
    dim3 threads(MULTIMARGIN_THREADS);
    if (reduction == Reduction::None)
    {
      // Per-sample losses: no final reduction.
      THCTensor_(resize1d)(state, output, nframe);
      if (p == 1)
      {
        cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
            THCTensor_(data)(state, output),
            THCTensor_(data)(state, input),
            THCIndexTensor_(data)(state, target),
            weights ? THCTensor_(data)(state, weights) : NULL,
            nframe, input->size(1),
            false,
            margin
        );
      }
      else if (p == 2)
      {
        cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
            THCTensor_(data)(state, output),
            THCTensor_(data)(state, input),
            THCIndexTensor_(data)(state, target),
            weights ? THCTensor_(data)(state, weights) : NULL,
            nframe, input->size(1),
            false,
            margin
        );
      }
      THCudaCheck(cudaGetLastError());
    }
    else
    {
      // Mean/Sum: compute per-sample losses into a temp buffer, then sum.
      THCTensor_(resize1d)(state, output, 1);
      THCTensor *output_ = THCTensor_(newWithSize1d)(state, nframe);  // tmp output buffer
      if (p == 1)
      {
        cunn_MultiMarginCriterion_updateOutput_kernel<1, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
            THCTensor_(data)(state, output_),
            THCTensor_(data)(state, input),
            THCIndexTensor_(data)(state, target),
            weights ? THCTensor_(data)(state, weights) : NULL,
            nframe, input->size(1),
            reduction == Reduction::Mean,
            margin
        );
      }
      else if (p == 2)
      {
        cunn_MultiMarginCriterion_updateOutput_kernel<2, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
            THCTensor_(data)(state, output_),
            THCTensor_(data)(state, input),
            THCIndexTensor_(data)(state, target),
            weights ? THCTensor_(data)(state, weights) : NULL,
            nframe, input->size(1),
            reduction == Reduction::Mean,
            margin
        );
      }
      THCudaCheck(cudaGetLastError());
      // BUG FIX: sumall returns accreal; the original stored it in a float,
      // truncating the result for double tensors.  Keep full precision until
      // the final cast to scalar_t.
      accreal sum = THCTensor_(sumall)(state, output_);
      THCTensor_(set1d)(state, output, 0, ScalarConvert<accreal, scalar_t>::to(sum));
      THCTensor_(free)(state, output_);
    }
  }
  else
  {
    AT_ERROR("non-empty vector or matrix expected, got sizes: ", input->sizes());
  }
  // Release the contiguous copies taken above.
  THCTensor_(free)(state, input);
  if (weights)
    THCTensor_(free)(state, weights);
}
// Backward pass of the multi-class margin loss (CUDA build).  Mirrors the
// forward dispatch: 1-D input uses a single block, 2-D input uses one block
// per sample.  gradInput is resized to match input.
void THNN_(MultiMarginCriterion_updateGradInput)(
           THCState *state,
           THCTensor *input,
           THCIndexTensor *target,
           THCTensor *gradOutput,
           THCTensor *gradInput,
           int64_t reduction,
           int p,
           THCTensor *weights,
           accreal margin_)
{
  scalar_t margin = ScalarConvert<accreal, scalar_t>::to(margin_);
  THCUNN_assertSameGPU(state, 3, input, gradInput, target);
  input = THCTensor_(newContiguous)(state, input);
  gradOutput = THCTensor_(newContiguous)(state, gradOutput);
  THCTensor_(resizeAs)(state, gradInput, input);
  if(weights)
    weights = THCTensor_(newContiguous)(state, weights);
  if (THTensor_nDimensionLegacyNoScalars(input) == 1)
  {
    // Single sample: one block handles the whole vector.
    dim3 blocks(1);
    dim3 threads(MULTIMARGIN_THREADS);
    if (p == 1)
    {
      cunn_MultiMarginCriterion_updateGradInput_kernel<1, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
        THCTensor_(data)(state, gradInput),
        THCTensor_(data)(state, gradOutput),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        weights ? THCTensor_(data)(state, weights) : NULL,
        1, THTensor_sizeLegacyNoScalars(gradInput, 0),
        reduction == Reduction::Mean,
        margin,
        reduction != Reduction::None
      );
    }
    else if (p == 2)
    {
      cunn_MultiMarginCriterion_updateGradInput_kernel<2, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
        THCTensor_(data)(state, gradInput),
        THCTensor_(data)(state, gradOutput),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        weights ? THCTensor_(data)(state, weights) : NULL,
        1, THTensor_sizeLegacyNoScalars(gradInput, 0),
        reduction == Reduction::Mean,
        margin,
        reduction != Reduction::None
      );
    }
    THCudaCheck(cudaGetLastError());
  }
  else if (input->dim() == 2)
  {
    int nframe = gradInput->size(0);
    THArgCheck(!target->is_empty() && (THTensor_nDimensionLegacyNoScalars(target) == 1) && (THTensor_sizeLegacyNoScalars(target, 0) == nframe), 3,
               "inconsistent target size");
    // One block per sample.
    dim3 blocks(gradInput->size(0));
    dim3 threads(MULTIMARGIN_THREADS);
    if (p == 1)
    {
      cunn_MultiMarginCriterion_updateGradInput_kernel<1, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
        THCTensor_(data)(state, gradInput),
        THCTensor_(data)(state, gradOutput),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        weights ? THCTensor_(data)(state, weights) : NULL,
        nframe, gradInput->size(1),
        reduction == Reduction::Mean,
        margin,
        reduction != Reduction::None
      );
    }
    else if (p == 2)
    {
      cunn_MultiMarginCriterion_updateGradInput_kernel<2, scalar_t, accreal> <<<blocks,threads, 0, THCState_getCurrentStream(state)>>>(
        THCTensor_(data)(state, gradInput),
        THCTensor_(data)(state, gradOutput),
        THCTensor_(data)(state, input),
        THCIndexTensor_(data)(state, target),
        weights ? THCTensor_(data)(state, weights) : NULL,
        nframe, gradInput->size(1),
        reduction == Reduction::Mean,
        margin,
        reduction != Reduction::None
      );
    }
    THCudaCheck(cudaGetLastError());
  }
  else
  {
    AT_ERROR("non-empty vector or matrix expected, got ", input->sizes());
  }
  // Release the contiguous copies taken above.
  THCTensor_(free)(state, input);
  THCTensor_(free)(state, gradOutput);
  if(weights)
    THCTensor_(free)(state, weights);
}
#endif
|
834cc08defc35f68bbf8173b42b23d9375b52a34.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "CUDAkernel_accumulate.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver (HIP build): times CUDAkernel_accumulate over
// every (block shape, matrix shape) combination.  argv[1] is the number of
// matrix shapes (rows of matrices_) to sweep.
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *buffer = NULL;
            // BUG FIX: allocate XSIZE*YSIZE floats, not XSIZE*YSIZE bytes --
            // the kernel is given `size` = XSIZE*YSIZE elements to touch.
            hipMalloc(&buffer, XSIZE * YSIZE * sizeof(float));
            int addSize = XSIZE * YSIZE;
            int size = XSIZE * YSIZE;
            // Round the launch extents up to multiples of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // force context creation before timing
            hipLaunchKernelGGL(CUDAkernel_accumulate, gridBlock, threadBlock, 0, 0, buffer, addSize, size);
            hipDeviceSynchronize();
            // Warm-up launches, excluded from the timed region.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(CUDAkernel_accumulate, gridBlock, threadBlock, 0, 0, buffer, addSize, size);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(CUDAkernel_accumulate, gridBlock, threadBlock, 0, 0, buffer, addSize, size);
            }
            // BUG FIX: kernel launches are asynchronous; synchronize before
            // reading the clock so the measurement covers the work done.
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // BUG FIX: the original never freed buffer, leaking device memory
            // on every iteration of the sweep.
            hipFree(buffer);
        }
    }
}
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "CUDAkernel_accumulate.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark driver (CUDA build): times CUDAkernel_accumulate
// over every (block shape, matrix shape) combination.  argv[1] is the number
// of matrix shapes (rows of matrices_) to sweep.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *buffer = NULL;
            // BUG FIX: allocate XSIZE*YSIZE floats, not XSIZE*YSIZE bytes --
            // the kernel is given `size` = XSIZE*YSIZE elements to touch.
            cudaMalloc(&buffer, XSIZE * YSIZE * sizeof(float));
            int addSize = XSIZE * YSIZE;
            int size = XSIZE * YSIZE;
            // Round the launch extents up to multiples of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force context creation before timing
            CUDAkernel_accumulate<<<gridBlock, threadBlock>>>(buffer, addSize, size);
            cudaDeviceSynchronize();
            // Warm-up launches, excluded from the timed region.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                CUDAkernel_accumulate<<<gridBlock, threadBlock>>>(buffer, addSize, size);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                CUDAkernel_accumulate<<<gridBlock, threadBlock>>>(buffer, addSize, size);
            }
            // BUG FIX: kernel launches are asynchronous; synchronize before
            // reading the clock so the measurement covers the work done.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // BUG FIX: the original never freed buffer, leaking device memory
            // on every iteration of the sweep.
            cudaFree(buffer);
        }
    }
}
b15be037899e2c469c51c3cc93c37d744ccf61bd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define n 10
// Finds the maximum of a[0..n-1] and stores it in *max (*max must be
// pre-seeded by the host; main() seeds it with a[0]).
// NOTE(review): despite the parallel-looking index, each qualifying thread
// reuses `i` as the loop counter and serially scans the whole array.  This is
// only correct for the <<<1,1>>> launch used in main(); with more threads the
// unguarded read-modify-write of *max would race.
__global__ void add(int*a, int*max)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n)
    {
        // serial scan; `i` is repurposed as the loop index here
        for(i=0;i<n;i++)
        {
            if(a[i]>*max)
                *max=a[i];
        }
    }
}
// Reads n (== 10) integers from stdin, finds their maximum on the device via
// the `add` kernel, and prints the result.
int main()
{
    int a[n];        // host input array
    int i;
    int max;         // running maximum, seeded with a[0] before the launch
    int* dev_a;      // device copy of the input
    int* dev_max;    // device cell holding the running maximum
    hipMalloc((void**)&dev_max, sizeof(int));
    hipMalloc((void**)&dev_a, n * sizeof(int));
    printf("\narray elements:\n");
    // read the n elements from stdin
    for(i=0;i<n;i++)
    {
        scanf("%d",&a[i]);
    }
    max = a[0];
    hipMemcpy(dev_a, a, n * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_max, &max, sizeof(int), hipMemcpyHostToDevice);
    // single-thread launch: the kernel scans the array serially on the device
    hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, dev_a, dev_max);
    // blocking copy; it also synchronizes with the kernel before the read
    hipMemcpy(&max, dev_max, sizeof(int), hipMemcpyDeviceToHost);
    printf("\nMax is %d\n",max);
    hipFree(dev_max);
    hipFree(dev_a);
    hipDeviceReset();
    return 0;
}
| b15be037899e2c469c51c3cc93c37d744ccf61bd.cu | #include <stdio.h>
#include <cuda.h>
#define n 10
// Finds the maximum of a[0..n-1] and stores it in *max (*max must be
// pre-seeded by the host; main() seeds it with a[0]).
// NOTE(review): despite the parallel-looking index, each qualifying thread
// reuses `i` as the loop counter and serially scans the whole array.  This is
// only correct for the <<<1,1>>> launch used in main(); with more threads the
// unguarded read-modify-write of *max would race.
__global__ void add(int*a, int*max)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n)
    {
        // serial scan; `i` is repurposed as the loop index here
        for(i=0;i<n;i++)
        {
            if(a[i]>*max)
                *max=a[i];
        }
    }
}
// Reads n (== 10) integers from stdin, finds their maximum on the device via
// the `add` kernel, and prints the result.
int main()
{
    int a[n];        // host input array
    int i;
    int max;         // running maximum, seeded with a[0] before the launch
    int* dev_a;      // device copy of the input
    int* dev_max;    // device cell holding the running maximum
    cudaMalloc((void**)&dev_max, sizeof(int));
    cudaMalloc((void**)&dev_a, n * sizeof(int));
    printf("\narray elements:\n");
    // read the n elements from stdin
    for(i=0;i<n;i++)
    {
        scanf("%d",&a[i]);
    }
    max = a[0];
    cudaMemcpy(dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_max, &max, sizeof(int), cudaMemcpyHostToDevice);
    // single-thread launch: the kernel scans the array serially on the device
    add<<<1,1>>>(dev_a, dev_max);
    // blocking copy; it also synchronizes with the kernel before the read
    cudaMemcpy(&max, dev_max, sizeof(int), cudaMemcpyDeviceToHost);
    printf("\nMax is %d\n",max);
    cudaFree(dev_max);
    cudaFree(dev_a);
    cudaDeviceReset();
    return 0;
}
|
68d78703281732f2396384bbed55b5f4ca6d6f04.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright (c) 2020 by Contributors
* \file array/cuda/spmm.cu
* \brief SPMM C APIs and definitions.
*/
#include <dgl/array.h>
#include "./spmm.cuh"
#include "./ge_spmm.cuh"
#include "functor.cuh"
#include "../../runtime/cuda/cuda_common.h"
namespace dgl {
using namespace cuda;
namespace aten {
/*!
* \brief Determine whether cusparse SpMM function is applicable.
*/
/*!
 * \brief Whether cuSPARSE SpMM can be used for the given index type, data bit
 *        width, and sparsity, under the toolkit version we were built with.
 */
template <int bits, typename IdType>
inline bool cusparse_available(bool more_nnz_than_matrix_size) {
#if CUDART_VERSION < 11000
  // Pre-11.0 cuSPARSE: only 32-bit int indices with >16-bit (fp32/fp64) data.
  return std::is_same<IdType, int>::value && bits > 16;
#else
  // cusparse's SpMM on fp16 is slow, temporarily disabled.
  if (bits == 16)
    return false;
  // If the CSR matrix has more NNZ than matrix size, we should not use cuSPARSE 11.1.
  return !more_nnz_than_matrix_size;
#endif
}
/*!
* \brief CUDA implementation of g-SpMM on Csr format.
* \note use cusparse if the reduce operator is `sum` and there is
* no broadcast, use dgl's kernel in other cases.
*/
/*!
 * \brief CUDA g-SpMM on CSR: out = reduce(op(ufeat, efeat)) over incoming edges.
 *        For reduce == "sum" with no broadcast, dispatches to cuSPARSE when
 *        `cusparse_available` allows it; otherwise falls back to DGL's own
 *        kernels.  For "max"/"min", out_aux[0]/out_aux[1] receive the argmin/
 *        argmax bookkeeping used by the backward pass.
 */
template <int XPU, typename IdType, int bits>
void SpMMCsr(const std::string& op, const std::string& reduce,
             const BcastOff& bcast,
             const CSRMatrix& csr,
             NDArray ufeat,
             NDArray efeat,
             NDArray out,
             std::vector<NDArray> out_aux) {
  // True when efeat carries one scalar per nonzero of the CSR matrix.
  bool is_scalar_efeat = efeat.NumElements() == csr.indices->shape[0];
  // (cleanup: removed unused locals `feat_len` and `use_efeat`)
  if (reduce == "sum") {
    bool more_nnz = (csr.indices->shape[0] > csr.num_rows * csr.num_cols);
    if (op == "copy_lhs" && cusparse_available<bits, IdType>(more_nnz)) {
      // cusparse: plain A @ ufeat
      int64_t x_length = 1;
      for (int i = 1; i < ufeat->ndim; ++i)
        x_length *= ufeat->shape[i];
      SWITCH_BITS(bits, DType, {
        CusparseCsrmm2<DType, IdType>(
            ufeat->ctx, csr,
            static_cast<DType*>(ufeat->data),
            nullptr,
            static_cast<DType*>(out->data),
            x_length);
      });
    } else if (op == "mul" && is_scalar_efeat && cusparse_available<bits, IdType>(more_nnz)) {
      // cusparse: edge-scaled A @ ufeat
      int64_t x_length = 1;
      for (int i = 1; i < ufeat->ndim; ++i)
        x_length *= ufeat->shape[i];
      if (!IsNullArray(csr.data)) {
        // Permute efeat into CSR nonzero order before handing it to cusparse.
        SWITCH_BITS(bits, DType, {
          efeat = _IndexSelect<DType, IdType>(efeat, csr.data);
        });
      }
      SWITCH_BITS(bits, DType, {
        CusparseCsrmm2<DType, IdType>(
            ufeat->ctx, csr,
            static_cast<DType*>(ufeat->data),
            static_cast<DType*>(efeat->data),
            static_cast<DType*>(out->data),
            x_length);
      });
    } else {  // general kernel
      SWITCH_BITS(bits, DType, {
        SWITCH_OP(op, Op, {
          cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Sum<IdType, DType> >(
              bcast, csr, ufeat, efeat, out, NullArray(), NullArray());
        });
      });
    }
  } else if (reduce == "max") {
    SWITCH_BITS(bits, DType, {
      SWITCH_OP(op, Op, {
        cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Max<IdType, DType> >(
            bcast, csr, ufeat, efeat, out, out_aux[0], out_aux[1]);
      });
    });
  } else if (reduce == "min") {
    SWITCH_BITS(bits, DType, {
      SWITCH_OP(op, Op, {
        cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Min<IdType, DType> >(
            bcast, csr, ufeat, efeat, out, out_aux[0], out_aux[1]);
      });
    });
  } else {
    LOG(FATAL) << "Not implemented";
  }
}
/*!
* \brief CUDA implementation of g-SpMM on Coo format.
*/
template <int XPU, typename IdType, int bits>
void SpMMCoo(const std::string& op, const std::string& reduce,
             const BcastOff& bcast,
             const COOMatrix& coo,
             NDArray ufeat,
             NDArray efeat,
             NDArray out,
             std::vector<NDArray> out_aux) {
  // Dispatch purely on the reduce operator; SWITCH_BITS/SWITCH_OP expand the
  // dtype and binary-op template parameters.
  // NOTE(review): the trailing `true` template flag differs from the Csr
  // path's reducers -- presumably it selects an atomic-update variant for
  // COO's unordered rows; confirm against the reducer definitions.
  if (reduce == "sum") {
    SWITCH_BITS(bits, DType, {
      SWITCH_OP(op, Op, {
        // sum needs no argmin/argmax buffers, hence the NullArray() pair.
        cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Sum<IdType, DType, true> > (
          bcast, coo, ufeat, efeat, out, NullArray(), NullArray());
      });
    });
  } else if (reduce == "max") {
    SWITCH_BITS(bits, DType, {
      SWITCH_OP(op, Op, {
        // out_aux[0]/out_aux[1] receive the arg positions for the max.
        cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Max<IdType, DType, true> > (
          bcast, coo, ufeat, efeat, out, out_aux[0], out_aux[1]);
      });
    });
  } else if (reduce == "min") {
    SWITCH_BITS(bits, DType, {
      SWITCH_OP(op, Op, {
        cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Min<IdType, DType, true> > (
          bcast, coo, ufeat, efeat, out, out_aux[0], out_aux[1]);
      });
    });
  } else {
    LOG(FATAL) << "Not implemented";
  }
}
template void SpMMCsr<kDLGPU, int32_t, 16>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCsr<kDLGPU, int64_t, 16>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCsr<kDLGPU, int32_t, 32>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCsr<kDLGPU, int64_t, 32>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCsr<kDLGPU, int32_t, 64>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCsr<kDLGPU, int64_t, 64>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCoo<kDLGPU, int32_t, 16>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const COOMatrix& coo,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCoo<kDLGPU, int64_t, 16>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const COOMatrix& coo,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCoo<kDLGPU, int32_t, 32>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const COOMatrix& coo,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCoo<kDLGPU, int64_t, 32>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const COOMatrix& coo,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCoo<kDLGPU, int32_t, 64>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const COOMatrix& coo,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCoo<kDLGPU, int64_t, 64>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const COOMatrix& coo,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
} // namespace aten
} // namespace dgl
| 68d78703281732f2396384bbed55b5f4ca6d6f04.cu | /*!
* Copyright (c) 2020 by Contributors
* \file array/cuda/spmm.cu
* \brief SPMM C APIs and definitions.
*/
#include <dgl/array.h>
#include "./spmm.cuh"
#include "./ge_spmm.cuh"
#include "./functor.cuh"
#include "../../runtime/cuda/cuda_common.h"
namespace dgl {
using namespace cuda;
namespace aten {
/*!
* \brief Determine whether cusparse SpMM function is applicable.
*/
template <int bits, typename IdType>
inline bool cusparse_available(bool more_nnz_than_matrix_size) {
#if CUDART_VERSION < 11000
  // Pre-11.0 cuSPARSE path: only 32-bit indices with fp32/fp64 data.
  return std::is_same<IdType, int>::value && bits > 16;
#else
  // cusparse's SpMM on fp16 is slow, temporally disabled.
  if (bits == 16) return false;
  // If the CSR matrix has more NNZ than matrix size, we should not use cuSPARSE 11.1.
  return !more_nnz_than_matrix_size;
#endif
}
/*!
* \brief CUDA implementation of g-SpMM on Csr format.
* \note use cusparse if the reduce operator is `sum` and there is
* no broadcast, use dgl's kernel in other cases.
*/
template <int XPU, typename IdType, int bits>
void SpMMCsr(const std::string& op, const std::string& reduce,
const BcastOff& bcast,
const CSRMatrix& csr,
NDArray ufeat,
NDArray efeat,
NDArray out,
std::vector<NDArray> out_aux) {
int64_t feat_len = bcast.out_len;
bool is_scalar_efeat = efeat.NumElements() == csr.indices->shape[0];
bool use_efeat = op != "copy_lhs";
if (reduce == "sum") {
bool more_nnz = (csr.indices->shape[0] > csr.num_rows * csr.num_cols);
if (op == "copy_lhs" && cusparse_available<bits, IdType>(more_nnz)) {
// cusparse
int64_t x_length = 1;
for (int i = 1; i < ufeat->ndim; ++i)
x_length *= ufeat->shape[i];
SWITCH_BITS(bits, DType, {
CusparseCsrmm2<DType, IdType>(
ufeat->ctx, csr,
static_cast<DType*>(ufeat->data),
nullptr,
static_cast<DType*>(out->data),
x_length);
});
} else if (op == "mul" && is_scalar_efeat && cusparse_available<bits, IdType>(more_nnz)) {
// cusparse
int64_t x_length = 1;
for (int i = 1; i < ufeat->ndim; ++i)
x_length *= ufeat->shape[i];
if (!IsNullArray(csr.data)) {
SWITCH_BITS(bits, DType, {
efeat = _IndexSelect<DType, IdType>(efeat, csr.data);
});
}
SWITCH_BITS(bits, DType, {
CusparseCsrmm2<DType, IdType>(
ufeat->ctx, csr,
static_cast<DType*>(ufeat->data),
static_cast<DType*>(efeat->data),
static_cast<DType*>(out->data),
x_length);
});
} else { // general kernel
SWITCH_BITS(bits, DType, {
SWITCH_OP(op, Op, {
cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Sum<IdType, DType> >(
bcast, csr, ufeat, efeat, out, NullArray(), NullArray());
});
});
}
} else if (reduce == "max") {
SWITCH_BITS(bits, DType, {
SWITCH_OP(op, Op, {
cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Max<IdType, DType> >(
bcast, csr, ufeat, efeat, out, out_aux[0], out_aux[1]);
});
});
} else if (reduce == "min") {
SWITCH_BITS(bits, DType, {
SWITCH_OP(op, Op, {
cuda::SpMMCsr<IdType, DType, Op, cuda::reduce::Min<IdType, DType> >(
bcast, csr, ufeat, efeat, out, out_aux[0], out_aux[1]);
});
});
} else {
LOG(FATAL) << "Not implemented";
}
}
/*!
* \brief CUDA implementation of g-SpMM on Coo format.
*/
template <int XPU, typename IdType, int bits>
void SpMMCoo(const std::string& op, const std::string& reduce,
const BcastOff& bcast,
const COOMatrix& coo,
NDArray ufeat,
NDArray efeat,
NDArray out,
std::vector<NDArray> out_aux) {
if (reduce == "sum") {
SWITCH_BITS(bits, DType, {
SWITCH_OP(op, Op, {
cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Sum<IdType, DType, true> > (
bcast, coo, ufeat, efeat, out, NullArray(), NullArray());
});
});
} else if (reduce == "max") {
SWITCH_BITS(bits, DType, {
SWITCH_OP(op, Op, {
cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Max<IdType, DType, true> > (
bcast, coo, ufeat, efeat, out, out_aux[0], out_aux[1]);
});
});
} else if (reduce == "min") {
SWITCH_BITS(bits, DType, {
SWITCH_OP(op, Op, {
cuda::SpMMCoo<IdType, DType, Op, cuda::reduce::Min<IdType, DType, true> > (
bcast, coo, ufeat, efeat, out, out_aux[0], out_aux[1]);
});
});
} else {
LOG(FATAL) << "Not implemented";
}
}
template void SpMMCsr<kDLGPU, int32_t, 16>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCsr<kDLGPU, int64_t, 16>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCsr<kDLGPU, int32_t, 32>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCsr<kDLGPU, int64_t, 32>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCsr<kDLGPU, int32_t, 64>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCsr<kDLGPU, int64_t, 64>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCoo<kDLGPU, int32_t, 16>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const COOMatrix& coo,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCoo<kDLGPU, int64_t, 16>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const COOMatrix& coo,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCoo<kDLGPU, int32_t, 32>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const COOMatrix& coo,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCoo<kDLGPU, int64_t, 32>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const COOMatrix& coo,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCoo<kDLGPU, int32_t, 64>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const COOMatrix& coo,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
template void SpMMCoo<kDLGPU, int64_t, 64>(
const std::string& op, const std::string& reduce,
const BcastOff& bcast, const COOMatrix& coo,
NDArray ufeat, NDArray efeat, NDArray out, std::vector<NDArray> out_aux);
} // namespace aten
} // namespace dgl
|
a370c7726d41953cda7248425b65fcf7c1974c07.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cumlHandle.hpp>
#include <common/device_buffer.hpp>
#include <cuda_utils.cuh>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/linear_model/ols_mg.hpp>
#include <cuml/linear_model/preprocess_mg.hpp>
#include <linalg/add.cuh>
#include <linalg/gemm.cuh>
#include <matrix/math.cuh>
#include <matrix/matrix.cuh>
#include <opg/linalg/lstsq.hpp>
#include <opg/stats/mean.hpp>
#include <raft/comms/comms.hpp>
using namespace MLCommon;
namespace ML {
namespace OLS {
namespace opg {
// Stream-level MNMG OLS fit on already-partitioned data.
// Optionally centers/normalizes the inputs (preProcessData), solves the
// least-squares system with the distributed eigendecomposition solver
// (algo == 1), then restores the data and derives the intercept.
// `streams`/`n_streams` are caller-owned; all work is enqueued on them.
template <typename T>
void fit_impl(raft::handle_t &handle,
              std::vector<Matrix::Data<T> *> &input_data,
              Matrix::PartDescriptor &input_desc,
              std::vector<Matrix::Data<T> *> &labels, T *coef, T *intercept,
              bool fit_intercept, bool normalize, int algo,
              hipStream_t *streams, int n_streams, bool verbose) {
  const auto &comm = handle.get_comms();
  hipblasHandle_t cublas_handle = handle.get_cublas_handle();
  hipsolverDnHandle_t cusolver_handle = handle.get_cusolver_dn_handle();
  const auto allocator = handle.get_device_allocator();

  // Scratch buffers for column means / norms; only sized when needed below.
  device_buffer<T> mu_input(allocator, streams[0]);
  device_buffer<T> norm2_input(allocator, streams[0]);
  device_buffer<T> mu_labels(allocator, streams[0]);

  if (fit_intercept) {
    mu_input.resize(input_desc.N, streams[0]);
    mu_labels.resize(1, streams[0]);
    if (normalize) {
      norm2_input.resize(input_desc.N, streams[0]);
    }
    // Center (and optionally scale) features and labels in place.
    GLM::opg::preProcessData(handle, input_data, input_desc, labels,
                             mu_input.data(), mu_labels.data(),
                             norm2_input.data(), fit_intercept, normalize,
                             streams, n_streams, verbose);
  }

  // Only the eigendecomposition solver (algo == 1) is implemented; algo 0
  // and the single-feature case are rejected outright.
  if (algo == 0 || input_desc.N == 1) {
    ASSERT(false, "olsFit: no algorithm with this id has been implemented");
  } else if (algo == 1) {
    LinAlg::opg::lstsqEig(input_data, input_desc, labels, coef, comm, allocator,
                          streams, n_streams, cublas_handle, cusolver_handle);
  } else {
    ASSERT(false, "olsFit: no algorithm with this id has been implemented");
  }

  if (fit_intercept) {
    // Undo the centering/normalization and compute the intercept term.
    GLM::opg::postProcessData(handle, input_data, input_desc, labels, coef,
                              intercept, mu_input.data(), mu_labels.data(),
                              norm2_input.data(), fit_intercept, normalize,
                              streams, n_streams, verbose);
  } else {
    *intercept = T(0);
  }
}
/**
 * @brief Performs the MNMG fit operation for OLS.
 * @param[in]  handle         the internal cuml handle object
 * @param[in]  input_data     input data, one partition per entry
 * @param[in]  input_desc     partition descriptor for the input data
 * @param[in]  labels         labels data
 * @param[out] coef           learned regression coefficients
 * @param[out] intercept      intercept value
 * @param[in]  fit_intercept  fit intercept or not
 * @param[in]  normalize      normalize the data or not
 * @param[in]  algo           solver id (only 1, lstsqEig, is implemented)
 * @param[in]  verbose        verbosity flag
 */
template <typename T>
void fit_impl(raft::handle_t &handle,
              std::vector<Matrix::Data<T> *> &input_data,
              Matrix::PartDescriptor &input_desc,
              std::vector<Matrix::Data<T> *> &labels, T *coef, T *intercept,
              bool fit_intercept, bool normalize, int algo, bool verbose) {
  int rank = handle.get_comms().get_rank();

  // TODO: These streams should come from raft::handle_t
  // One stream per data block owned by this rank.
  int n_streams = input_desc.blocksOwnedBy(rank).size();
  hipStream_t streams[n_streams];  // NOTE(review): VLA is a compiler extension
  for (int i = 0; i < n_streams; i++) {
    CUDA_CHECK(hipStreamCreate(&streams[i]));
  }

  fit_impl(handle, input_data, input_desc, labels, coef, intercept,
           fit_intercept, normalize, algo, streams, n_streams, verbose);

  // Drain and destroy the temporary streams before returning.
  for (int i = 0; i < n_streams; i++) {
    CUDA_CHECK(hipStreamSynchronize(streams[i]));
  }
  for (int i = 0; i < n_streams; i++) {
    CUDA_CHECK(hipStreamDestroy(streams[i]));
  }
}
// Stream-level distributed OLS predict on already-partitioned data:
// preds[i] = input_data[i] * coef + intercept, one (rows x N)*(N x 1) GEMM
// per partition, round-robined across the caller's streams.
template <typename T>
void predict_impl(raft::handle_t &handle,
                  std::vector<Matrix::Data<T> *> &input_data,
                  Matrix::PartDescriptor &input_desc, T *coef, T intercept,
                  std::vector<Matrix::Data<T> *> &preds, hipStream_t *streams,
                  int n_streams, bool verbose) {
  std::vector<Matrix::RankSizePair *> local_blocks = input_desc.partsToRanks;
  T alpha = T(1);  // plain product, no scaling
  T beta = T(0);   // overwrite preds rather than accumulate

  for (int i = 0; i < input_data.size(); i++) {
    int si = i % n_streams;  // round-robin stream assignment
    LinAlg::gemm(input_data[i]->ptr, local_blocks[i]->size, input_desc.N, coef,
                 preds[i]->ptr, local_blocks[i]->size, size_t(1), HIPBLAS_OP_N,
                 HIPBLAS_OP_N, alpha, beta, handle.get_cublas_handle(),
                 streams[si]);
    // Shift every prediction by the scalar intercept.
    LinAlg::addScalar(preds[i]->ptr, preds[i]->ptr, intercept,
                      local_blocks[i]->size, streams[si]);
  }
}
// Entry point that rebuilds the partition descriptor from raw rank/size
// arrays, creates one stream per partition, and delegates to the
// stream-level predict_impl above.
template <typename T>
void predict_impl(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes,
                  size_t n_parts, Matrix::Data<T> **input, size_t n_rows,
                  size_t n_cols, T *coef, T intercept, Matrix::Data<T> **preds,
                  bool verbose) {
  int rank = handle.get_comms().get_rank();

  std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes,
                                                    rank_sizes + n_parts);
  std::vector<Matrix::Data<T> *> input_data(input, input + n_parts);
  Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank);
  std::vector<Matrix::Data<T> *> preds_data(preds, preds + n_parts);

  // TODO: These streams should come from raft::handle_t
  int n_streams = n_parts;         // one stream per partition
  hipStream_t streams[n_streams];  // NOTE(review): VLA is a compiler extension
  for (int i = 0; i < n_streams; i++) {
    CUDA_CHECK(hipStreamCreate(&streams[i]));
  }

  predict_impl(handle, input_data, input_desc, coef, intercept, preds_data,
               streams, n_streams, verbose);

  // Flush and tear down the temporary streams.
  for (int i = 0; i < n_streams; i++) {
    CUDA_CHECK(hipStreamSynchronize(streams[i]));
  }
  for (int i = 0; i < n_streams; i++) {
    CUDA_CHECK(hipStreamDestroy(streams[i]));
  }
}
// float specialization of the public fit API; forwards to fit_impl.
void fit(raft::handle_t &handle, std::vector<Matrix::Data<float> *> &input_data,
         Matrix::PartDescriptor &input_desc,
         std::vector<Matrix::Data<float> *> &labels, float *coef,
         float *intercept, bool fit_intercept, bool normalize, int algo,
         bool verbose) {
  fit_impl(handle, input_data, input_desc, labels, coef, intercept,
           fit_intercept, normalize, algo, verbose);
}

// double specialization of the public fit API; forwards to fit_impl.
void fit(raft::handle_t &handle,
         std::vector<Matrix::Data<double> *> &input_data,
         Matrix::PartDescriptor &input_desc,
         std::vector<Matrix::Data<double> *> &labels, double *coef,
         double *intercept, bool fit_intercept, bool normalize, int algo,
         bool verbose) {
  fit_impl(handle, input_data, input_desc, labels, coef, intercept,
           fit_intercept, normalize, algo, verbose);
}

// float specialization of the public predict API; forwards to predict_impl.
void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes,
             size_t n_parts, Matrix::Data<float> **input, size_t n_rows,
             size_t n_cols, float *coef, float intercept,
             Matrix::Data<float> **preds, bool verbose) {
  predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef,
               intercept, preds, verbose);
}

// double specialization of the public predict API; forwards to predict_impl.
void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes,
             size_t n_parts, Matrix::Data<double> **input, size_t n_rows,
             size_t n_cols, double *coef, double intercept,
             Matrix::Data<double> **preds, bool verbose) {
  predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef,
               intercept, preds, verbose);
}
} // namespace opg
} // namespace OLS
} // namespace ML
| a370c7726d41953cda7248425b65fcf7c1974c07.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cumlHandle.hpp>
#include <common/device_buffer.hpp>
#include <cuda_utils.cuh>
#include <cuml/common/cuml_allocator.hpp>
#include <cuml/linear_model/ols_mg.hpp>
#include <cuml/linear_model/preprocess_mg.hpp>
#include <linalg/add.cuh>
#include <linalg/gemm.cuh>
#include <matrix/math.cuh>
#include <matrix/matrix.cuh>
#include <opg/linalg/lstsq.hpp>
#include <opg/stats/mean.hpp>
#include <raft/comms/comms.hpp>
using namespace MLCommon;
namespace ML {
namespace OLS {
namespace opg {
template <typename T>
void fit_impl(raft::handle_t &handle,
std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<T> *> &labels, T *coef, T *intercept,
bool fit_intercept, bool normalize, int algo,
cudaStream_t *streams, int n_streams, bool verbose) {
const auto &comm = handle.get_comms();
cublasHandle_t cublas_handle = handle.get_cublas_handle();
cusolverDnHandle_t cusolver_handle = handle.get_cusolver_dn_handle();
const auto allocator = handle.get_device_allocator();
device_buffer<T> mu_input(allocator, streams[0]);
device_buffer<T> norm2_input(allocator, streams[0]);
device_buffer<T> mu_labels(allocator, streams[0]);
if (fit_intercept) {
mu_input.resize(input_desc.N, streams[0]);
mu_labels.resize(1, streams[0]);
if (normalize) {
norm2_input.resize(input_desc.N, streams[0]);
}
GLM::opg::preProcessData(handle, input_data, input_desc, labels,
mu_input.data(), mu_labels.data(),
norm2_input.data(), fit_intercept, normalize,
streams, n_streams, verbose);
}
if (algo == 0 || input_desc.N == 1) {
ASSERT(false, "olsFit: no algorithm with this id has been implemented");
} else if (algo == 1) {
LinAlg::opg::lstsqEig(input_data, input_desc, labels, coef, comm, allocator,
streams, n_streams, cublas_handle, cusolver_handle);
} else {
ASSERT(false, "olsFit: no algorithm with this id has been implemented");
}
if (fit_intercept) {
GLM::opg::postProcessData(handle, input_data, input_desc, labels, coef,
intercept, mu_input.data(), mu_labels.data(),
norm2_input.data(), fit_intercept, normalize,
streams, n_streams, verbose);
} else {
*intercept = T(0);
}
}
/**
* @brief performs MNMG fit operation for the ols
* @input param handle: the internal cuml handle object
* @input param rank_sizes: includes all the partition size information for the rank
* @input param n_parts: number of partitions
* @input param input: input data
* @input param labels: labels data
* @output param coef: learned regression coefficients
* @output param intercept: intercept value
* @input param fit_intercept: fit intercept or not
* @input param normalize: normalize the data or not
* @input param verbose
*/
template <typename T>
void fit_impl(raft::handle_t &handle,
std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<T> *> &labels, T *coef, T *intercept,
bool fit_intercept, bool normalize, int algo, bool verbose) {
int rank = handle.get_comms().get_rank();
// TODO: These streams should come from raft::handle_t
int n_streams = input_desc.blocksOwnedBy(rank).size();
cudaStream_t streams[n_streams];
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamCreate(&streams[i]));
}
fit_impl(handle, input_data, input_desc, labels, coef, intercept,
fit_intercept, normalize, algo, streams, n_streams, verbose);
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamSynchronize(streams[i]));
}
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamDestroy(streams[i]));
}
}
template <typename T>
void predict_impl(raft::handle_t &handle,
std::vector<Matrix::Data<T> *> &input_data,
Matrix::PartDescriptor &input_desc, T *coef, T intercept,
std::vector<Matrix::Data<T> *> &preds, cudaStream_t *streams,
int n_streams, bool verbose) {
std::vector<Matrix::RankSizePair *> local_blocks = input_desc.partsToRanks;
T alpha = T(1);
T beta = T(0);
for (int i = 0; i < input_data.size(); i++) {
int si = i % n_streams;
LinAlg::gemm(input_data[i]->ptr, local_blocks[i]->size, input_desc.N, coef,
preds[i]->ptr, local_blocks[i]->size, size_t(1), CUBLAS_OP_N,
CUBLAS_OP_N, alpha, beta, handle.get_cublas_handle(),
streams[si]);
LinAlg::addScalar(preds[i]->ptr, preds[i]->ptr, intercept,
local_blocks[i]->size, streams[si]);
}
}
template <typename T>
void predict_impl(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes,
size_t n_parts, Matrix::Data<T> **input, size_t n_rows,
size_t n_cols, T *coef, T intercept, Matrix::Data<T> **preds,
bool verbose) {
int rank = handle.get_comms().get_rank();
std::vector<Matrix::RankSizePair *> ranksAndSizes(rank_sizes,
rank_sizes + n_parts);
std::vector<Matrix::Data<T> *> input_data(input, input + n_parts);
Matrix::PartDescriptor input_desc(n_rows, n_cols, ranksAndSizes, rank);
std::vector<Matrix::Data<T> *> preds_data(preds, preds + n_parts);
// TODO: These streams should come from raft::handle_t
int n_streams = n_parts;
cudaStream_t streams[n_streams];
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamCreate(&streams[i]));
}
predict_impl(handle, input_data, input_desc, coef, intercept, preds_data,
streams, n_streams, verbose);
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamSynchronize(streams[i]));
}
for (int i = 0; i < n_streams; i++) {
CUDA_CHECK(cudaStreamDestroy(streams[i]));
}
}
void fit(raft::handle_t &handle, std::vector<Matrix::Data<float> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<float> *> &labels, float *coef,
float *intercept, bool fit_intercept, bool normalize, int algo,
bool verbose) {
fit_impl(handle, input_data, input_desc, labels, coef, intercept,
fit_intercept, normalize, algo, verbose);
}
void fit(raft::handle_t &handle,
std::vector<Matrix::Data<double> *> &input_data,
Matrix::PartDescriptor &input_desc,
std::vector<Matrix::Data<double> *> &labels, double *coef,
double *intercept, bool fit_intercept, bool normalize, int algo,
bool verbose) {
fit_impl(handle, input_data, input_desc, labels, coef, intercept,
fit_intercept, normalize, algo, verbose);
}
void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes,
size_t n_parts, Matrix::Data<float> **input, size_t n_rows,
size_t n_cols, float *coef, float intercept,
Matrix::Data<float> **preds, bool verbose) {
predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef,
intercept, preds, verbose);
}
void predict(raft::handle_t &handle, Matrix::RankSizePair **rank_sizes,
size_t n_parts, Matrix::Data<double> **input, size_t n_rows,
size_t n_cols, double *coef, double intercept,
Matrix::Data<double> **preds, bool verbose) {
predict_impl(handle, rank_sizes, n_parts, input, n_rows, n_cols, coef,
intercept, preds, verbose);
}
} // namespace opg
} // namespace OLS
} // namespace ML
|
bd67bd9a4ef29f282093e698a147b1e85d17881e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Author: Eric Dazet (edazet) and Nathik Salam (nsalam)
* CPE 419
* 2 October 2015
*
* Assignment 2: CUDA Matrix Multiplication
*/
#include "matrixmul.h"
// Reserves a width x width element block for one matrix; aborts on failure
// so callers never observe a NULL matrix pointer.
_data_type *allocateMemory(int width) {
   _data_type *mat = (_data_type *)malloc(width * width * sizeof(_data_type));

   if (mat == NULL) {
      fprintf(stderr, "MALLOC ERROR: %s\n", strerror(errno));
      exit(1);
   }
   return mat;
}
// Reads input file into a matrix.
// The file is memory-mapped once just to discover the row width (by counting
// spaces on the first line); the values themselves are then read with fscanf
// from the start of the still-open stream.
// NOTE(review): the width count assumes every value on the first line is
// followed by a space, including the last one; with "a b c\n" it would
// undercount by one -- TODO confirm the input file format.
_data_type *readFile(char *fileName, int *width) {
   int i;
   struct stat st;
   char *iter;
   FILE *inFile;
   _data_type *mat = NULL, val;

   if ((inFile = fopen(fileName, "r")) == NULL) {
      fprintf(stderr, "FILE IO ERROR: %s\n", strerror(errno));
      exit(1);
   }
   // stat provides the byte count needed for the mmap below
   if ((i = fstat(fileno(inFile), &st)) < 0) {
      fprintf(stderr, "STAT ERROR! ERROR: %s\n", strerror(errno));
      exit(1);
   }
   if ((iter = (char *)mmap(0, st.st_size, PROT_READ, MAP_PRIVATE,
    fileno(inFile), 0)) == MAP_FAILED) {
      fprintf(stderr, "MMAP ERROR! ERROR: %s\n", strerror(errno));
      exit(1);
   }
   // determines the width of the matrix (spaces on the first line)
   for (i = 0; i < st.st_size && iter[i] != '\n'; i++) {
      if (iter[i] == ' ')
         (*width)++;
   }
   if ((i = munmap(iter, st.st_size)) == -1) {
      fprintf(stderr, "MUNMAP ERROR! ERROR: %s\n", strerror(errno));
      exit(1);
   }
   mat = allocateMemory(*width);
   // loads matrix
   // NOTE(review): fscanf's return value is unchecked -- a short or malformed
   // file leaves trailing entries uninitialized.
   for (i = 0; i < *width * *width; i++) {
      fscanf(inFile, format, &val);
      mat[i] = val;
   }
   fclose(inFile);
   return mat;
}
// Writes the product matrix, row per line, to the output path `output`
// (declared in the header), using the header's printFormat specifier.
void outputMatrix(_data_type *mat, int width) {
   FILE *outFile = fopen(output, "w+");
   int i, j;

   if (outFile == NULL) {   // fopen can fail (permissions, bad path)
      fprintf(stderr, "FILE IO ERROR: %s\n", strerror(errno));
      exit(1);
   }
   for (i = 0; i < width; i++) {
      for (j = 0; j < width; j++)
         fprintf(outFile, printFormat, mat[i * width + j]);
      fprintf(outFile, "\n");
   }
   fclose(outFile);
}
// GPU function: matrix multiplication, one thread per output element.
// Expects a 2D launch of TILEWIDTH x TILEWIDTH blocks covering the matrix.
__global__ void MatMulKernel (_data_type *Md, _data_type *Nd, _data_type *Pd, int width) {
   int row = blockIdx.y * TILEWIDTH + threadIdx.y;
   int col = blockIdx.x * TILEWIDTH + threadIdx.x;

   if (row < width && col < width) {  // guard against grids that overshoot
      _data_type pVal = 0;  // accumulate in the element type; the previous
                            // float accumulator silently truncated doubles
      int k;

      for (k = 0; k < width; k++)
         pVal += Md[row * width + k] * Nd[k * width + col];
      Pd[row * width + col] = pVal;
   }
}
// GPU setup function: allocates device buffers, copies the operands in,
// launches the tiled multiply kernel, then writes the product to disk.
// NOTE(review): blocks = width / TILEWIDTH truncates, so width must be a
// multiple of TILEWIDTH (32) or trailing rows/columns are never computed
// -- TODO confirm callers guarantee this. `int size` also overflows for
// very large widths.
void matrixMulOnDevice (_data_type *m, _data_type *n, _data_type *p, int width) {
   int size = width * width * sizeof(_data_type); //TILEWIDTH = 32, in header file
   int blocks = width / TILEWIDTH; //determines number of blocks in one dimension
   _data_type *Md, *Nd, *Pd;
   hipError_t err = hipSuccess;

   // Allocates space and moves matrices to GPU; HIP calls fail silently
   // unless their return codes are checked.
   if ((err = hipMalloc(&Md, size)) != hipSuccess ||
       (err = hipMemcpy(Md, m, size, hipMemcpyHostToDevice)) != hipSuccess ||
       (err = hipMalloc(&Nd, size)) != hipSuccess ||
       (err = hipMemcpy(Nd, n, size, hipMemcpyHostToDevice)) != hipSuccess ||
       (err = hipMalloc(&Pd, size)) != hipSuccess) {
      fprintf(stderr, "GPU ERROR: %s\n", hipGetErrorString(err));
      exit(1);
   }

   // Launch Kernel
   dim3 dimGrid(blocks, blocks); //grid = blocks x blocks
   dim3 dimBlock(TILEWIDTH, TILEWIDTH); //32 x 32 = 1024 threads per block

   hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Pd, width);
   if ((err = hipGetLastError()) != hipSuccess) {  // bad launch configs surface here
      fprintf(stderr, "KERNEL LAUNCH ERROR: %s\n", hipGetErrorString(err));
      exit(1);
   }
   // Blocking copy back to the host; in-kernel faults also surface here.
   if ((err = hipMemcpy(p, Pd, size, hipMemcpyDeviceToHost)) != hipSuccess) {
      fprintf(stderr, "GPU ERROR: %s\n", hipGetErrorString(err));
      exit(1);
   }
   outputMatrix(p, width);

   // Frees GPU memory
   hipFree(Md);
   hipFree(Nd);
   hipFree(Pd);
}
// Entry point: loads both operand matrices from the files named on the
// command line, verifies they are compatible, multiplies them on the GPU,
// and releases the host buffers.
int main(int argc, char **argv) {
   _data_type *matA = NULL, *matB = NULL, *product;
   int widthA = 0, widthB = 0;

   if (argc != 3) {
      fprintf(stderr, "PLEASE SPECIFY FILES! ERROR: %s\n", strerror(errno));
      exit(1);
   }
   // Load the two input matrices.
   matA = readFile(argv[1], &widthA);
   matB = readFile(argv[2], &widthB);
   // Square matrices multiply only when their widths agree.
   if (widthA != widthB) {
      fprintf(stderr, "INVALID MATRICES! ERROR: %d != %d\n", widthA, widthB);
      exit(1);
   }
   // Product matrix has the same dimensions as the operands.
   product = allocateMemory(widthA);
   matrixMulOnDevice(matA, matB, product, widthA);
   free(matA);
   free(matB);
   free(product);
   return 0;
}
| bd67bd9a4ef29f282093e698a147b1e85d17881e.cu | /*
* Author: Eric Dazet (edazet) and Nathik Salam (nsalam)
* CPE 419
* 2 October 2015
*
* Assignment 2: CUDA Matrix Multiplication
*/
#include "matrixmul.h"
// Function that allocates memory for the matrices
_data_type *allocateMemory(int width) {
_data_type *mat;
if ((mat = (_data_type *)malloc(width * width * sizeof(_data_type))) == NULL) {
fprintf(stderr, "MALLOC ERROR: %s\n", strerror(errno));
exit(1);
}
return mat;
}
// Reads input file into a matrix
_data_type *readFile(char *fileName, int *width) {
int i;
struct stat st;
char *iter;
FILE *inFile;
_data_type *mat = NULL, val;
if ((inFile = fopen(fileName, "r")) == NULL) {
fprintf(stderr, "FILE IO ERROR: %s\n", strerror(errno));
exit(1);
}
if ((i = fstat(fileno(inFile), &st)) < 0) {
fprintf(stderr, "STAT ERROR! ERROR: %s\n", strerror(errno));
exit(1);
}
if ((iter = (char *)mmap(0, st.st_size, PROT_READ, MAP_PRIVATE,
fileno(inFile), 0)) == MAP_FAILED) {
fprintf(stderr, "MMAP ERROR! ERROR: %s\n", strerror(errno));
exit(1);
}
// determines the width of the matrix
for (i = 0; i < st.st_size && iter[i] != '\n'; i++) {
if (iter[i] == ' ')
(*width)++;
}
if ((i = munmap(iter, st.st_size)) == -1) {
fprintf(stderr, "MUNMAP ERROR! ERROR: %s\n", strerror(errno));
exit(1);
}
mat = allocateMemory(*width);
// loads matrix
for (i = 0; i < *width * *width; i++) {
fscanf(inFile, format, &val);
mat[i] = val;
}
fclose(inFile);
return mat;
}
// Writes the product matrix, row per line, to the output path `output`
// (declared in the header), using the header's printFormat specifier.
void outputMatrix(_data_type *mat, int width) {
   FILE *outFile = fopen(output, "w+");
   int i, j;

   if (outFile == NULL) {   // fopen can fail (permissions, bad path)
      fprintf(stderr, "FILE IO ERROR: %s\n", strerror(errno));
      exit(1);
   }
   for (i = 0; i < width; i++) {
      for (j = 0; j < width; j++)
         fprintf(outFile, printFormat, mat[i * width + j]);
      fprintf(outFile, "\n");
   }
   fclose(outFile);
}
// GPU function: matrix multiplication per thread
// GPU kernel: one thread computes one element Pd[row][col] of the product of
// the width x width matrices Md and Nd.
// Expects a 2-D launch of TILEWIDTH x TILEWIDTH thread blocks.
__global__ void MatMulKernel (_data_type *Md, _data_type *Nd, _data_type *Pd, int width) {
   int row = blockIdx.y * TILEWIDTH + threadIdx.y;
   int col = blockIdx.x * TILEWIDTH + threadIdx.x;
   // Fix: accumulate in _data_type instead of float — if _data_type is
   // double, a float accumulator silently loses precision.
   _data_type pVal = 0;
   int k;
   // Fix: bounds guard so out-of-range threads neither read nor write past
   // the matrices when the grid over-covers width (e.g. width not a
   // multiple of TILEWIDTH).
   if (row < width && col < width) {
      for (k = 0; k < width; k++)
         pVal += Md[row * width + k] * Nd[k * width + col];
      Pd[row * width + col] = pVal;
   }
}
// GPU setup function
// Host wrapper: copies the input matrices to the GPU, launches the matrix
// multiply kernel, copies the product back into p, and writes it out.
// NOTE(review): blocks = width / TILEWIDTH truncates, so width is assumed
// to be a multiple of TILEWIDTH (32) — confirm against the input generator.
void matrixMulOnDevice (_data_type *m, _data_type *n, _data_type *p, int width) {
   int size = width * width * sizeof(_data_type); //TILEWIDTH = 32, in header file
   int blocks = width / TILEWIDTH; //determines number of blocks in one dimension
   _data_type *Md, *Nd, *Pd;
   cudaError_t err;
   // Allocates space and moves matrices to GPU.
   // Fix: every CUDA call is now checked — previously a failed allocation
   // or copy went unnoticed and the kernel silently produced garbage.
   if ((err = cudaMalloc(&Md, size)) != cudaSuccess ||
       (err = cudaMemcpy(Md, m, size, cudaMemcpyHostToDevice)) != cudaSuccess ||
       (err = cudaMalloc(&Nd, size)) != cudaSuccess ||
       (err = cudaMemcpy(Nd, n, size, cudaMemcpyHostToDevice)) != cudaSuccess ||
       (err = cudaMalloc(&Pd, size)) != cudaSuccess) {
      fprintf(stderr, "CUDA ERROR: %s\n", cudaGetErrorString(err));
      exit(1);
   }
   // Launch Kernel
   dim3 dimGrid(blocks, blocks); //grid = blocks x blocks
   dim3 dimBlock(TILEWIDTH, TILEWIDTH); //32 x 32 = 1024 threads per block
   MatMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, width);
   // Kernel launches do not return errors directly; query explicitly so a
   // bad launch configuration is reported instead of being swallowed.
   if ((err = cudaGetLastError()) != cudaSuccess) {
      fprintf(stderr, "CUDA ERROR: %s\n", cudaGetErrorString(err));
      exit(1);
   }
   // The blocking cudaMemcpy synchronizes with the kernel before copying.
   if ((err = cudaMemcpy(p, Pd, size, cudaMemcpyDeviceToHost)) != cudaSuccess) {
      fprintf(stderr, "CUDA ERROR: %s\n", cudaGetErrorString(err));
      exit(1);
   }
   outputMatrix(p, width);
   // Frees GPU memory
   cudaFree(Md);
   cudaFree(Nd);
   cudaFree(Pd);
}
// Entry point: loads two square matrices from the files named on the command
// line, verifies they have the same dimension, multiplies them on the GPU,
// and writes the product to the output file via matrixMulOnDevice().
int main(int argc, char **argv) {
   _data_type *m = NULL, *n = NULL, *p;
   int widthM = 0, widthN = 0;  // filled in by readFile()
   if (argc != 3) {
      // NOTE(review): errno is not set by an argument-count check, so
      // strerror(errno) here may print "Success" — confirm intent.
      fprintf(stderr, "PLEASE SPECIFY FILES! ERROR: %s\n", strerror(errno));
      exit(1);
   }
   // Loads the first input matrix
   m = readFile(argv[1], &widthM);
   // Loads the second input matrix
   n = readFile(argv[2], &widthN);
   // Checks to see if the input matrices can be multiplied
   if (widthM != widthN) {
      fprintf(stderr, "INVALID MATRICES! ERROR: %d != %d\n", widthM, widthN);
      exit(1);
   }
   // Allocates memory for the product matrix
   p = allocateMemory(widthM);
   // Calls the GPU prep function
   matrixMulOnDevice(m, n, p, widthM);
   // Frees matrix memory
   free(m);
   free(n);
   free(p);
   return 0;
}
|
81fba073155ee43db028eb6ebad8fc032f212c0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "convolutional_gpu_layer.h"
#include "batchnorm_gpu_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
// For each element of x (NCHW layout flattened as j + w*(i + h*(k + c*b))),
// accumulates into delta the rate-weighted differences between that element
// and every in-bounds neighbour in its size x size spatial window.
// One thread per element; n is the total element count.
__global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(id >= n) return;  // guard: the grid may be rounded up past n
    // Decompose the flat id into (b, k, i, j) = batch, channel, row, column.
    int j = id % w;
    id /= w;
    int i = id % h;
    id /= h;
    int k = id % c;
    id /= c;
    int b = id;
    // Center the window on (i, j); size/2.f truncated on int assignment
    // yields -(size/2) for integer window sizes.
    int w_offset = -(size/2.f);
    int h_offset = -(size/2.f);
    int out_index = j + w*(i + h*(k + c*b));
    int l, m;
    for(l = 0; l < size; ++l){
        for(m = 0; m < size; ++m){
            int cur_h = h_offset + i + l;
            int cur_w = w_offset + j + m;
            int index = cur_w + w*(cur_h + h*(k + b*c));
            // Skip neighbours that fall outside the feature map.
            int valid = (cur_h >= 0 && cur_h < h &&
                         cur_w >= 0 && cur_w < w);
            delta[out_index] += valid ? rate*(x[index] - x[out_index]) : 0;
        }
    }
}
// Launches smooth_kernel over the layer's GPU output, accumulating the
// smoothness gradient into l.delta_gpu.
// NOTE(review): n is sized from l.out_h/out_w/out_c but the kernel indexes
// with l.w/l.h/l.c — these agree only when the layer preserves spatial
// dimensions; confirm for strided/downsampling layers.
extern "C" void smooth_layer(layer l, int size, float rate)
{
    int h = l.out_h;
    int w = l.out_w;
    int c = l.out_c;
    size_t n = h*w*c*l.batch;
    hipLaunchKernelGGL(( smooth_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, 0, l.output_gpu, n, l.w, l.h, l.c, size, rate, l.delta_gpu);
    check_error(hipPeekAtLastError());
}
| 81fba073155ee43db028eb6ebad8fc032f212c0a.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "convolutional_gpu_layer.h"
#include "batchnorm_gpu_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
// For each element of x (NCHW layout flattened as j + w*(i + h*(k + c*b))),
// accumulates into delta the rate-weighted differences between that element
// and every in-bounds neighbour in its size x size spatial window.
// One thread per element; n is the total element count.
__global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(id >= n) return;  // guard: the grid may be rounded up past n
    // Decompose the flat id into (b, k, i, j) = batch, channel, row, column.
    int j = id % w;
    id /= w;
    int i = id % h;
    id /= h;
    int k = id % c;
    id /= c;
    int b = id;
    // Center the window on (i, j); size/2.f truncated on int assignment
    // yields -(size/2) for integer window sizes.
    int w_offset = -(size/2.f);
    int h_offset = -(size/2.f);
    int out_index = j + w*(i + h*(k + c*b));
    int l, m;
    for(l = 0; l < size; ++l){
        for(m = 0; m < size; ++m){
            int cur_h = h_offset + i + l;
            int cur_w = w_offset + j + m;
            int index = cur_w + w*(cur_h + h*(k + b*c));
            // Skip neighbours that fall outside the feature map.
            int valid = (cur_h >= 0 && cur_h < h &&
                         cur_w >= 0 && cur_w < w);
            delta[out_index] += valid ? rate*(x[index] - x[out_index]) : 0;
        }
    }
}
// Launches smooth_kernel over the layer's GPU output, accumulating the
// smoothness gradient into l.delta_gpu.
// NOTE(review): n is sized from l.out_h/out_w/out_c but the kernel indexes
// with l.w/l.h/l.c — these agree only when the layer preserves spatial
// dimensions; confirm for strided/downsampling layers.
extern "C" void smooth_layer(layer l, int size, float rate)
{
    int h = l.out_h;
    int w = l.out_w;
    int c = l.out_c;
    size_t n = h*w*c*l.batch;
    smooth_kernel<<<cuda_gridsize(n), BLOCK>>>(l.output_gpu, n, l.w, l.h, l.c, size, rate, l.delta_gpu);
    check_error(cudaPeekAtLastError());
}
|
65c93b9841c61354c3e606296f578cd44fb9f719.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <math.h>
#include "cs_header.h"
#include "cs_dbg.h"
#include "cs_cuda.h"
#include "cs_helper.h"
#include "cs_motion_detect_v2.h"
#include "cs_analysis.h"
/*
Fri Apr 24 17:46:50 EDT 2015
version 2 will only match the first md block with the subsequent md_z -1 block to do
the L1 norm calculation. which works fine, but not too accurate, v3 will change that
to compare the "center" md blocks across the termperal domain
*/
#define CUDA_DBG
// #define CUDA_OBS
// md_x/y/z: total size ... so it is md_x'*2, md_y'*2 and md_z
// tbl_size, // does not include the 3 indexes
// record_size, // do not include the 3 indexes
// hvt_size, // number of combination of h/v/t
// from_blk_size, // the size of the input inner block ... after edge
template<typename T>
__global__ void d_do_motion_detection_step0_v2 (
T *fdp, T *tdp,
int tbl_size,
int record_size,
int hvt_size,
int md_x, int md_y, int md_z,
struct cube *dxyzp,
int from_blk_size )
{
int mx, mxy_size ;
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int ot_idx, blk_idx, blk_size, blk_type_idx, cx, cxy_size,
md_z_len, i, j, from, h, v, t, tt, hh, vv ;
T *ofdp, *otdp ;
int *ip ;
#ifdef CUDA_OBS
int *dbp ;
#endif
#ifdef CUDA_OBS
dbp = fdp ;
#endif
ot_idx = t_idx ;
ofdp = fdp ;
otdp = tdp ;
while ( t_idx < tbl_size )
{
fdp = ofdp ;
tdp = otdp ;
blk_size = hvt_size * record_size ; // LDL can be moved out of while loop
blk_idx = t_idx / blk_size ;
t_idx -= blk_idx * blk_size ; // index into this block
// tdp is pointing at the beginning of the to-block
tdp += blk_idx * ( record_size + NUM_OF_HVT_INDEX ) * hvt_size ;
ip = ( int * ) tdp ;
j = *ip ;
blk_type_idx = CUBE_INFO_GET( j ) ;
// from block info
cx = dxyzp[ blk_type_idx ].x ;
cxy_size = cx * dxyzp[ blk_type_idx ].y ;
// to block info
mx = cx - md_x ;
mxy_size = mx * ( dxyzp[ blk_type_idx ].y - md_y ) ;
md_z_len = dxyzp[ blk_type_idx ].z - md_z + 1 ;
#ifdef CUDA_OBS
*dbp++ = blk_type_idx ;
*dbp++ = cx ;
*dbp++ = cxy_size ;
*dbp++ = blk_idx ;
*dbp++ = mx ;
*dbp++ = mxy_size ;
*dbp++ = t_idx ;
*dbp++ = from_blk_size ;
*dbp++ = md_z_len ;
#endif
fdp += blk_idx * from_blk_size ; // adjust the from
// now ftp is pointing at the beginning of the from-block
// starts from here ... same as single block func in version 1
i = t_idx / record_size ;
tdp += i * ( record_size + NUM_OF_HVT_INDEX ) ; // beginning of record
ip = ( int * ) tdp ;
t = ( *ip++ ) & CUBE_INFO_T_MSK ;
v = *ip++ ;
h = *ip++ ;
tdp += NUM_OF_HVT_INDEX ;
t_idx %= record_size ; // inside this record
tt = t_idx / mxy_size ; // which frame
if ( tt < md_z_len )
{
j = t_idx % mxy_size ;
hh = j % mx ; // which h
vv = j / mx ; // which v
// from = t * cxy_size + ( v + vv ) * cx + h + hh ;
// serial from = ( t + tt ) * mxy_size + ( v + vv ) * mx + h + hh ;
from = ( t + tt ) * cxy_size + ( v + vv ) * cx + h + hh ;
#ifdef CUDA_OBS
*dbp++ = 2222222 ;
*dbp++ = t ;
*dbp++ = tt ;
*dbp++ = i ;
*dbp++ = t ;
*dbp++ = v ;
*dbp++ = h ;
*dbp++ = t_idx ;
*dbp++ = tt ;
*dbp++ = hh ;
*dbp++ = vv ;
*dbp++ = from ;
*dbp++ = fdp[ from ] ;
*dbp++ = 99999999 ;
#endif
tdp[ t_idx ] = fdp[ from ] ;
}
#ifdef CUDA_DBG
else
tdp[ t_idx ] = 2222 ;
#endif
ot_idx += CUDA_MAX_THREADS ;
t_idx = ot_idx ;
}
}
// step0: copy the data into the motion array ...
// block : the result of do_motion_idx <- edge-detection <- L-selection
// cube : the cube that is going to be moved by all h/v/t units
// fromp has the edged data ... top has the TVH index
// md_x is csc.md_x * 2, same to md_y. md_z is the same as csc.md_z
template<typename T>
int
h_do_motion_detection_step0_v2 ( T *fromp, T *top,
int tbl_size, // overall input size ... excludes the 3 indexes
int record_size, // do not includes the 3 indexes
int md_x, int md_y, int md_z,
struct cube *d_xyzp, // cube in device // will have the size of the
int hvt_size,
int from_block_size ) // new
{
int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
int nBlocks ; //= ( tbl_size + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
#ifdef CUDA_DBG
fprintf( stderr, "%s: fromp %p to %p size %d record %d mdxyz %d %d %d "
"hvt %d from_block_size %d\n",
__func__, fromp, top, tbl_size, record_size, md_x, md_y, md_z,
hvt_size, from_block_size ) ;
dbg_p_d_data_i ("xyz_table", ( int *)d_xyzp, CUBE_INFO_CNT * sizeof ( *d_xyzp ) / sizeof( int )) ;
#endif
#ifdef CUDA_OBS
if (( tbl_size % number_blocks ) || ( tbl_size % cube_xy ) ||
( record_size % cube_xy ))
{
fprintf(stderr, "%s: error size %d cube %d rec %d nblks %d\n",
__func__, tbl_size, cube_xy, record_size, number_blocks ) ;
return ( 0 ) ;
}
#endif
h_block_adj ( tbl_size, nThreadsPerBlock, &nBlocks ) ;
hipLaunchKernelGGL(( d_do_motion_detection_step0_v2<T>) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0,
fromp, top,
tbl_size, // does not include the 3 indexes
record_size, // do not include the 3 indexes
hvt_size, // number of combination of h/v/t
md_x, md_y, md_z,
d_xyzp, from_block_size ) ;
hipDeviceSynchronize() ;
#ifdef CUDA_OBS
dbg_p_d_data_i("motion_detect", top, tbl_size ) ;
#endif
return ( 1 ) ;
}
template int
h_do_motion_detection_step0_v2<int> ( int *fromp, int *top,
int tbl_size, // overall input size ... excludes the 3 indexes
int record_size, // do not includes the 3 indexes
int md_x, int md_y, int md_z,
struct cube *d_xyzp, // cube in device // will have the size of the
int hvt_size,
int from_block_size ) ; // new
template int
h_do_motion_detection_step0_v2<float> ( float *fromp, float *top,
int tbl_size, // overall input size ... excludes the 3 indexes
int record_size, // do not includes the 3 indexes
int md_x, int md_y, int md_z,
struct cube *d_xyzp, // cube in device // will have the size of the
int hvt_size,
int from_block_size ) ; // new
// 3 indexes + real data length == record_length
// loopcnt ... number of records in each blk
// tbl_size ... number of records in blk_x * blk_y blks
__global__ void d_do_motion_idx_v2 ( int *dp, int tbl_size, int loopcnt,
int record_length,
int h_loop, int t_loop, int hv_size,
int blk_in_x, int blk_in_y )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int ot_idx, blk_idx, blk, i, j, k ;
int *odp ;
int *ip ;
#ifdef CUDA_OBS
if ( t_idx == 0 )
{
*dp++ = tbl_size ;
*dp++ = loopcnt ;
*dp++ = record_length ;
*dp++ = h_loop ;
*dp++ = t_loop ;
*dp++ = hv_size ;
*dp++ = blk_in_x ;
*dp++ = blk_in_y ;
}
#endif
ot_idx = t_idx ;
odp = dp ;
while ( t_idx < tbl_size )
{
blk_idx = t_idx / loopcnt ; // which block
dp = odp ;
// j = ( t_idx % loopcnt ) % hv_size ;
i = blk_idx / blk_in_x ;
k = blk_idx % blk_in_x ;
if (( i == 0 ) || ( i == ( blk_in_y - 1 )))
{
if (( k == 0 ) || ( k == ( blk_in_x - 1 )))
blk = CUBE_INFO_CORNER ;
else
blk = CUBE_INFO_SIDE ;
} else
{
if (( k == 0 ) || ( k == ( blk_in_x - 1 )))
blk = CUBE_INFO_SIDE ;
else
blk = CUBE_INFO_INNER ;
}
dp += t_idx * record_length ;
t_idx -= blk_idx * loopcnt ;
if ( t_idx == ( loopcnt - 1 ))
{
ip = ( int * ) dp ;
*ip++ = CUBE_INFO_SET( blk ) ; // tmporal
*ip++ = (( hv_size / h_loop ) - 1 ) / 2 ;
*ip++ = ( h_loop - 1 ) / 2 ;
} else
{
ip = ( int * ) dp ;
*ip++ = (( t_idx / hv_size ) + 1 ) | CUBE_INFO_SET( blk ) ; // tmporal
j = t_idx % hv_size ;
*ip++ = j / h_loop ; // vertical
*ip++ = j % h_loop ; // horizontal
}
// dp += NUM_OF_HVT_INDEX ; // assume float and int are of the same size ... QQQ ... FIX
ot_idx += CUDA_MAX_THREADS ;
t_idx = ot_idx ;
}
}
// total size is the buffer size ... might not be the same as used.
// md_x/y/z aer each side
int
h_do_motion_idx_v2 ( int *dp, int total_size,
int *orig_idx,
int blk_in_x, int blk_in_y, struct cube *cubep,
int md_x, int md_y, int md_z, int *record_sizep )
{
int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
int record_length, k, i, nBlocks, loopcnt ;
// the record length is the largest record amongst the inner/side/corner blks
fprintf( stderr, "%s: total_size %d md x/y/z %d %d %d \n",
__func__, total_size, md_x, md_y, md_z ) ;
record_length = 0 ;
for ( i = 0 ; i < CUBE_INFO_CNT ; i++ )
{
k = ( cubep[i].x - md_x * 2 ) *
( cubep[i].y - md_y * 2 ) * ( cubep[i].z - md_z + 1 ) ;
#ifdef CUDA_DBG
fprintf(stderr, "%s: i %d cube %d %d %d k %d \n",
__func__, i, cubep[i].x, cubep[i].y, cubep[i].z, k ) ;
#endif
if ( k > record_length )
record_length = k ;
}
*record_sizep = record_length ;
record_length += NUM_OF_HVT_INDEX ; // 3 indexes .. t/v/h in the beginning ...
// the last record entry ( i.e. loopcnt -1 ) has a different format ...
// ck the device code, when t_idx, ( loopcnt - 1)
loopcnt = ( md_x * 2 + 1 ) * ( md_y * 2 + 1 ) * ( md_z - 1 ) + 1 ;
i = record_length * loopcnt * blk_in_x * blk_in_y ;
if ( i > total_size )
{
fprintf( stderr, "%s: size needed %d got %d\n",
__func__, i, total_size ) ;
return ( 0 ) ;
}
i /= record_length ;
// nBlocks= ( i + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
h_block_adj ( i, nThreadsPerBlock, &nBlocks ) ;
fprintf( stderr, "%s: loopcnt %d i %d rec %d md %d %d %d blk x/y %d %d\n",
__func__, loopcnt, i, record_length, md_x, md_y, md_z, blk_in_x,
blk_in_y ) ;
hipLaunchKernelGGL(( d_do_motion_idx_v2) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0,
dp, i, loopcnt, record_length, ( md_x * 2 + 1 ), md_z,
( md_x * 2 + 1 ) * ( md_y * 2 + 1 ),
blk_in_x, blk_in_y ) ;
hipDeviceSynchronize() ;
// *orig_idx = md_y * ( md_x * 2 + 1 ) + md_x ;
*orig_idx = loopcnt - 1 ;
return ( 1 ) ;
}
// step one is to get y0-yk
// no need to worry about the different cube size ... inner/side/corner
// it will be all junk anyway ...
template<typename T>
__global__ void d_do_l1_norm_step1_v2 ( T *dp, int tbl_size, int record_length,
int orig, int hvt_size )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, j ;
T *odp, *op ;
odp = dp ;
while ( t_idx < tbl_size )
{
dp = odp ;
j = t_idx / record_length ;
i = j / hvt_size ; // index into the hvt_size size block
j %= hvt_size ; // record index into the current block
if ( j != orig )
{
dp += i * hvt_size * ( record_length + NUM_OF_HVT_INDEX ) ;
op = dp + orig * ( record_length + NUM_OF_HVT_INDEX ) + NUM_OF_HVT_INDEX ;
i = t_idx % record_length ;
op += i ;
dp = dp + j * ( record_length + NUM_OF_HVT_INDEX ) +
NUM_OF_HVT_INDEX + i ;
*dp -= *op ;
if ( *dp < 0 )
*dp = -*dp ; // save a step ... no need to abs()
}
t_idx += CUDA_MAX_THREADS ;
}
}
// now make all entries positive for orig
// table_size is the all the entries of all orig in all ( nblk_in_x * nblk_in_y ) block
template<typename T>
__global__ void d_do_l1_norm_step1_1_v2 ( T *dp, int tbl_size, int record_length,
int orig, int hvt_size )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int j ;
T *odp ;
odp = dp ;
while ( t_idx < tbl_size )
{
dp = odp ;
j = t_idx / record_length ; // which hvt_size block
dp += ( j * hvt_size + orig ) * ( record_length + NUM_OF_HVT_INDEX ) +
( t_idx % record_length ) + NUM_OF_HVT_INDEX ;
if ( *dp < 0 )
*dp = -*dp ; // save a step ... no need to abs()
t_idx += CUDA_MAX_THREADS ;
}
}
// total and record_size does not have the NUM_OF_HVT_INDEX elements
// Host driver for L1-norm step 1: first kernel computes |y0 - yk| for every
// non-reference record, second kernel takes the absolute value of the
// reference (orig) record itself.
//   total       : overall element count (excludes the NUM_OF_HVT_INDEX ints)
//   record_size : data elements per record (excludes the indexes)
//   orig        : index of the reference record within each hvt block
//   hvt_size    : number of h/v/t shift combinations per block
// Returns 1 on success, 0 when the sizes are inconsistent.
template<typename T>
int
h_do_l1_norm_step1_v2( T *dp, int total, int record_size, int orig, int hvt_size)
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int nBlocks ; // = ( total + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
	if (( total % record_size ) || ( total % ( hvt_size * record_size )))
	{
		fprintf( stderr, "%s: error total %d rec %d hvt %d \n",
			__func__, total, record_size, hvt_size ) ;
		return ( 0 ) ;
	}
	h_block_adj ( total, nThreadsPerBlock, &nBlocks ) ;
	// Step 1: subtract the reference record and store the absolute value.
	hipLaunchKernelGGL(( d_do_l1_norm_step1_v2<T>) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0,
		dp, total, record_size, orig, hvt_size ) ;
	hipDeviceSynchronize() ;
	// Step 1.1 only touches one record (orig) per hvt block, so the work
	// count shrinks to one record per block.
	total = ( total / ( record_size * hvt_size )) * record_size ;
	// total = nblk_x * nbli_y * record_size
	// nBlocks = ( total + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
	h_block_adj ( total, nThreadsPerBlock, &nBlocks ) ;
	hipLaunchKernelGGL(( d_do_l1_norm_step1_1_v2<T>) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0,
		dp, total, record_size, orig, hvt_size ) ;
	hipDeviceSynchronize() ;
	return ( 1 ) ;
}
template int
h_do_l1_norm_step1_v2<int>( int *dp, int total, int record_size, int orig, int hvt_size) ;
template int
h_do_l1_norm_step1_v2<float>( float *dp, int total, int record_size, int orig, int hvt_size) ;
// step two is to get L1-norm(sum)
// all row, should be after the abs() is done
// tbl_size is the number of elements for this addition operation
// record_length includes the NUM_OF_HVT_INDEX
// cnt is the max_cnt for each record, regardless inner/side/corner
template<typename T>
__global__ void d_do_l1_norm_step2_v2 ( T *dp, int tbl_size, int record_length,
int cnt, struct cube *d_xyzp, int *d_resp )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int *ip, record_type, start, current_cnt , j ;
long long l, ll ;
T *fp, *odp ;
odp = dp ;
while ( t_idx < tbl_size )
{
dp = odp ;
j = t_idx / cnt ;
dp += record_length * j ;
ip = ( int * )dp ;
record_type = CUBE_INFO_GET( *ip ) ;
start = d_xyzp[ record_type ].z ;
current_cnt = d_xyzp[ record_type ].y ;
j = t_idx % cnt ;
if ( current_cnt > j )
{
dp += NUM_OF_HVT_INDEX ;
fp = dp + start ;
// this check is needed when *dp is int
{
l = dp[ j ] ;
ll = fp [ j ] ;
l += ll ;
if ( l & 0xffffffff00000000 )
*d_resp = t_idx ;
}
dp[ j ] += fp [ j ] ;
}
t_idx += CUDA_MAX_THREADS ;
}
}
// step 1.1 should be the abs() ... not needed, done in step 1
// step 2 is to do the sum
// record_size does not have the NUM_OF_HVT_INDEX elements
// total is the overall number of data elements, no NUM_OF_HVT_INDEX
// NOTE d_xyzp->y/z will be destroyed ... x
// hcubep: has been adjusted to the after md_x/y/z size ;
template<typename T>
int
h_do_l1_norm_step2_v2( T *dp, int total, int record_size,
struct cube *hcubep, struct cube *d_xyzp, int *d_resp )
{
int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
int nBlocks, i, start, row, cnt ;
int max_cnt ;
struct cube cxyz[3] ;
#ifdef CUDA_OBS
fprintf( stderr, "%s: dp %p total %d record %d \n",
__func__, dp, total, record_size ) ;
#endif
max_cnt = 0xdeadbeef ;
if ( !put_d_data_i ( d_resp, &max_cnt, sizeof ( int )))
{
fprintf( stderr, "%s: put data failed d_resp %p \n", __func__, d_resp ) ;
}
memcpy ( cxyz, hcubep, sizeof ( cxyz )) ;
max_cnt = 0 ;
for ( i = 0 ; i < CUBE_INFO_CNT ; i++ )
{
// y is cnt ... z is start ...
cnt = cxyz[i].y = hcubep[i].x * hcubep[i].y * hcubep[i].z ; // size
start = max_log2( cnt ) ;
if ( start != cnt )
start = max_log2(( start / 2 ) - 1 ) ;
else
start >>= 1 ;
cxyz[i].z = start ;
cxyz[i].y -= start ;
if ( max_cnt < cxyz[i].y )
max_cnt = cxyz[i].y ;
#ifdef CUDA_DBG
fprintf( stderr, "%s: i %d z %d y %d max %d cnt %d \n",
__func__, i, cxyz[i].z, cxyz[i].y, max_cnt, cnt ) ;
#endif
}
h_set_cube_config ( d_xyzp, cxyz ) ;
row = total / record_size ;
if ( total % record_size )
{
fprintf( stderr, "h_do_l1_norm_step2_v2: error size %d %d \n", total, record_size ) ;
return ( 0 ) ;
}
while ( max_cnt > 0 )
{
i = row * max_cnt ;
#ifdef CUDA_DBG
fprintf( stderr, "row %d cnt %d i %d \n", row, max_cnt, i ) ;
#endif
// nBlocks= ( i + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
h_block_adj ( i, nThreadsPerBlock, &nBlocks ) ;
hipLaunchKernelGGL(( d_do_l1_norm_step2_v2<T>) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0,
dp, i, record_size + NUM_OF_HVT_INDEX,
max_cnt, d_xyzp, d_resp ) ;
hipDeviceSynchronize() ;
if ( !get_d_data_i ( d_resp, &i, sizeof ( int )))
{
fprintf( stderr, "%s: get data failed \n", __func__ ) ;
}
if ( i != 0xdeadbeef )
fprintf( stderr, "%s: overflow error return %x \n", __func__, i ) ;
max_cnt = 0 ;
for ( i = 0 ; i < CUBE_INFO_CNT ; i++ )
{
cxyz[i].z >>= 1 ;
cxyz[i].y = cxyz[i].z ;
if ( max_cnt < cxyz[i].y )
max_cnt = cxyz[i].y ;
#ifdef CUDA_DBG
fprintf(stderr,"%s-2: i %d z %d y %d max %d \n",
__func__, i, cxyz[i].z, cxyz[i].y, max_cnt ) ;
#endif
}
h_set_cube_config ( d_xyzp, cxyz ) ;
}
h_set_cube_config ( d_xyzp, hcubep ) ;
return ( 1 ) ;
}
// Explicit instantiations for the supported element types.
// Bug fix: the <int> line previously read "template<> int ...", which is an
// explicit *specialization* declaration (with no definition supplied), so
// the int instantiation was never emitted and any caller of the int version
// failed at link time. "template int ..." requests an explicit
// instantiation of the primary template, matching the float line below.
template int
h_do_l1_norm_step2_v2<int>( int *dp, int total, int record_size,
	struct cube *hcubep, struct cube *d_xyzp, int *d_resp ) ;
template int
h_do_l1_norm_step2_v2<float>( float *dp, int total, int record_size,
	struct cube *hcubep, struct cube *d_xyzp, int *d_resp ) ;
#define MAX_L1_NORM 1000
// step 3 is to get 1-|y0-yk|/|y0|
// row_size is the number of rows ...
// record_length includes the NUM_OF_HVT_INDEX
// dp starts with valid data, see caller
template<typename T>
__global__ void d_do_l1_norm_step3_v2 ( T *dp, int row_size, int record_length,
int orig, int hvt_size )
{
int ot_idx, i, t_idx = blockIdx.x * blockDim.x + threadIdx.x;
T *op, *odp ;
odp = dp ;
ot_idx = t_idx ;
while ( t_idx < row_size )
{
dp = odp ;
i = t_idx / hvt_size ; // which block
t_idx -= i * hvt_size ; // which record in block
// skip the orig
if ( t_idx != orig )
{
dp += ( i * hvt_size ) * record_length ;
op = dp + orig * record_length ;
dp += t_idx * record_length ;
// FIX ... if no int ... then there is no such problem
// *dp = MAX_L1_NORM - ( MAX_L1_NORM * ( *dp )) / (*op) ;
*dp = ((T)MAX_L1_NORM) - ( T )((( float )MAX_L1_NORM ) * ((( float )*dp ) / (( float ) *op ))) ;
}
ot_idx += CUDA_MAX_THREADS ;
t_idx = ot_idx ;
}
}
// record_size does not have the NUM_OF_HVT_INDEX elements
// total is the overall number of data elements, no NUM_OF_HVT_INDEX
// Host driver for L1-norm step 3: launches the kernel that turns each
// record's accumulated sum into the score MAX_L1_NORM * (1 - |y0-yk|/|y0|).
// record_size and total exclude the NUM_OF_HVT_INDEX header ints; the kernel
// is handed dp offset past the indexes and a record length that includes
// them.
// Returns 1 on success, 0 when the sizes are inconsistent.
template<typename T>
int
h_do_l1_norm_step3_v2( T *dp, int total, int record_size, int orig, int hvt_size )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int i, nBlocks ;
	if ( total % record_size )
	{
		fprintf( stderr, "h_do_l1_norm_step3_v2: error size %d %d \n", total, record_size ) ;
		return ( 0 ) ;
	}
	i = total / record_size ;	// number of records (one thread per record)
	if ( i % hvt_size )
	{
		fprintf( stderr, "h_do_l1_norm_step3_v2: error row %d hvt %d \n", i, hvt_size ) ;
		return ( 0 ) ;
	}
	// nBlocks= ( i + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
	h_block_adj ( i, nThreadsPerBlock, &nBlocks ) ;
	hipLaunchKernelGGL(( d_do_l1_norm_step3_v2<T>) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0,
		dp + NUM_OF_HVT_INDEX, i, record_size + NUM_OF_HVT_INDEX,
		orig, hvt_size ) ;
	hipDeviceSynchronize() ;
	return ( 1 ) ;
}
template int
h_do_l1_norm_step3_v2<int>( int *dp, int total, int record_size, int orig, int hvt_size ) ;
template int
h_do_l1_norm_step3_v2<float>( float *dp, int total, int record_size, int orig, int hvt_size ) ;
// to find the max of each hvt_size block
// record_length includes the NUM_OF_HVT_INDEX
// dp starts before the NUM_OF_HVT_INDEX, see caller
// total is the number of hvt_blocks
template<typename T>
__global__ void d_do_l1_norm_step4_3_v2 ( T *dp, int total, int record_length,
int hvt_size, int orig )
{
int *fip, *tip, t_idx = blockIdx.x * blockDim.x + threadIdx.x;
T *odp, *fp ;
odp = dp ;
while ( t_idx < total )
{
dp = odp ;
fp = dp + t_idx * record_length * hvt_size ;
dp += t_idx * L1_NORM_STEP4_RETURN_ENTRY_SIZE * 2 ; // first is the max, secondis no motion
tip = ( int *)dp ;
fip = ( int *)fp ;
*tip++ = *fip++ & CUBE_INFO_T_MSK ;
*tip++ = *fip++ ;
*tip++ = *fip++ ;
*tip++ = *fip ;
fp = odp + ( t_idx * hvt_size + orig ) * record_length ;
fip = ( int *)fp ;
*tip++ = *fip++ & CUBE_INFO_T_MSK ;
*tip++ = *fip++ ;
*tip++ = *fip++ ;
*tip++ = *fip ;
t_idx += CUDA_MAX_THREADS ;
}
}
// to find the max of each hvt_size block
// record_length includes the NUM_OF_HVT_INDEX
// dp starts with idx 0 after NUM_OF_HVT_INDEX, see caller
// cnt is the number of rows need to be processed in this hvt_size rows
// total is the number of entries in ( hvt_size * cnt ) need to be
// processed at this run
template<typename T>
__global__ void d_do_l1_norm_step4_2_v2 ( T *dp, int total, int record_length,
int start, int cnt, int hvt_size )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, *tip, *fip ;
T *tp, *odp ;
odp = dp ;
while ( t_idx < total )
{
dp = odp ;
i = t_idx / cnt ; // number of the hvt_size block
dp += i * hvt_size * record_length ;
// dp points to the first row in this hvt_size block
i = t_idx % cnt ; // index into this hvt_size block after start
tp = dp + i * record_length ; // destination
dp = tp + start * record_length ;
if ( *tp < *dp )
{
tip = ( int * )tp ;
fip = ( int * )dp ;
*tip-- = *fip-- ; // value // float or int QQQ ???
*tip-- = *fip-- ; // h
*tip-- = *fip-- ; // v
*tip = *fip ; // t
}
t_idx += CUDA_MAX_THREADS ;
}
}
// step 4.1: move the no_motion_row to the orig ...
// total is hvt_size * blk_in_x * blk_in_y i.e. total is the number of records
// record_length has the NUM_OF_HVT_INDEX
// dp points to the correct data space behind NUM_OF_HVT_INDEX
// no_motion_idx is the block right after the orig in t-domain and no
// shift in the h/v direction
template<typename T>
__global__ void d_do_l1_norm_step4_1_v2 ( T *dp, int total, int record_length,
int orig, int hvt_size, int no_motion_idx )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int *tip, *fip, i ;
T *tdp, *odp ;
odp = dp ;
while ( t_idx < total )
{
dp = odp ;
i = t_idx % hvt_size ; // which record in the block
dp += t_idx * record_length ;
if ( i != orig )
{
if ( i == no_motion_idx )
{
// ok now the no motion one is in the orig "row" ...
// this is the orig row ...
tdp = dp + ( orig - no_motion_idx ) * record_length ;
tip = ( int *)tdp ;
fip = ( int *)dp ;
*tip-- = *fip-- ; // value
*tip-- = *fip-- ; // h
*tip-- = *fip-- ; // v
*tip = *fip ; // t
#ifdef CUDA_OBS // let the smallest negative number, when all numbers are negative, wins.
} else if ( *dp < 0 )
*dp = 0 ;
#else
}
#endif
}
t_idx += CUDA_MAX_THREADS ;
}
}
// total is overall data area
// record_size does not include NUM_OF_HVT_INDEX
// orig: the block that every "moving" blocks compared with
// Step 4 of the L1-norm pipeline: for every (blk_in_x * blk_in_y) hvt block,
// zero the reference row, reduce to the best-matching h/v/t shift, and copy
// both the winner and the "no motion" entry back to the host.
//   dp            : device table; each record is NUM_OF_HVT_INDEX ints
//                   (t/v/h) followed by record_size data elements
//   total         : overall data element count (excludes the indexes)
//   record_size   : data elements per record (excludes the indexes)
//   orig          : index of the reference record inside each hvt block
//   hvt_size      : number of h/v/t shift combinations per block
//   resp          : host buffer receiving 2 * L1_NORM_STEP4_RETURN_ENTRY_SIZE
//                   ints per block (max entry, then no-motion entry)
//   no_motion_idx : record representing zero shift relative to orig
// Returns 1 on success, 0 on a size or copy error.
template<typename T>
int
h_do_l1_norm_step4_v2( T *dp, int total, int record_size, int orig,
	int hvt_size, int *resp, int no_motion_idx )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int blocks, nBlocks, i, start, row, cnt ;
#ifdef CUDA_OBS
	// Bug fix: "dp" was missing from the argument list ("total" was passed
	// twice), so "total" was consumed by the %p conversion and every later
	// argument printed one slot off.
	fprintf( stderr, "%s: dp %p total %d rec %d orig %d hvt %d resp %p\n",
		__func__, dp, total, record_size, orig, hvt_size, resp ) ;
#endif
	if ( total % record_size )
	{
		fprintf( stderr, "%s: error size %d %d \n", __func__, total,
			record_size ) ;
		return ( 0 ) ;
	}
	row = total / record_size ;
	if ( row % hvt_size )
	{
		fprintf( stderr, "%s: error hvt %d row %d \n", __func__,
			hvt_size, row ) ;
		return ( 0 ) ;
	}
	blocks = row / hvt_size ;	// i.e. blk_in_x * blk_in_y
	// step 4.1: copy the no-motion row over the orig row in every block
	h_block_adj ( row, nThreadsPerBlock, &nBlocks ) ;
	hipLaunchKernelGGL(( d_do_l1_norm_step4_1_v2<T>) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0,
		dp + NUM_OF_HVT_INDEX, row,
		record_size + NUM_OF_HVT_INDEX, orig, hvt_size, no_motion_idx ) ;
	hipDeviceSynchronize() ;
	printf("%s : step 4.1 done \n", __func__ ) ;
	// step 4.2: tree reduction to find the max score per block; "start" is
	// the largest power of two not exceeding hvt_size, halved each pass.
	start = max_log2( hvt_size ) ;
	if ( start != hvt_size )
		start = max_log2(( start / 2 ) - 1 ) ;
	else
		start >>= 1 ;
	cnt = hvt_size - start ;
	while ( cnt > 0 )
	{
#ifdef CUDA_DBG
		printf("%s : row %d cnt %d start %d\n", __func__, row, cnt, start ) ;
#endif
		h_block_adj ( cnt * blocks, nThreadsPerBlock, &nBlocks ) ;
		hipLaunchKernelGGL(( d_do_l1_norm_step4_2_v2<T>) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0,
			dp + NUM_OF_HVT_INDEX, cnt * blocks, record_size + NUM_OF_HVT_INDEX,
			start, cnt, hvt_size ) ;
		hipDeviceSynchronize() ;
		start >>= 1 ;
		cnt = start ;
	}
	printf("%s : step 4.2 done \n", __func__ ) ;
	// step 4.3: pack the winner and the no-motion entry of each block into
	// a compact result area at the front of the table.
	h_block_adj ( blocks, nThreadsPerBlock, &nBlocks ) ;
	hipLaunchKernelGGL(( d_do_l1_norm_step4_3_v2<T>) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0,
		dp, blocks, record_size + NUM_OF_HVT_INDEX, hvt_size, orig ) ;
	hipDeviceSynchronize() ;
	// 2: is the t/v/h/value for best one and the no move one; the packed
	// results must fit inside one hvt block of the table.
	if (( blocks * L1_NORM_STEP4_RETURN_ENTRY_SIZE * sizeof( int ) * 2 ) > (( record_size +
		NUM_OF_HVT_INDEX ) * hvt_size ))
	{
		fprintf(stderr, "%s: error: size mismatch %ld %ld\n", __func__,
			(unsigned long)( blocks * L1_NORM_STEP4_RETURN_ENTRY_SIZE * sizeof( int ) * 2 ),
			(unsigned long)(( record_size + NUM_OF_HVT_INDEX ) * hvt_size )) ;
		return ( 0 ) ;
	}
	printf("%s : step 4.3.1 done \n", __func__ ) ;
	printf("%s: outbuf %p device %p blks %d size %ld\n", __func__,
		resp, dp, blocks,
		(unsigned long)(blocks * L1_NORM_STEP4_RETURN_ENTRY_SIZE * sizeof( int ) * 2 )) ;
	if (( i = hipMemcpy( resp, dp,
		blocks * L1_NORM_STEP4_RETURN_ENTRY_SIZE * sizeof( int ) * 2,
		// one for max, one for no motion
		hipMemcpyDeviceToHost)) != hipSuccess )
	{
		fprintf(stderr, "%s: memcpy failed %d\n", __func__, i ) ;
		return ( 0 ) ;
	}
	printf("%s : step 4.3 done \n", __func__ ) ;
	return ( 1 ) ;
}
template int
h_do_l1_norm_step4_v2<int>( int *dp, int total, int record_size, int orig,
int hvt_size, int *resp, int no_motion_idx ) ;
template int
h_do_l1_norm_step4_v2<float>( float *dp, int total, int record_size, int orig,
int hvt_size, int *resp, int no_motion_idx ) ;
| 65c93b9841c61354c3e606296f578cd44fb9f719.cu | #include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <math.h>
#include "cs_header.h"
#include "cs_dbg.h"
#include "cs_cuda.h"
#include "cs_helper.h"
#include "cs_motion_detect_v2.h"
#include "cs_analysis.h"
/*
Fri Apr 24 17:46:50 EDT 2015
version 2 will only match the first md block with the subsequent md_z -1 block to do
the L1 norm calculation. which works fine, but not too accurate, v3 will change that
to compare the "center" md blocks across the termperal domain
*/
#define CUDA_DBG
// #define CUDA_OBS
// md_x/y/z: total size ... so it is md_x'*2, md_y'*2 and md_z
// tbl_size, // does not include the 3 indexes
// record_size, // do not include the 3 indexes
// hvt_size, // number of combination of h/v/t
// from_blk_size, // the size of the input inner block ... after edge
// Kernel for step0: each thread copies one data element from the
// edge-detected "from" buffer (fdp) into its (h,v,t)-shifted record in the
// "to" buffer (tdp).  Work is spread with a stride of CUDA_MAX_THREADS over
// tbl_size elements (tbl_size / record sizes exclude the 3 h/v/t index words).
// NOTE(review): assumes sizeof(T) == sizeof(int) when reading the index words
// through an int* cast -- confirm for any new T instantiation.
template<typename T>
__global__ void d_do_motion_detection_step0_v2 (
T *fdp, T *tdp,
int tbl_size,
int record_size,
int hvt_size,
int md_x, int md_y, int md_z,
struct cube *dxyzp,
int from_blk_size )
{
int mx, mxy_size ;
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int ot_idx, blk_idx, blk_size, blk_type_idx, cx, cxy_size,
md_z_len, i, j, from, h, v, t, tt, hh, vv ;
T *ofdp, *otdp ;
int *ip ;
#ifdef CUDA_OBS
int *dbp ;
#endif
#ifdef CUDA_OBS
dbp = fdp ;
#endif
// remember the original thread index and buffer bases; each loop pass
// re-derives everything from scratch
ot_idx = t_idx ;
ofdp = fdp ;
otdp = tdp ;
while ( t_idx < tbl_size )
{
fdp = ofdp ;
tdp = otdp ;
blk_size = hvt_size * record_size ; // LDL can be moved out of while loop
blk_idx = t_idx / blk_size ; // which outer block this element belongs to
t_idx -= blk_idx * blk_size ; // index into this block
// tdp is pointing at the beginning of the to-block
tdp += blk_idx * ( record_size + NUM_OF_HVT_INDEX ) * hvt_size ;
// the first index word of the block carries the block type
// (inner/side/corner), used to select the matching cube geometry
ip = ( int * ) tdp ;
j = *ip ;
blk_type_idx = CUBE_INFO_GET( j ) ;
// from block info
cx = dxyzp[ blk_type_idx ].x ;
cxy_size = cx * dxyzp[ blk_type_idx ].y ;
// to block info: the destination window is the cube shrunk by the
// motion-search range md_x/md_y, and md_z_len frames deep
mx = cx - md_x ;
mxy_size = mx * ( dxyzp[ blk_type_idx ].y - md_y ) ;
md_z_len = dxyzp[ blk_type_idx ].z - md_z + 1 ;
#ifdef CUDA_OBS
*dbp++ = blk_type_idx ;
*dbp++ = cx ;
*dbp++ = cxy_size ;
*dbp++ = blk_idx ;
*dbp++ = mx ;
*dbp++ = mxy_size ;
*dbp++ = t_idx ;
*dbp++ = from_blk_size ;
*dbp++ = md_z_len ;
#endif
fdp += blk_idx * from_blk_size ; // adjust the from
// now ftp is pointing at the beginning of the from-block
// starts from here ... same as single block func in version 1
i = t_idx / record_size ;
tdp += i * ( record_size + NUM_OF_HVT_INDEX ) ; // beginning of record
// read this record's t/v/h shift from its 3-word header
ip = ( int * ) tdp ;
t = ( *ip++ ) & CUBE_INFO_T_MSK ;
v = *ip++ ;
h = *ip++ ;
tdp += NUM_OF_HVT_INDEX ;
t_idx %= record_size ; // inside this record
tt = t_idx / mxy_size ; // which frame
if ( tt < md_z_len )
{
j = t_idx % mxy_size ;
hh = j % mx ; // which h
vv = j / mx ; // which v
// from = t * cxy_size + ( v + vv ) * cx + h + hh ;
// serial from = ( t + tt ) * mxy_size + ( v + vv ) * mx + h + hh ;
from = ( t + tt ) * cxy_size + ( v + vv ) * cx + h + hh ;
#ifdef CUDA_OBS
*dbp++ = 2222222 ;
*dbp++ = t ;
*dbp++ = tt ;
*dbp++ = i ;
*dbp++ = t ;
*dbp++ = v ;
*dbp++ = h ;
*dbp++ = t_idx ;
*dbp++ = tt ;
*dbp++ = hh ;
*dbp++ = vv ;
*dbp++ = from ;
*dbp++ = fdp[ from ] ;
*dbp++ = 99999999 ;
#endif
tdp[ t_idx ] = fdp[ from ] ;
}
#ifdef CUDA_DBG
// debug builds mark elements past the temporal depth with a sentinel
else
tdp[ t_idx ] = 2222 ;
#endif
ot_idx += CUDA_MAX_THREADS ;
t_idx = ot_idx ;
}
}
// step0: copy the data into the motion array ...
// block : the result of do_motion_idx <- edge-detection <- L-selection
// cube : the cube that is going to be moved by all h/v/t units
// fromp has the edged data ... top has the TVH index
// md_x is csc.md_x * 2, same to md_y. md_z is the same as csc.md_z
// step0: copy the data into the motion array ...
// block : the result of do_motion_idx <- edge-detection <- L-selection
// cube : the cube that is going to be moved by all h/v/t units
// fromp has the edged data ... top has the TVH index
// md_x is csc.md_x * 2, same to md_y. md_z is the same as csc.md_z
// Returns 1 on success, 0 on error (size check, CUDA_OBS builds only).
template<typename T>
int
h_do_motion_detection_step0_v2 ( T *fromp, T *top,
	int tbl_size,		// overall input size ... excludes the 3 indexes
	int record_size,	// do not includes the 3 indexes
	int md_x, int md_y, int md_z,
	struct cube *d_xyzp,	// cube in device // will have the size of the
	int hvt_size,
	int from_block_size )	// new
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int nBlocks ;	//= ( tbl_size + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;

#ifdef CUDA_DBG
	fprintf( stderr, "%s: fromp %p to %p size %d record %d mdxyz %d %d %d "
		"hvt %d from_block_size %d\n",
		__func__, fromp, top, tbl_size, record_size, md_x, md_y, md_z,
		hvt_size, from_block_size ) ;

	dbg_p_d_data_i ("xyz_table", ( int *)d_xyzp, CUBE_INFO_CNT * sizeof ( *d_xyzp ) / sizeof( int )) ;
#endif

#ifdef CUDA_OBS
	if (( tbl_size % number_blocks ) || ( tbl_size % cube_xy ) ||
		( record_size % cube_xy ))
	{
		fprintf(stderr, "%s: error size %d cube %d rec %d nblks %d\n",
			__func__, tbl_size, cube_xy, record_size, number_blocks ) ;
		return ( 0 ) ;
	}
#endif

	h_block_adj ( tbl_size, nThreadsPerBlock, &nBlocks ) ;

	d_do_motion_detection_step0_v2<T> <<< nBlocks, nThreadsPerBlock >>> (
		fromp, top,
		tbl_size,	// does not include the 3 indexes
		record_size,	// do not include the 3 indexes
		hvt_size,	// number of combination of h/v/t
		md_x, md_y, md_z,
		d_xyzp, from_block_size ) ;

	// cudaThreadSynchronize() is deprecated (and removed in CUDA 12);
	// cudaDeviceSynchronize() is the supported equivalent.
	cudaDeviceSynchronize() ;

#ifdef CUDA_OBS
	dbg_p_d_data_i("motion_detect", top, tbl_size ) ;
#endif
	return ( 1 ) ;
}

// explicit instantiations for the supported element types
template int
h_do_motion_detection_step0_v2<int> ( int *fromp, int *top,
	int tbl_size,		// overall input size ... excludes the 3 indexes
	int record_size,	// do not includes the 3 indexes
	int md_x, int md_y, int md_z,
	struct cube *d_xyzp,	// cube in device // will have the size of the
	int hvt_size,
	int from_block_size ) ;	// new

template int
h_do_motion_detection_step0_v2<float> ( float *fromp, float *top,
	int tbl_size,		// overall input size ... excludes the 3 indexes
	int record_size,	// do not includes the 3 indexes
	int md_x, int md_y, int md_z,
	struct cube *d_xyzp,	// cube in device // will have the size of the
	int hvt_size,
	int from_block_size ) ;	// new
// 3 indexes + real data length == record_length
// loopcnt ... number of records in each blk
// tbl_size ... number of records in blk_x * blk_y blks
// Kernel: fill in the 3-word t/v/h header of every record.
// Each block of loopcnt records covers all h/v/t shift combinations; the last
// record of the group (t_idx == loopcnt - 1) is the un-shifted "orig" record
// and uses a different encoding.  The block type (corner/side/inner) is
// derived from the block position inside the blk_in_x * blk_in_y grid.
__global__ void d_do_motion_idx_v2 ( int *dp, int tbl_size, int loopcnt,
int record_length,
int h_loop, int t_loop, int hv_size,
int blk_in_x, int blk_in_y )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int ot_idx, blk_idx, blk, i, j, k ;
int *odp ;
int *ip ;
#ifdef CUDA_OBS
if ( t_idx == 0 )
{
*dp++ = tbl_size ;
*dp++ = loopcnt ;
*dp++ = record_length ;
*dp++ = h_loop ;
*dp++ = t_loop ;
*dp++ = hv_size ;
*dp++ = blk_in_x ;
*dp++ = blk_in_y ;
}
#endif
ot_idx = t_idx ;
odp = dp ;
while ( t_idx < tbl_size )
{
blk_idx = t_idx / loopcnt ; // which block
dp = odp ;
// j = ( t_idx % loopcnt ) % hv_size ;
// classify the block by its row (i) and column (k) in the block grid
i = blk_idx / blk_in_x ;
k = blk_idx % blk_in_x ;
if (( i == 0 ) || ( i == ( blk_in_y - 1 )))
{
if (( k == 0 ) || ( k == ( blk_in_x - 1 )))
blk = CUBE_INFO_CORNER ;
else
blk = CUBE_INFO_SIDE ;
} else
{
if (( k == 0 ) || ( k == ( blk_in_x - 1 )))
blk = CUBE_INFO_SIDE ;
else
blk = CUBE_INFO_INNER ;
}
dp += t_idx * record_length ;
t_idx -= blk_idx * loopcnt ;
if ( t_idx == ( loopcnt - 1 ))
{
// the "orig" record: centered, no shift in h/v
ip = ( int * ) dp ;
*ip++ = CUBE_INFO_SET( blk ) ; // temporal
*ip++ = (( hv_size / h_loop ) - 1 ) / 2 ;
*ip++ = ( h_loop - 1 ) / 2 ;
} else
{
ip = ( int * ) dp ;
*ip++ = (( t_idx / hv_size ) + 1 ) | CUBE_INFO_SET( blk ) ; // temporal
j = t_idx % hv_size ;
*ip++ = j / h_loop ; // vertical
*ip++ = j % h_loop ; // horizontal
}
// dp += NUM_OF_HVT_INDEX ; // assume float and int are of the same size ... QQQ ... FIX
ot_idx += CUDA_MAX_THREADS ;
t_idx = ot_idx ;
}
}
// total size is the buffer size ... might not be the same as used.
// md_x/y/z are each side
// Host driver: size-check the index buffer, then launch d_do_motion_idx_v2
// to stamp the t/v/h header into every record.
// total_size is the buffer size ... might not be the same as used.
// md_x/y/z are per-side values. *record_sizep gets the data-only record size
// (without NUM_OF_HVT_INDEX); *orig_idx gets the index of the un-shifted record.
// Returns 1 on success, 0 if the buffer is too small.
int
h_do_motion_idx_v2 ( int *dp, int total_size,
	int *orig_idx,
	int blk_in_x, int blk_in_y, struct cube *cubep,
	int md_x, int md_y, int md_z, int *record_sizep )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int record_length, k, i, nBlocks, loopcnt ;

	// the record length is the largest record amongst the inner/side/corner blks
	fprintf( stderr, "%s: total_size %d md x/y/z %d %d %d \n",
		__func__, total_size, md_x, md_y, md_z ) ;

	record_length = 0 ;
	for ( i = 0 ; i < CUBE_INFO_CNT ; i++ )
	{
		k = ( cubep[i].x - md_x * 2 ) *
			( cubep[i].y - md_y * 2 ) * ( cubep[i].z - md_z + 1 ) ;
#ifdef CUDA_DBG
		fprintf(stderr, "%s: i %d cube %d %d %d k %d \n",
			__func__, i, cubep[i].x, cubep[i].y, cubep[i].z, k ) ;
#endif
		if ( k > record_length )
			record_length = k ;
	}

	*record_sizep = record_length ;
	record_length += NUM_OF_HVT_INDEX ;	// 3 indexes .. t/v/h in the beginning ...

	// the last record entry ( i.e. loopcnt -1 ) has a different format ...
	// ck the device code, when t_idx, ( loopcnt - 1)
	loopcnt = ( md_x * 2 + 1 ) * ( md_y * 2 + 1 ) * ( md_z - 1 ) + 1 ;

	i = record_length * loopcnt * blk_in_x * blk_in_y ;
	if ( i > total_size )
	{
		fprintf( stderr, "%s: size needed %d got %d\n",
			__func__, i, total_size ) ;
		return ( 0 ) ;
	}

	i /= record_length ;	// i is now the total number of records

	// nBlocks= ( i + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
	h_block_adj ( i, nThreadsPerBlock, &nBlocks ) ;

	fprintf( stderr, "%s: loopcnt %d i %d rec %d md %d %d %d blk x/y %d %d\n",
		__func__, loopcnt, i, record_length, md_x, md_y, md_z, blk_in_x,
		blk_in_y ) ;

	d_do_motion_idx_v2 <<< nBlocks, nThreadsPerBlock >>> (
		dp, i, loopcnt, record_length, ( md_x * 2 + 1 ), md_z,
		( md_x * 2 + 1 ) * ( md_y * 2 + 1 ),
		blk_in_x, blk_in_y ) ;

	// cudaThreadSynchronize() is deprecated (and removed in CUDA 12);
	// cudaDeviceSynchronize() is the supported equivalent.
	cudaDeviceSynchronize() ;

	// *orig_idx = md_y * ( md_x * 2 + 1 ) + md_x ;
	*orig_idx = loopcnt - 1 ;

	return ( 1 ) ;
}
// step one is to get y0-yk
// no need to worry about the different cube size ... inner/side/corner
// it will be all junk anyway ...
// step one is to get y0-yk
// Kernel: for every element of every non-orig record, subtract the matching
// element of the block's orig record and take the absolute value in place.
// no need to worry about the different cube size ... inner/side/corner
// it will be all junk anyway ...
template<typename T>
__global__ void d_do_l1_norm_step1_v2 ( T *dp, int tbl_size, int record_length,
int orig, int hvt_size )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, j ;
T *odp, *op ;
odp = dp ;
while ( t_idx < tbl_size )
{
dp = odp ;
j = t_idx / record_length ;
i = j / hvt_size ; // index into the hvt_size size block
j %= hvt_size ; // record index into the current block
if ( j != orig )
{
// op points at the orig record's element, dp at this record's element
dp += i * hvt_size * ( record_length + NUM_OF_HVT_INDEX ) ;
op = dp + orig * ( record_length + NUM_OF_HVT_INDEX ) + NUM_OF_HVT_INDEX ;
i = t_idx % record_length ;
op += i ;
dp = dp + j * ( record_length + NUM_OF_HVT_INDEX ) +
NUM_OF_HVT_INDEX + i ;
*dp -= *op ;
if ( *dp < 0 )
*dp = -*dp ; // save a step ... no need to abs()
}
t_idx += CUDA_MAX_THREADS ;
}
}
// now make all entries positive for orig
// table_size is the all the entries of all orig in all ( nblk_in_x * nblk_in_y ) block
// Make every element of each block's "orig" record non-negative (abs in
// place).  tbl_size counts the data elements of all orig records across all
// ( nblk_in_x * nblk_in_y ) blocks; record_length excludes the
// NUM_OF_HVT_INDEX header words.  Strided loop, stride CUDA_MAX_THREADS.
template<typename T>
__global__ void d_do_l1_norm_step1_1_v2 ( T *dp, int tbl_size, int record_length,
	int orig, int hvt_size )
{
	for ( int idx = blockIdx.x * blockDim.x + threadIdx.x ; idx < tbl_size ;
		idx += CUDA_MAX_THREADS )
	{
		int blk = idx / record_length ;		// which hvt_size block
		int off = idx % record_length ;		// element offset in the record

		// address of this element inside the block's orig record,
		// skipping the 3-word t/v/h header
		T *p = dp + ( blk * hvt_size + orig ) * ( record_length + NUM_OF_HVT_INDEX ) +
			off + NUM_OF_HVT_INDEX ;

		if ( *p < 0 )
			*p = -*p ;	// in-place abs ... save a step, no need to abs()
	}
}
// total and record_size does not have the NUM_OF_HVT_INDEX elements
// Host driver for step 1: launch the y0-yk subtraction/abs kernel over all
// non-orig records, then the abs-only kernel over the orig records.
// total and record_size do not include the NUM_OF_HVT_INDEX elements.
// Returns 1 on success, 0 when the sizes are inconsistent.
template<typename T>
int
h_do_l1_norm_step1_v2( T *dp, int total, int record_size, int orig, int hvt_size)
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int nBlocks ;	// = ( total + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;

	if (( total % record_size ) || ( total % ( hvt_size * record_size )))
	{
		fprintf( stderr, "%s: error total %d rec %d hvt %d \n",
			__func__, total, record_size, hvt_size ) ;
		return ( 0 ) ;
	}

	h_block_adj ( total, nThreadsPerBlock, &nBlocks ) ;

	d_do_l1_norm_step1_v2<T> <<< nBlocks, nThreadsPerBlock >>> (
		dp, total, record_size, orig, hvt_size ) ;

	// cudaThreadSynchronize() is deprecated (and removed in CUDA 12);
	// cudaDeviceSynchronize() is the supported equivalent.
	cudaDeviceSynchronize() ;

	// second pass only touches the orig record of each block:
	// total = nblk_x * nbli_y * record_size
	total = ( total / ( record_size * hvt_size )) * record_size ;

	// nBlocks = ( total + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
	h_block_adj ( total, nThreadsPerBlock, &nBlocks ) ;

	d_do_l1_norm_step1_1_v2<T> <<< nBlocks, nThreadsPerBlock >>> (
		dp, total, record_size, orig, hvt_size ) ;

	cudaDeviceSynchronize() ;

	return ( 1 ) ;
}

// explicit instantiations for the supported element types
template int
h_do_l1_norm_step1_v2<int>( int *dp, int total, int record_size, int orig, int hvt_size) ;
template int
h_do_l1_norm_step1_v2<float>( float *dp, int total, int record_size, int orig, int hvt_size) ;
// step two is to get L1-norm(sum)
// all row, should be after the abs() is done
// tbl_size is the number of elements for this addition operation
// record_length includes the NUM_OF_HVT_INDEX
// cnt is the max_cnt for each record, regardless inner/side/corner
// step two is to get L1-norm(sum)
// One reduction pass: element j of each record accumulates element j+start,
// where start/current_cnt come from the (repurposed) cube entry for the
// record's block type.  The host calls this repeatedly, halving start/cnt.
// all row, should be after the abs() is done
// tbl_size is the number of elements for this addition operation
// record_length includes the NUM_OF_HVT_INDEX
// cnt is the max_cnt for each record, regardless inner/side/corner
template<typename T>
__global__ void d_do_l1_norm_step2_v2 ( T *dp, int tbl_size, int record_length,
int cnt, struct cube *d_xyzp, int *d_resp )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int *ip, record_type, start, current_cnt , j ;
long long l, ll ;
T *fp, *odp ;
odp = dp ;
while ( t_idx < tbl_size )
{
dp = odp ;
j = t_idx / cnt ;
dp += record_length * j ;
ip = ( int * )dp ;
record_type = CUBE_INFO_GET( *ip ) ;
// NOTE(review): .z and .y are repurposed by the host (h_do_l1_norm_step2_v2)
// to hold the reduction "start" offset and element count for this pass
start = d_xyzp[ record_type ].z ;
current_cnt = d_xyzp[ record_type ].y ;
j = t_idx % cnt ;
if ( current_cnt > j )
{
dp += NUM_OF_HVT_INDEX ;
fp = dp + start ;
// this check is needed when *dp is int
{
// do the add in 64-bit; any carry into the upper 32 bits means the
// 32-bit add below would overflow -- report the offending t_idx
l = dp[ j ] ;
ll = fp [ j ] ;
l += ll ;
if ( l & 0xffffffff00000000 )
*d_resp = t_idx ;
}
dp[ j ] += fp [ j ] ;
}
t_idx += CUDA_MAX_THREADS ;
}
}
// step 1.1 should be the abs() ... not needed, done in step 1
// step 2 is to do the sum
// record_size does not have the NUM_OF_HVT_INDEX elements
// total is the overall number of data elements, no NUM_OF_HVT_INDEX
// NOTE d_xyzp->y/z will be destroyed ... x
// hcubep: has been adjusted to the after md_x/y/z size ;
// step 1.1 should be the abs() ... not needed, done in step 1
// step 2 is to do the sum
// Host driver: performs a log2-style tree reduction (repeated kernel passes)
// to sum each record into its element 0, watching for 32-bit overflow via
// d_resp (seeded with the 0xdeadbeef sentinel).
// record_size does not have the NUM_OF_HVT_INDEX elements
// total is the overall number of data elements, no NUM_OF_HVT_INDEX
// NOTE d_xyzp->y/z will be destroyed ... x
// (they are temporarily repurposed as per-pass cnt/start and restored at the end)
// hcubep: has been adjusted to the after md_x/y/z size ;
template<typename T>
int
h_do_l1_norm_step2_v2( T *dp, int total, int record_size,
struct cube *hcubep, struct cube *d_xyzp, int *d_resp )
{
int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
int nBlocks, i, start, row, cnt ;
int max_cnt ;
struct cube cxyz[3] ;
#ifdef CUDA_OBS
fprintf( stderr, "%s: dp %p total %d record %d \n",
__func__, dp, total, record_size ) ;
#endif
// seed the device overflow flag with a sentinel; the kernel overwrites it
// with the offending index on overflow
max_cnt = 0xdeadbeef ;
if ( !put_d_data_i ( d_resp, &max_cnt, sizeof ( int )))
{
fprintf( stderr, "%s: put data failed d_resp %p \n", __func__, d_resp ) ;
}
memcpy ( cxyz, hcubep, sizeof ( cxyz )) ;
max_cnt = 0 ;
for ( i = 0 ; i < CUBE_INFO_CNT ; i++ )
{
// y is cnt ... z is start ...
cnt = cxyz[i].y = hcubep[i].x * hcubep[i].y * hcubep[i].z ; // size
// start = largest power of two strictly below cnt (or cnt/2 if cnt is one)
start = max_log2( cnt ) ;
if ( start != cnt )
start = max_log2(( start / 2 ) - 1 ) ;
else
start >>= 1 ;
cxyz[i].z = start ;
cxyz[i].y -= start ;
if ( max_cnt < cxyz[i].y )
max_cnt = cxyz[i].y ;
#ifdef CUDA_DBG
fprintf( stderr, "%s: i %d z %d y %d max %d cnt %d \n",
__func__, i, cxyz[i].z, cxyz[i].y, max_cnt, cnt ) ;
#endif
}
h_set_cube_config ( d_xyzp, cxyz ) ;
row = total / record_size ;
if ( total % record_size )
{
fprintf( stderr, "h_do_l1_norm_step2_v2: error size %d %d \n", total, record_size ) ;
return ( 0 ) ;
}
// one kernel pass per halving of the reduction width
while ( max_cnt > 0 )
{
i = row * max_cnt ;
#ifdef CUDA_DBG
fprintf( stderr, "row %d cnt %d i %d \n", row, max_cnt, i ) ;
#endif
// nBlocks= ( i + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
h_block_adj ( i, nThreadsPerBlock, &nBlocks ) ;
d_do_l1_norm_step2_v2<T> <<< nBlocks, nThreadsPerBlock >>> (
dp, i, record_size + NUM_OF_HVT_INDEX,
max_cnt, d_xyzp, d_resp ) ;
cudaThreadSynchronize() ;
// if the sentinel was overwritten, a 32-bit overflow happened on device
if ( !get_d_data_i ( d_resp, &i, sizeof ( int )))
{
fprintf( stderr, "%s: get data failed \n", __func__ ) ;
}
if ( i != 0xdeadbeef )
fprintf( stderr, "%s: overflow error return %x \n", __func__, i ) ;
max_cnt = 0 ;
for ( i = 0 ; i < CUBE_INFO_CNT ; i++ )
{
cxyz[i].z >>= 1 ;
cxyz[i].y = cxyz[i].z ;
if ( max_cnt < cxyz[i].y )
max_cnt = cxyz[i].y ;
#ifdef CUDA_DBG
fprintf(stderr,"%s-2: i %d z %d y %d max %d \n",
__func__, i, cxyz[i].z, cxyz[i].y, max_cnt ) ;
#endif
}
h_set_cube_config ( d_xyzp, cxyz ) ;
}
// restore the real cube geometry before returning
h_set_cube_config ( d_xyzp, hcubep ) ;
return ( 1 ) ;
}
// Explicit instantiations for the supported element types.
// BUGFIX: the int variant previously read "template<>", which declares an
// explicit *specialization* (never defined) instead of requesting an explicit
// *instantiation* -- the <int> symbol would be missing at link time.  The
// float line below already used the correct "template" form.
template int
h_do_l1_norm_step2_v2<int>( int *dp, int total, int record_size,
	struct cube *hcubep, struct cube *d_xyzp, int *d_resp ) ;
template int
h_do_l1_norm_step2_v2<float>( float *dp, int total, int record_size,
	struct cube *hcubep, struct cube *d_xyzp, int *d_resp ) ;
#define MAX_L1_NORM 1000
// step 3 is to get 1-|y0-yk|/|y0|
// row_size is the number of rows ...
// record_length includes the NUM_OF_HVT_INDEX
// dp starts with valid data, see caller
// step 3 is to get 1-|y0-yk|/|y0|
// Kernel: for every non-orig record, replace its (already reduced) L1 sum
// with MAX_L1_NORM * ( 1 - |y0-yk| / |y0| ), computed in float to avoid
// integer-division truncation when T is int.
// row_size is the number of rows ...
// record_length includes the NUM_OF_HVT_INDEX
// dp starts with valid data, see caller
template<typename T>
__global__ void d_do_l1_norm_step3_v2 ( T *dp, int row_size, int record_length,
int orig, int hvt_size )
{
int ot_idx, i, t_idx = blockIdx.x * blockDim.x + threadIdx.x;
T *op, *odp ;
odp = dp ;
ot_idx = t_idx ;
while ( t_idx < row_size )
{
dp = odp ;
i = t_idx / hvt_size ; // which block
t_idx -= i * hvt_size ; // which record in block
// skip the orig
if ( t_idx != orig )
{
dp += ( i * hvt_size ) * record_length ;
op = dp + orig * record_length ; // op -> |y0|, dp -> |y0-yk|
dp += t_idx * record_length ;
// FIX ... if no int ... then there is no such problem
// *dp = MAX_L1_NORM - ( MAX_L1_NORM * ( *dp )) / (*op) ;
*dp = ((T)MAX_L1_NORM) - ( T )((( float )MAX_L1_NORM ) * ((( float )*dp ) / (( float ) *op ))) ;
}
ot_idx += CUDA_MAX_THREADS ;
t_idx = ot_idx ;
}
}
// record_size does not have the NUM_OF_HVT_INDEX elements
// total is the overall number of data elements, no NUM_OF_HVT_INDEX
// Host driver for step 3: launch d_do_l1_norm_step3_v2 over one element per
// record (the record's reduced L1 sum, at offset 0 past the header).
// record_size does not have the NUM_OF_HVT_INDEX elements.
// total is the overall number of data elements, no NUM_OF_HVT_INDEX.
// Returns 1 on success, 0 when the sizes are inconsistent.
template<typename T>
int
h_do_l1_norm_step3_v2( T *dp, int total, int record_size, int orig, int hvt_size )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int i, nBlocks ;

	if ( total % record_size )
	{
		fprintf( stderr, "h_do_l1_norm_step3_v2: error size %d %d \n", total, record_size ) ;
		return ( 0 ) ;
	}

	i = total / record_size ;	// number of rows

	if ( i % hvt_size )
	{
		fprintf( stderr, "h_do_l1_norm_step3_v2: error row %d hvt %d \n", i, hvt_size ) ;
		return ( 0 ) ;
	}

	// nBlocks= ( i + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
	h_block_adj ( i, nThreadsPerBlock, &nBlocks ) ;

	d_do_l1_norm_step3_v2<T> <<< nBlocks, nThreadsPerBlock >>> (
		dp + NUM_OF_HVT_INDEX, i, record_size + NUM_OF_HVT_INDEX,
		orig, hvt_size ) ;

	// cudaThreadSynchronize() is deprecated (and removed in CUDA 12);
	// cudaDeviceSynchronize() is the supported equivalent.
	cudaDeviceSynchronize() ;

	return ( 1 ) ;
}

// explicit instantiations for the supported element types
template int
h_do_l1_norm_step3_v2<int>( int *dp, int total, int record_size, int orig, int hvt_size ) ;
template int
h_do_l1_norm_step3_v2<float>( float *dp, int total, int record_size, int orig, int hvt_size ) ;
// to find the max of each hvt_size block
// record_length includes the NUM_OF_HVT_INDEX
// dp starts before the NUM_OF_HVT_INDEX, see caller
// total is the number of hvt_blocks
// to find the max of each hvt_size block
// Kernel (step 4.3): one thread per hvt block; packs two
// L1_NORM_STEP4_RETURN_ENTRY_SIZE entries (t/v/h/value) into the front of the
// buffer -- first the winning (max) record, second the no-motion/orig record.
// record_length includes the NUM_OF_HVT_INDEX
// dp starts before the NUM_OF_HVT_INDEX, see caller
// total is the number of hvt_blocks
template<typename T>
__global__ void d_do_l1_norm_step4_3_v2 ( T *dp, int total, int record_length,
int hvt_size, int orig )
{
int *fip, *tip, t_idx = blockIdx.x * blockDim.x + threadIdx.x;
T *odp, *fp ;
odp = dp ;
while ( t_idx < total )
{
dp = odp ;
fp = dp + t_idx * record_length * hvt_size ;
dp += t_idx * L1_NORM_STEP4_RETURN_ENTRY_SIZE * 2 ; // first is the max, second is the no-motion entry
// copy t/v/h/value of the max record (row 0 after step 4.2)
tip = ( int *)dp ;
fip = ( int *)fp ;
*tip++ = *fip++ & CUBE_INFO_T_MSK ;
*tip++ = *fip++ ;
*tip++ = *fip++ ;
*tip++ = *fip ;
// copy t/v/h/value of the orig (no-motion) record
fp = odp + ( t_idx * hvt_size + orig ) * record_length ;
fip = ( int *)fp ;
*tip++ = *fip++ & CUBE_INFO_T_MSK ;
*tip++ = *fip++ ;
*tip++ = *fip++ ;
*tip++ = *fip ;
t_idx += CUDA_MAX_THREADS ;
}
}
// to find the max of each hvt_size block
// record_length includes the NUM_OF_HVT_INDEX
// dp starts with idx 0 after NUM_OF_HVT_INDEX, see caller
// cnt is the number of rows need to be processed in this hvt_size rows
// total is the number of entries in ( hvt_size * cnt ) need to be
// processed at this run
// to find the max of each hvt_size block
// Kernel (step 4.2): one pairwise-max pass of a tree reduction over the rows
// of each hvt_size block; row i takes row i+start when the latter's value is
// larger, copying value and the t/v/h header along with it.
// record_length includes the NUM_OF_HVT_INDEX
// dp starts with idx 0 after NUM_OF_HVT_INDEX, see caller
// cnt is the number of rows need to be processed in this hvt_size rows
// total is the number of entries in ( hvt_size * cnt ) need to be
// processed at this run
template<typename T>
__global__ void d_do_l1_norm_step4_2_v2 ( T *dp, int total, int record_length,
int start, int cnt, int hvt_size )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int i, *tip, *fip ;
T *tp, *odp ;
odp = dp ;
while ( t_idx < total )
{
dp = odp ;
i = t_idx / cnt ; // number of the hvt_size block
dp += i * hvt_size * record_length ;
// dp points to the first row in this hvt_size block
i = t_idx % cnt ; // index into this hvt_size block after start
tp = dp + i * record_length ; // destination
dp = tp + start * record_length ;
if ( *tp < *dp )
{
// candidate wins: copy value then walk backwards over the h/v/t header
tip = ( int * )tp ;
fip = ( int * )dp ;
*tip-- = *fip-- ; // value // float or int QQQ ???
*tip-- = *fip-- ; // h
*tip-- = *fip-- ; // v
*tip = *fip ; // t
}
t_idx += CUDA_MAX_THREADS ;
}
}
// step 4.1: move the no_motion_row to the orig ...
// total is hvt_size * blk_in_x * blk_in_y i.e. total is the number of records
// record_length has the NUM_OF_HVT_INDEX
// dp points to the correct data space behind NUM_OF_HVT_INDEX
// no_motion_idx is the block right after the orig in t-domain and no
// shift in the h/v direction
// step 4.1: move the no_motion_row to the orig ...
// Kernel: one thread per record; the record at no_motion_idx copies its
// value and t/v/h header into the orig row of the same block, so the orig
// row holds the no-motion candidate before the max reduction of step 4.2.
// total is hvt_size * blk_in_x * blk_in_y i.e. total is the number of records
// record_length has the NUM_OF_HVT_INDEX
// dp points to the correct data space behind NUM_OF_HVT_INDEX
// no_motion_idx is the block right after the orig in t-domain and no
// shift in the h/v direction
template<typename T>
__global__ void d_do_l1_norm_step4_1_v2 ( T *dp, int total, int record_length,
int orig, int hvt_size, int no_motion_idx )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int *tip, *fip, i ;
T *tdp, *odp ;
odp = dp ;
while ( t_idx < total )
{
dp = odp ;
i = t_idx % hvt_size ; // which record in the block
dp += t_idx * record_length ;
if ( i != orig )
{
if ( i == no_motion_idx )
{
// ok now the no motion one is in the orig "row" ...
// this is the orig row ...
tdp = dp + ( orig - no_motion_idx ) * record_length ;
tip = ( int *)tdp ;
fip = ( int *)dp ;
*tip-- = *fip-- ; // value
*tip-- = *fip-- ; // h
*tip-- = *fip-- ; // v
*tip = *fip ; // t
#ifdef CUDA_OBS // let the smallest negative number, when all numbers are negative, wins.
} else if ( *dp < 0 )
*dp = 0 ;
#else
}
#endif
}
t_idx += CUDA_MAX_THREADS ;
}
}
// total is overall data area
// record_size does not include NUM_OF_HVT_INDEX
// orig: the block that every "moving" blocks compared with
// Host driver for step 4: 4.1 seeds the orig row with the no-motion record,
// 4.2 tree-reduces each hvt block to its max record, 4.3 packs the winner and
// the no-motion record (t/v/h/value each) into the front of the buffer, which
// is then copied back to resp.
// total is overall data area
// record_size does not include NUM_OF_HVT_INDEX
// orig: the block that every "moving" blocks compared with
// Returns 1 on success, 0 on size error or memcpy failure.
template<typename T>
int
h_do_l1_norm_step4_v2( T *dp, int total, int record_size, int orig,
	int hvt_size, int *resp, int no_motion_idx )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int blocks, nBlocks, i, start, row, cnt ;

#ifdef CUDA_OBS
	// BUGFIX: the format string expects dp as the first %p, but the old call
	// passed "total" twice and never passed dp (undefined behavior for %p)
	fprintf( stderr, "%s: dp %p total %d rec %d orig %d hvt %d resp %p\n",
		__func__, ( void * )dp, total, record_size, orig, hvt_size,
		( void * )resp ) ;
#endif

	if ( total % record_size )
	{
		fprintf( stderr, "%s: error size %d %d \n", __func__, total,
			record_size ) ;
		return ( 0 ) ;
	}

	row = total / record_size ;

	if ( row % hvt_size )
	{
		fprintf( stderr, "%s: error hvt %d row %d \n", __func__,
			hvt_size, row ) ;
		return ( 0 ) ;
	}

	blocks = row / hvt_size ;	// i.e. blk_in_x * blk_in_y

	// step 4.1 ... move the no-motion record into the orig row
	// nBlocks= ( row + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
	h_block_adj ( row, nThreadsPerBlock, &nBlocks ) ;

	d_do_l1_norm_step4_1_v2<T> <<< nBlocks, nThreadsPerBlock >>> (
		dp + NUM_OF_HVT_INDEX, row,
		record_size + NUM_OF_HVT_INDEX, orig, hvt_size, no_motion_idx ) ;

	// cudaThreadSynchronize() is deprecated (and removed in CUDA 12);
	// cudaDeviceSynchronize() is the supported equivalent.
	cudaDeviceSynchronize() ;

	printf("%s : step 4.1 done \n", __func__ ) ;

	// step 4.2 ... get the max via a log2 tree reduction over the rows
	start = max_log2( hvt_size ) ;
	if ( start != hvt_size )
		start = max_log2(( start / 2 ) - 1 ) ;
	else
		start >>= 1 ;

	cnt = hvt_size - start ;
	while ( cnt > 0 )
	{
#ifdef CUDA_DBG
		printf("%s : row %d cnt %d start %d\n", __func__, row, cnt, start ) ;
#endif
		// nBlocks= ( cnt * blocks + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
		h_block_adj ( cnt * blocks, nThreadsPerBlock, &nBlocks ) ;

		d_do_l1_norm_step4_2_v2<T> <<< nBlocks, nThreadsPerBlock >>> (
			dp + NUM_OF_HVT_INDEX, cnt * blocks, record_size + NUM_OF_HVT_INDEX,
			start, cnt, hvt_size ) ;

		cudaDeviceSynchronize() ;

		start >>= 1 ;
		cnt = start ;
	}

	printf("%s : step 4.2 done \n", __func__ ) ;

	// nBlocks = ( blocks + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
	h_block_adj ( blocks, nThreadsPerBlock, &nBlocks ) ;

	d_do_l1_norm_step4_3_v2<T> <<< nBlocks, nThreadsPerBlock >>> (
		dp, blocks, record_size + NUM_OF_HVT_INDEX, hvt_size, orig ) ;

	cudaDeviceSynchronize() ;

	// 2: is the t/v/h/value for best one and the no move one ...
	// make sure the packed result actually fits in the first hvt block
	if (( blocks * L1_NORM_STEP4_RETURN_ENTRY_SIZE * sizeof( int ) * 2 ) > (( record_size +
		NUM_OF_HVT_INDEX ) * hvt_size ))
	{
		fprintf(stderr, "%s: error: size mismatch %ld %ld\n", __func__,
			(unsigned long)( blocks * L1_NORM_STEP4_RETURN_ENTRY_SIZE * sizeof( int ) * 2 ),
			(unsigned long)(( record_size + NUM_OF_HVT_INDEX ) * hvt_size )) ;
		return ( 0 ) ;
	}

	printf("%s : step 4.3.1 done \n", __func__ ) ;

	printf("%s: outbuf %p device %p blks %d size %ld\n", __func__,
		resp, dp, blocks,
		(unsigned long)(blocks * L1_NORM_STEP4_RETURN_ENTRY_SIZE * sizeof( int ) * 2 )) ;

	if (( i = cudaMemcpy( resp, dp,
		blocks * L1_NORM_STEP4_RETURN_ENTRY_SIZE * sizeof( int ) * 2,
		// one for max, one for no motion
		cudaMemcpyDeviceToHost)) != cudaSuccess )
	{
		fprintf(stderr, "%s: memcpy failed %d\n", __func__, i ) ;
		return ( 0 ) ;
	}

	printf("%s : step 4.3 done \n", __func__ ) ;

	return ( 1 ) ;
}

// explicit instantiations for the supported element types
template int
h_do_l1_norm_step4_v2<int>( int *dp, int total, int record_size, int orig,
	int hvt_size, int *resp, int no_motion_idx ) ;
template int
h_do_l1_norm_step4_v2<float>( float *dp, int total, int record_size, int orig,
	int hvt_size, int *resp, int no_motion_idx ) ;
|
32017b5dab30ce9cc56bef720d1ad60fca5fbc73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2018 XGBoost contributors
*/
#include <gtest/gtest.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/copy.h>
#include <thrust/memory.h>
#include "../../../src/common/device_helpers.cuh"
#include <xgboost/span.h>
#include "test_span.h"
namespace xgboost {
namespace common {
// RAII helper owning a single device-side int "status" flag, initialised
// to 1.  Kernels/functors flip it via Data(); the host reads it via Get().
struct TestStatus {
 private:
  int *status_;

 public:
  TestStatus () {
    dh::safe_cuda(hipMalloc(&status_, sizeof(int)));
    int initial = 1;
    dh::safe_cuda(hipMemcpy(status_, &initial,
                            sizeof(int), hipMemcpyHostToDevice));
  }

  ~TestStatus() {
    dh::safe_cuda(hipFree(status_));
  }

  // Copy the flag back to the host and return it.
  int Get() {
    int result;
    dh::safe_cuda(hipMemcpy(&result, status_,
                            sizeof(int), hipMemcpyDeviceToHost));
    return result;
  }

  // Raw device pointer, for passing into kernels.
  int* Data() {
    return status_;
  }
};
// Kernel used only to verify that a Span<float> can be passed to device code
// by value; the bounds check keeps the parameter from being optimized out.
__global__ void TestFromOtherKernel(Span<float> span) {
  size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < span.size()) {
    // intentionally empty -- compile/launch check only
  }
}

// Same check, but converting to a Span over const elements with a static
// extent (tests the implicit Span conversions).
__global__ void TestFromOtherKernelConst(Span<float const, 16> span) {
  size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < span.size()) {
    // intentionally empty -- compile/launch check only
  }
}
/*!
* \brief Here we just test whether the code compiles.
*/
// Compile/launch-only test: constructs Spans with dynamic and static extents
// from raw device pointers and passes them (including const conversions) to
// kernels.  No assertions -- success is compiling and launching cleanly.
TEST(GPUSpan, FromOther) {
thrust::host_vector<float> h_vec (16);
std::iota(h_vec.begin(), h_vec.end(), 0);
thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
// dynamic extent
{
Span<float> span (d_vec.data().get(), d_vec.size());
hipLaunchKernelGGL(( TestFromOtherKernel), dim3(1), dim3(16), 0, 0, span);
}
{
Span<float> span (d_vec.data().get(), d_vec.size());
hipLaunchKernelGGL(( TestFromOtherKernelConst), dim3(1), dim3(16), 0, 0, span);
}
// static extent
{
Span<float, 16> span(d_vec.data().get(), d_vec.data().get() + 16);
hipLaunchKernelGGL(( TestFromOtherKernel), dim3(1), dim3(16), 0, 0, span);
}
{
Span<float, 16> span(d_vec.data().get(), d_vec.data().get() + 16);
hipLaunchKernelGGL(( TestFromOtherKernelConst), dim3(1), dim3(16), 0, 0, span);
}
}
// Runs the shared TestAssignment device functor (defined in test_span.h);
// the device-side asserts set status to non-1 on failure.
TEST(GPUSpan, Assignment) {
dh::safe_cuda(hipSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestAssignment{status.Data()});
ASSERT_EQ(status.Get(), 1);
}

// Sanity check of the TestStatus helper itself: TestTestStatus is expected
// to write -1 into the device flag.
TEST(GPUSpan, TestStatus) {
dh::safe_cuda(hipSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestTestStatus{status.Data()});
ASSERT_EQ(status.Get(), -1);
}
// Device functor: element-wise compares two arrays and records any mismatch
// in the shared status flag (via SPAN_ASSERT_TRUE from test_span.h).
template <typename T>
struct TestEqual {
private:
T *lhs_, *rhs_;
int *status_;

public:
TestEqual(T* _lhs, T* _rhs, int * _status) :
lhs_(_lhs), rhs_(_rhs), status_(_status) {}

XGBOOST_DEVICE void operator()(size_t _idx) {
bool res = lhs_[_idx] == rhs_[_idx];
SPAN_ASSERT_TRUE(res, status_);
}
};
// Interoperability with thrust: a Span built from device_vector's raw
// pointer must alias the same memory and compare equal element-wise.
TEST(GPUSpan, WithTrust) {
dh::safe_cuda(hipSetDevice(0));
// Not adviced to initialize span with host_vector, since h_vec.data() is
// a host function.
thrust::host_vector<float> h_vec (16);
std::iota(h_vec.begin(), h_vec.end(), 0);

thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());

// Can't initialize span with device_vector, since d_vec.data() is not raw
// pointer
{
Span<float> s (d_vec.data().get(), d_vec.size());

ASSERT_EQ(d_vec.size(), s.size());
ASSERT_EQ(d_vec.data().get(), s.data());
}

{
TestStatus status;
thrust::device_vector<float> d_vec1 (d_vec.size());
thrust::copy(thrust::device, d_vec.begin(), d_vec.end(), d_vec1.begin());
Span<float> s (d_vec1.data().get(), d_vec.size());

dh::LaunchN(16, TestEqual<float>{
thrust::raw_pointer_cast(d_vec1.data()),
s.data(), status.Data()});
ASSERT_EQ(status.Get(), 1);

// FIXME(trivialfis): memory error!
// bool res = thrust::equal(thrust::device,
//                          d_vec.begin(), d_vec.end(),
//                          s.begin());
}
}
// Forward iterator (begin/end) behaviour, checked on device by the shared
// TestBeginEnd functor from test_span.h.
TEST(GPUSpan, BeginEnd) {
dh::safe_cuda(hipSetDevice(0));

TestStatus status;
dh::LaunchN(16, TestBeginEnd{status.Data()});
ASSERT_EQ(status.Get(), 1);
}

// Reverse iterator (rbegin/rend) behaviour, same pattern.
TEST(GPUSpan, RBeginREnd) {
dh::safe_cuda(hipSetDevice(0));

TestStatus status;
dh::LaunchN(16, TestRBeginREnd{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
// Writes span.size() - i into element i, so the host side can verify that
// device writes made through a Span are visible in the underlying buffer.
__global__ void TestModifyKernel(Span<float> span) {
  size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < span.size()) {
    span[i] = span.size() - i;
  }
}
// Writes through a Span on device, then verifies each element on the host
// (element i must equal size - i, as set by TestModifyKernel).
TEST(GPUSpan, Modify) {
thrust::host_vector<float> h_vec (16);
InitializeRange(h_vec.begin(), h_vec.end());

thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());

Span<float> span (d_vec.data().get(), d_vec.size());

hipLaunchKernelGGL(( TestModifyKernel), dim3(1), dim3(16), 0, 0, span);

for (size_t i = 0; i < d_vec.size(); ++i) {
ASSERT_EQ(d_vec[i], d_vec.size() - i);
}
}
// Span observers (size/empty/etc.), checked on device by the shared
// TestObservers functor from test_span.h.
TEST(GPUSpan, Observers) {
dh::safe_cuda(hipSetDevice(0));

TestStatus status;
dh::LaunchN(16, TestObservers{status.Data()});
ASSERT_EQ(status.Get(), 1);
}

// Iterator comparison operators, checked on device.
TEST(GPUSpan, Compare) {
dh::safe_cuda(hipSetDevice(0));

TestStatus status;
dh::LaunchN(16, TestIterCompare{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
// Device functor that simply indexes into the captured Span; used by the
// death test below to trigger Span's out-of-bounds assertion.
struct TestElementAccess {
private:
Span<float> span_;

public:
XGBOOST_DEVICE explicit TestElementAccess (Span<float> _span) : span_(_span) {}

XGBOOST_DEVICE float operator()(size_t _idx) {
float tmp = span_[_idx];
return tmp;
}
};
// Death test: launching 17 threads over a 16-element Span makes the last
// thread index out of bounds, which must abort the process.
TEST(GPUSpanDeathTest, ElementAccess) {
dh::safe_cuda(hipSetDevice(0));
auto test_element_access = []() {
thrust::host_vector<float> h_vec (16);
InitializeRange(h_vec.begin(), h_vec.end());

thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());

Span<float> span (d_vec.data().get(), d_vec.size());
dh::LaunchN(17, TestElementAccess{span});
};

testing::internal::CaptureStdout();
EXPECT_DEATH(test_element_access(), "");
std::string output = testing::internal::GetCapturedStdout();
}
// Kernels that call first()/last() with an intentionally invalid count
// (size_t(-1)); each is expected to trip Span's bounds assertion and is
// exercised only from the FirstLast death test below.
__global__ void TestFirstDynamicKernel(Span<float> _span) {
_span.first<static_cast<Span<float>::index_type>(-1)>();
}
__global__ void TestFirstStaticKernel(Span<float> _span) {
_span.first(static_cast<Span<float>::index_type>(-1));
}
__global__ void TestLastDynamicKernel(Span<float> _span) {
_span.last<static_cast<Span<float>::index_type>(-1)>();
}
__global__ void TestLastStaticKernel(Span<float> _span) {
_span.last(static_cast<Span<float>::index_type>(-1));
}
// Death tests for first()/last() with invalid counts (both template and
// runtime-argument forms); each must abort.
TEST(GPUSpanDeathTest, FirstLast) {
// We construct vectors multiple times since thrust can not recover from
// death test.
auto lambda_first_dy = []() {
thrust::host_vector<float> h_vec (4);
InitializeRange(h_vec.begin(), h_vec.end());

thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());

Span<float> span (d_vec.data().get(), d_vec.size());
hipLaunchKernelGGL(( TestFirstDynamicKernel), dim3(1), dim3(1), 0, 0, span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_first_dy(), "");
std::string output = testing::internal::GetCapturedStdout();

auto lambda_first_static = []() {
thrust::host_vector<float> h_vec (4);
InitializeRange(h_vec.begin(), h_vec.end());

thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());

Span<float> span (d_vec.data().get(), d_vec.size());
hipLaunchKernelGGL(( TestFirstStaticKernel), dim3(1), dim3(1), 0, 0, span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_first_static(), "");
output = testing::internal::GetCapturedStdout();

auto lambda_last_dy = []() {
thrust::host_vector<float> h_vec (4);
InitializeRange(h_vec.begin(), h_vec.end());

thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());

Span<float> span (d_vec.data().get(), d_vec.size());
hipLaunchKernelGGL(( TestLastDynamicKernel), dim3(1), dim3(1), 0, 0, span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_last_dy(), "");
output = testing::internal::GetCapturedStdout();

auto lambda_last_static = []() {
thrust::host_vector<float> h_vec (4);
InitializeRange(h_vec.begin(), h_vec.end());

thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());

Span<float> span (d_vec.data().get(), d_vec.size());
hipLaunchKernelGGL(( TestLastStaticKernel), dim3(1), dim3(1), 0, 0, span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_last_static(), "");
output = testing::internal::GetCapturedStdout();
}
namespace {
// front()/back() on an empty Span must trap on device; the dmlc::Error from
// the failed sync is converted to std::terminate so EXPECT_DEATH sees the
// process die inside this test.
void TestFrontBack() {
Span<float> s;
EXPECT_DEATH(
{
// make sure the termination happens inside this test.
try {
dh::LaunchN(1, [=] __device__(size_t) { s.front(); });
dh::safe_cuda(hipDeviceSynchronize());
dh::safe_cuda(hipGetLastError());
} catch (dmlc::Error const& e) {
std::terminate();
}
},
"");
EXPECT_DEATH(
{
try {
dh::LaunchN(1, [=] __device__(size_t) { s.back(); });
dh::safe_cuda(hipDeviceSynchronize());
dh::safe_cuda(hipGetLastError());
} catch (dmlc::Error const& e) {
std::terminate();
}
},
"");
}
}  // namespace

TEST(GPUSpanDeathTest, FrontBack) {
TestFrontBack();
}
// Kernels calling subspan() with an offset past the end of a 4-element span;
// both forms must trip the bounds assertion (used by the Subspan death test).
__global__ void TestSubspanDynamicKernel(Span<float> _span) {
_span.subspan(16, 0);
}
__global__ void TestSubspanStaticKernel(Span<float> _span) {
_span.subspan<16>();
}
// Death tests for subspan() with an out-of-range offset, runtime and
// template forms.
TEST(GPUSpanDeathTest, Subspan) {
auto lambda_subspan_dynamic = []() {
thrust::host_vector<float> h_vec (4);
InitializeRange(h_vec.begin(), h_vec.end());

thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());

Span<float> span (d_vec.data().get(), d_vec.size());
hipLaunchKernelGGL(( TestSubspanDynamicKernel), dim3(1), dim3(1), 0, 0, span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_subspan_dynamic(), "");
std::string output = testing::internal::GetCapturedStdout();

auto lambda_subspan_static = []() {
thrust::host_vector<float> h_vec (4);
InitializeRange(h_vec.begin(), h_vec.end());

thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());

Span<float> span (d_vec.data().get(), d_vec.size());
hipLaunchKernelGGL(( TestSubspanStaticKernel), dim3(1), dim3(1), 0, 0, span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_subspan_static(), "");
output = testing::internal::GetCapturedStdout();
}
TEST(GPUSpanIter, Construct) {
dh::safe_cuda(hipSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestIterConstruct{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpanIter, Ref) {
dh::safe_cuda(hipSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestIterRef{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpanIter, Calculate) {
dh::safe_cuda(hipSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestIterCalculate{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpanIter, Compare) {
dh::safe_cuda(hipSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestIterCompare{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpan, AsBytes) {
dh::safe_cuda(hipSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestAsBytes{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpan, AsWritableBytes) {
dh::safe_cuda(hipSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestAsWritableBytes{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
} // namespace common
} // namespace xgboost
| 32017b5dab30ce9cc56bef720d1ad60fca5fbc73.cu | /*!
* Copyright 2018 XGBoost contributors
*/
#include <gtest/gtest.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/copy.h>
#include <thrust/memory.h>
#include "../../../src/common/device_helpers.cuh"
#include <xgboost/span.h>
#include "test_span.h"
namespace xgboost {
namespace common {
struct TestStatus {
private:
int *status_;
public:
TestStatus () {
dh::safe_cuda(cudaMalloc(&status_, sizeof(int)));
int h_status = 1;
dh::safe_cuda(cudaMemcpy(status_, &h_status,
sizeof(int), cudaMemcpyHostToDevice));
}
~TestStatus() {
dh::safe_cuda(cudaFree(status_));
}
int Get() {
int h_status;
dh::safe_cuda(cudaMemcpy(&h_status, status_,
sizeof(int), cudaMemcpyDeviceToHost));
return h_status;
}
int* Data() {
return status_;
}
};
__global__ void TestFromOtherKernel(Span<float> span) {
// don't get optimized out
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= span.size()) {
return;
}
}
// Test converting different T
__global__ void TestFromOtherKernelConst(Span<float const, 16> span) {
// don't get optimized out
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= span.size()) {
return;
}
}
/*!
* \brief Here we just test whether the code compiles.
*/
TEST(GPUSpan, FromOther) {
thrust::host_vector<float> h_vec (16);
std::iota(h_vec.begin(), h_vec.end(), 0);
thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
// dynamic extent
{
Span<float> span (d_vec.data().get(), d_vec.size());
TestFromOtherKernel<<<1, 16>>>(span);
}
{
Span<float> span (d_vec.data().get(), d_vec.size());
TestFromOtherKernelConst<<<1, 16>>>(span);
}
// static extent
{
Span<float, 16> span(d_vec.data().get(), d_vec.data().get() + 16);
TestFromOtherKernel<<<1, 16>>>(span);
}
{
Span<float, 16> span(d_vec.data().get(), d_vec.data().get() + 16);
TestFromOtherKernelConst<<<1, 16>>>(span);
}
}
TEST(GPUSpan, Assignment) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestAssignment{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpan, TestStatus) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestTestStatus{status.Data()});
ASSERT_EQ(status.Get(), -1);
}
template <typename T>
struct TestEqual {
private:
T *lhs_, *rhs_;
int *status_;
public:
TestEqual(T* _lhs, T* _rhs, int * _status) :
lhs_(_lhs), rhs_(_rhs), status_(_status) {}
XGBOOST_DEVICE void operator()(size_t _idx) {
bool res = lhs_[_idx] == rhs_[_idx];
SPAN_ASSERT_TRUE(res, status_);
}
};
TEST(GPUSpan, WithTrust) {
dh::safe_cuda(cudaSetDevice(0));
// Not adviced to initialize span with host_vector, since h_vec.data() is
// a host function.
thrust::host_vector<float> h_vec (16);
std::iota(h_vec.begin(), h_vec.end(), 0);
thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
// Can't initialize span with device_vector, since d_vec.data() is not raw
// pointer
{
Span<float> s (d_vec.data().get(), d_vec.size());
ASSERT_EQ(d_vec.size(), s.size());
ASSERT_EQ(d_vec.data().get(), s.data());
}
{
TestStatus status;
thrust::device_vector<float> d_vec1 (d_vec.size());
thrust::copy(thrust::device, d_vec.begin(), d_vec.end(), d_vec1.begin());
Span<float> s (d_vec1.data().get(), d_vec.size());
dh::LaunchN(16, TestEqual<float>{
thrust::raw_pointer_cast(d_vec1.data()),
s.data(), status.Data()});
ASSERT_EQ(status.Get(), 1);
// FIXME(trivialfis): memory error!
// bool res = thrust::equal(thrust::device,
// d_vec.begin(), d_vec.end(),
// s.begin());
}
}
TEST(GPUSpan, BeginEnd) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestBeginEnd{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpan, RBeginREnd) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestRBeginREnd{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
__global__ void TestModifyKernel(Span<float> span) {
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= span.size()) {
return;
}
span[idx] = span.size() - idx;
}
TEST(GPUSpan, Modify) {
thrust::host_vector<float> h_vec (16);
InitializeRange(h_vec.begin(), h_vec.end());
thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span (d_vec.data().get(), d_vec.size());
TestModifyKernel<<<1, 16>>>(span);
for (size_t i = 0; i < d_vec.size(); ++i) {
ASSERT_EQ(d_vec[i], d_vec.size() - i);
}
}
TEST(GPUSpan, Observers) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestObservers{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpan, Compare) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestIterCompare{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
struct TestElementAccess {
private:
Span<float> span_;
public:
XGBOOST_DEVICE explicit TestElementAccess (Span<float> _span) : span_(_span) {}
XGBOOST_DEVICE float operator()(size_t _idx) {
float tmp = span_[_idx];
return tmp;
}
};
TEST(GPUSpanDeathTest, ElementAccess) {
dh::safe_cuda(cudaSetDevice(0));
auto test_element_access = []() {
thrust::host_vector<float> h_vec (16);
InitializeRange(h_vec.begin(), h_vec.end());
thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span (d_vec.data().get(), d_vec.size());
dh::LaunchN(17, TestElementAccess{span});
};
testing::internal::CaptureStdout();
EXPECT_DEATH(test_element_access(), "");
std::string output = testing::internal::GetCapturedStdout();
}
__global__ void TestFirstDynamicKernel(Span<float> _span) {
_span.first<static_cast<Span<float>::index_type>(-1)>();
}
__global__ void TestFirstStaticKernel(Span<float> _span) {
_span.first(static_cast<Span<float>::index_type>(-1));
}
__global__ void TestLastDynamicKernel(Span<float> _span) {
_span.last<static_cast<Span<float>::index_type>(-1)>();
}
__global__ void TestLastStaticKernel(Span<float> _span) {
_span.last(static_cast<Span<float>::index_type>(-1));
}
TEST(GPUSpanDeathTest, FirstLast) {
// We construct vectors multiple times since thrust can not recover from
// death test.
auto lambda_first_dy = []() {
thrust::host_vector<float> h_vec (4);
InitializeRange(h_vec.begin(), h_vec.end());
thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span (d_vec.data().get(), d_vec.size());
TestFirstDynamicKernel<<<1, 1>>>(span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_first_dy(), "");
std::string output = testing::internal::GetCapturedStdout();
auto lambda_first_static = []() {
thrust::host_vector<float> h_vec (4);
InitializeRange(h_vec.begin(), h_vec.end());
thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span (d_vec.data().get(), d_vec.size());
TestFirstStaticKernel<<<1, 1>>>(span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_first_static(), "");
output = testing::internal::GetCapturedStdout();
auto lambda_last_dy = []() {
thrust::host_vector<float> h_vec (4);
InitializeRange(h_vec.begin(), h_vec.end());
thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span (d_vec.data().get(), d_vec.size());
TestLastDynamicKernel<<<1, 1>>>(span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_last_dy(), "");
output = testing::internal::GetCapturedStdout();
auto lambda_last_static = []() {
thrust::host_vector<float> h_vec (4);
InitializeRange(h_vec.begin(), h_vec.end());
thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span (d_vec.data().get(), d_vec.size());
TestLastStaticKernel<<<1, 1>>>(span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_last_static(), "");
output = testing::internal::GetCapturedStdout();
}
namespace {
void TestFrontBack() {
Span<float> s;
EXPECT_DEATH(
{
// make sure the termination happens inside this test.
try {
dh::LaunchN(1, [=] __device__(size_t) { s.front(); });
dh::safe_cuda(cudaDeviceSynchronize());
dh::safe_cuda(cudaGetLastError());
} catch (dmlc::Error const& e) {
std::terminate();
}
},
"");
EXPECT_DEATH(
{
try {
dh::LaunchN(1, [=] __device__(size_t) { s.back(); });
dh::safe_cuda(cudaDeviceSynchronize());
dh::safe_cuda(cudaGetLastError());
} catch (dmlc::Error const& e) {
std::terminate();
}
},
"");
}
} // namespace
TEST(GPUSpanDeathTest, FrontBack) {
TestFrontBack();
}
__global__ void TestSubspanDynamicKernel(Span<float> _span) {
_span.subspan(16, 0);
}
__global__ void TestSubspanStaticKernel(Span<float> _span) {
_span.subspan<16>();
}
TEST(GPUSpanDeathTest, Subspan) {
auto lambda_subspan_dynamic = []() {
thrust::host_vector<float> h_vec (4);
InitializeRange(h_vec.begin(), h_vec.end());
thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span (d_vec.data().get(), d_vec.size());
TestSubspanDynamicKernel<<<1, 1>>>(span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_subspan_dynamic(), "");
std::string output = testing::internal::GetCapturedStdout();
auto lambda_subspan_static = []() {
thrust::host_vector<float> h_vec (4);
InitializeRange(h_vec.begin(), h_vec.end());
thrust::device_vector<float> d_vec (h_vec.size());
thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin());
Span<float> span (d_vec.data().get(), d_vec.size());
TestSubspanStaticKernel<<<1, 1>>>(span);
};
testing::internal::CaptureStdout();
EXPECT_DEATH(lambda_subspan_static(), "");
output = testing::internal::GetCapturedStdout();
}
TEST(GPUSpanIter, Construct) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestIterConstruct{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpanIter, Ref) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestIterRef{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpanIter, Calculate) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestIterCalculate{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpanIter, Compare) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestIterCompare{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpan, AsBytes) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestAsBytes{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
TEST(GPUSpan, AsWritableBytes) {
dh::safe_cuda(cudaSetDevice(0));
TestStatus status;
dh::LaunchN(16, TestAsWritableBytes{status.Data()});
ASSERT_EQ(status.Get(), 1);
}
} // namespace common
} // namespace xgboost
|
1168043ca77e2af6214a9b408e669b47ca887ab0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <sameli@berkeley.edu>
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-FileType: SOURCE
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the license found in the LICENSE.txt file in the root
* directory of this source tree.
*/
// =======
// Headers
// =======
#include "./cusparse_interface.h"
#include <cassert> // assert
// ==================
// cusparse interface
// ==================
/// \note The implementation in the \c cu file is wrapped inside the
/// namepsace clause. This is not necessary in general, however, it
/// is needed to avoid the old gcc compiler error (this is a gcc
/// bug) which complains "no instance of function template matches
/// the argument list const float".
namespace cusparse_interface
{
// ======================
// create cusparse matrix (float)
// ======================
/// \brief A template wrapper for \c hipsparseSpMatDescr_t for the \c float
/// precision data.
template<>
void create_cusparse_matrix<float>(
hipsparseSpMatDescr_t& cusparse_matrix,
const LongIndexType num_rows,
const LongIndexType num_columns,
const LongIndexType nnz,
float* device_A_data,
LongIndexType* device_A_indices,
LongIndexType* device_A_index_pointer)
{
hipsparseStatus_t status = hipsparseCreateCsr(
&cusparse_matrix, num_rows, num_columns, nnz,
device_A_index_pointer, device_A_indices, device_A_data,
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, HIP_R_32F);
assert(status == HIPSPARSE_STATUS_SUCCESS);
}
// ======================
// create cusparse matrix (double)
// ======================
/// \brief A template wrapper for \c hipsparseSpMatDescr_t for the \c double
/// precision data.
template<>
void create_cusparse_matrix<double>(
hipsparseSpMatDescr_t& cusparse_matrix,
const LongIndexType num_rows,
const LongIndexType num_columns,
const LongIndexType nnz,
double* device_A_data,
LongIndexType* device_A_indices,
LongIndexType* device_A_index_pointer)
{
hipsparseStatus_t status = hipsparseCreateCsr(
&cusparse_matrix, num_rows, num_columns, nnz,
device_A_index_pointer, device_A_indices, device_A_data,
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, HIP_R_64F);
assert(status == HIPSPARSE_STATUS_SUCCESS);
}
// ======================
// create cusparse vector (float)
// ======================
/// \brief A template wrapper for \c hipsparseDnVecDescr_t for the
/// \c float precision data.
///
/// \details Note that according to the cusparse documentation for the
/// function \c hipsparseCreateDnVec, it is safe to use
/// \c const_cast to cast the input vector.
template<>
void create_cusparse_vector<float>(
hipsparseDnVecDescr_t& cusparse_vector,
const LongIndexType vector_size,
float* device_vector)
{
hipsparseStatus_t status = hipsparseCreateDnVec(
&cusparse_vector, vector_size, device_vector, HIP_R_32F);
assert(status == HIPSPARSE_STATUS_SUCCESS);
}
// ======================
// create cusparse vector (double)
// ======================
/// \brief A template wrapper for \c hipsparseDnVecDescr_t for the
/// \c double precision data.
///
/// \details Note that according to the cusparse documentation for the
/// function \c hipsparseCreateDnVec, it is safe to use
/// \c const_cast to cast the input vector.
template<>
void create_cusparse_vector<double>(
hipsparseDnVecDescr_t& cusparse_vector,
const LongIndexType vector_size,
double* device_vector)
{
hipsparseStatus_t status = hipsparseCreateDnVec(
&cusparse_vector, vector_size, device_vector, HIP_R_64F);
assert(status == HIPSPARSE_STATUS_SUCCESS);
}
// =======================
// destroy cusparse matrix
// =======================
/// \brief Destroys cusparse matrix.
///
void destroy_cusparse_matrix(
hipsparseSpMatDescr_t& cusparse_matrix)
{
hipsparseStatus_t status = hipsparseDestroySpMat(cusparse_matrix);
assert(status == HIPSPARSE_STATUS_SUCCESS);
}
// =======================
// destroy cusparse vector
// =======================
/// \brief Destroys cusparse vector.
///
void destroy_cusparse_vector(
hipsparseDnVecDescr_t& cusparse_vector)
{
hipsparseStatus_t status = hipsparseDestroyDnVec(cusparse_vector);
assert(status == HIPSPARSE_STATUS_SUCCESS);
}
// ===========================
// cusparse matrix buffer size (float)
// ===========================
/// \brief A template wrapper for \c cusparseSpMat_buffersize for \c float
/// precision data. This function determines the buffer size needed
/// for matrix-vector multiplication using \c hipsparseSpMV. The
/// output is \c buffer_size variable.
template<>
void cusparse_matrix_buffer_size<float>(
hipsparseHandle_t cusparse_handle,
hipsparseOperation_t cusparse_operation,
const float alpha,
hipsparseSpMatDescr_t cusparse_matrix,
hipsparseDnVecDescr_t cusparse_input_vector,
const float beta,
hipsparseDnVecDescr_t cusparse_output_vector,
hipsparseSpMVAlg_t algorithm,
size_t* buffer_size)
{
hipsparseStatus_t status = hipsparseSpMV_bufferSize(
cusparse_handle, cusparse_operation, &alpha, cusparse_matrix,
cusparse_input_vector, &beta, cusparse_output_vector,
HIP_R_32F, algorithm, buffer_size);
assert(status == HIPSPARSE_STATUS_SUCCESS);
}
// ===========================
// cusparse matrix buffer size (double)
// ===========================
/// \brief A template wrapper for \c cusparseSpMat_buffersize for
/// \c double precision data. This function determines the buffer
/// size needed for matrix-vector multiplication using
/// \c hipsparseSpMV. The output is \c buffer_size variable.
template<>
void cusparse_matrix_buffer_size<double>(
hipsparseHandle_t cusparse_handle,
hipsparseOperation_t cusparse_operation,
const double alpha,
hipsparseSpMatDescr_t cusparse_matrix,
hipsparseDnVecDescr_t cusparse_input_vector,
const double beta,
hipsparseDnVecDescr_t cusparse_output_vector,
hipsparseSpMVAlg_t algorithm,
size_t* buffer_size)
{
hipsparseStatus_t status = hipsparseSpMV_bufferSize(
cusparse_handle, cusparse_operation, &alpha, cusparse_matrix,
cusparse_input_vector, &beta, cusparse_output_vector,
HIP_R_64F, algorithm, buffer_size);
assert(status == HIPSPARSE_STATUS_SUCCESS);
}
// ===============
// cusparse matvec (float)
// ===============
/// \brief A wrapper for \c hipsparseSpMV to perform sparse matrix-vector
/// multiplication uasing \c float precision data.
template<>
void cusparse_matvec<float>(
hipsparseHandle_t cusparse_handle,
hipsparseOperation_t cusparse_operation,
const float alpha,
hipsparseSpMatDescr_t cusparse_matrix,
hipsparseDnVecDescr_t cusparse_input_vector,
const float beta,
hipsparseDnVecDescr_t cusparse_output_vector,
hipsparseSpMVAlg_t algorithm,
void* external_buffer)
{
hipsparseStatus_t status = hipsparseSpMV(cusparse_handle,
cusparse_operation, &alpha,
cusparse_matrix,
cusparse_input_vector, &beta,
cusparse_output_vector,
HIP_R_32F, algorithm,
external_buffer);
assert(status == HIPSPARSE_STATUS_SUCCESS);
}
// ===============
// cusparse matvec (double)
// ===============
/// \brief A wrapper for \c hipsparseSpMV to perform sparse matrix-vector
/// multiplication uasing \c double precision data.
template<>
void cusparse_matvec<double>(
hipsparseHandle_t cusparse_handle,
hipsparseOperation_t cusparse_operation,
const double alpha,
hipsparseSpMatDescr_t cusparse_matrix,
hipsparseDnVecDescr_t cusparse_input_vector,
const double beta,
hipsparseDnVecDescr_t cusparse_output_vector,
hipsparseSpMVAlg_t algorithm,
void* external_buffer)
{
hipsparseStatus_t status = hipsparseSpMV(cusparse_handle,
cusparse_operation, &alpha,
cusparse_matrix,
cusparse_input_vector, &beta,
cusparse_output_vector,
HIP_R_64F, algorithm,
external_buffer);
assert(status == HIPSPARSE_STATUS_SUCCESS);
}
} // namespace cusparse_interface
| 1168043ca77e2af6214a9b408e669b47ca887ab0.cu | /*
* SPDX-FileCopyrightText: Copyright 2021, Siavash Ameli <sameli@berkeley.edu>
* SPDX-License-Identifier: BSD-3-Clause
* SPDX-FileType: SOURCE
*
* This program is free software: you can redistribute it and/or modify it
* under the terms of the license found in the LICENSE.txt file in the root
* directory of this source tree.
*/
// =======
// Headers
// =======
#include "./cusparse_interface.h"
#include <cassert> // assert
// ==================
// cusparse interface
// ==================
/// \note The implementation in the \c cu file is wrapped inside the
/// namepsace clause. This is not necessary in general, however, it
/// is needed to avoid the old gcc compiler error (this is a gcc
/// bug) which complains "no instance of function template matches
/// the argument list const float".
namespace cusparse_interface
{
// ======================
// create cusparse matrix (float)
// ======================
/// \brief A template wrapper for \c cusparseSpMatDescr_t for the \c float
/// precision data.
template<>
void create_cusparse_matrix<float>(
cusparseSpMatDescr_t& cusparse_matrix,
const LongIndexType num_rows,
const LongIndexType num_columns,
const LongIndexType nnz,
float* device_A_data,
LongIndexType* device_A_indices,
LongIndexType* device_A_index_pointer)
{
cusparseStatus_t status = cusparseCreateCsr(
&cusparse_matrix, num_rows, num_columns, nnz,
device_A_index_pointer, device_A_indices, device_A_data,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F);
assert(status == CUSPARSE_STATUS_SUCCESS);
}
// ======================
// create cusparse matrix (double)
// ======================
/// \brief A template wrapper for \c cusparseSpMatDescr_t for the \c double
/// precision data.
template<>
void create_cusparse_matrix<double>(
cusparseSpMatDescr_t& cusparse_matrix,
const LongIndexType num_rows,
const LongIndexType num_columns,
const LongIndexType nnz,
double* device_A_data,
LongIndexType* device_A_indices,
LongIndexType* device_A_index_pointer)
{
cusparseStatus_t status = cusparseCreateCsr(
&cusparse_matrix, num_rows, num_columns, nnz,
device_A_index_pointer, device_A_indices, device_A_data,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F);
assert(status == CUSPARSE_STATUS_SUCCESS);
}
// ======================
// create cusparse vector (float)
// ======================
/// \brief A template wrapper for \c cusparseDnVecDescr_t for the
/// \c float precision data.
///
/// \details Note that according to the cusparse documentation for the
/// function \c cusparseCreateDnVec, it is safe to use
/// \c const_cast to cast the input vector.
template<>
void create_cusparse_vector<float>(
cusparseDnVecDescr_t& cusparse_vector,
const LongIndexType vector_size,
float* device_vector)
{
cusparseStatus_t status = cusparseCreateDnVec(
&cusparse_vector, vector_size, device_vector, CUDA_R_32F);
assert(status == CUSPARSE_STATUS_SUCCESS);
}
// ======================
// create cusparse vector (double)
// ======================
/// \brief A template wrapper for \c cusparseDnVecDescr_t for the
/// \c double precision data.
///
/// \details Note that according to the cusparse documentation for the
/// function \c cusparseCreateDnVec, it is safe to use
/// \c const_cast to cast the input vector.
template<>
void create_cusparse_vector<double>(
cusparseDnVecDescr_t& cusparse_vector,
const LongIndexType vector_size,
double* device_vector)
{
cusparseStatus_t status = cusparseCreateDnVec(
&cusparse_vector, vector_size, device_vector, CUDA_R_64F);
assert(status == CUSPARSE_STATUS_SUCCESS);
}
// =======================
// destroy cusparse matrix
// =======================
/// \brief Destroys cusparse matrix.
///
void destroy_cusparse_matrix(
cusparseSpMatDescr_t& cusparse_matrix)
{
cusparseStatus_t status = cusparseDestroySpMat(cusparse_matrix);
assert(status == CUSPARSE_STATUS_SUCCESS);
}
// =======================
// destroy cusparse vector
// =======================
/// \brief Destroys cusparse vector.
///
void destroy_cusparse_vector(
cusparseDnVecDescr_t& cusparse_vector)
{
cusparseStatus_t status = cusparseDestroyDnVec(cusparse_vector);
assert(status == CUSPARSE_STATUS_SUCCESS);
}
// ===========================
// cusparse matrix buffer size (float)
// ===========================
/// \brief A template wrapper for \c cusparseSpMat_buffersize for \c float
/// precision data. This function determines the buffer size needed
/// for matrix-vector multiplication using \c cusparseSpMV. The
/// output is \c buffer_size variable.
template<>
void cusparse_matrix_buffer_size<float>(
cusparseHandle_t cusparse_handle,
cusparseOperation_t cusparse_operation,
const float alpha,
cusparseSpMatDescr_t cusparse_matrix,
cusparseDnVecDescr_t cusparse_input_vector,
const float beta,
cusparseDnVecDescr_t cusparse_output_vector,
cusparseSpMVAlg_t algorithm,
size_t* buffer_size)
{
cusparseStatus_t status = cusparseSpMV_bufferSize(
cusparse_handle, cusparse_operation, &alpha, cusparse_matrix,
cusparse_input_vector, &beta, cusparse_output_vector,
CUDA_R_32F, algorithm, buffer_size);
assert(status == CUSPARSE_STATUS_SUCCESS);
}
// ===========================
// cusparse matrix buffer size (double)
// ===========================
/// \brief A template wrapper for \c cusparseSpMat_buffersize for
/// \c double precision data. This function determines the buffer
/// size needed for matrix-vector multiplication using
/// \c cusparseSpMV. The output is \c buffer_size variable.
template<>
void cusparse_matrix_buffer_size<double>(
cusparseHandle_t cusparse_handle,
cusparseOperation_t cusparse_operation,
const double alpha,
cusparseSpMatDescr_t cusparse_matrix,
cusparseDnVecDescr_t cusparse_input_vector,
const double beta,
cusparseDnVecDescr_t cusparse_output_vector,
cusparseSpMVAlg_t algorithm,
size_t* buffer_size)
{
cusparseStatus_t status = cusparseSpMV_bufferSize(
cusparse_handle, cusparse_operation, &alpha, cusparse_matrix,
cusparse_input_vector, &beta, cusparse_output_vector,
CUDA_R_64F, algorithm, buffer_size);
assert(status == CUSPARSE_STATUS_SUCCESS);
}
// ===============
// cusparse matvec (float)
// ===============
/// \brief A wrapper for \c cusparseSpMV to perform sparse matrix-vector
/// multiplication uasing \c float precision data.
template<>
void cusparse_matvec<float>(
cusparseHandle_t cusparse_handle,
cusparseOperation_t cusparse_operation,
const float alpha,
cusparseSpMatDescr_t cusparse_matrix,
cusparseDnVecDescr_t cusparse_input_vector,
const float beta,
cusparseDnVecDescr_t cusparse_output_vector,
cusparseSpMVAlg_t algorithm,
void* external_buffer)
{
cusparseStatus_t status = cusparseSpMV(cusparse_handle,
cusparse_operation, &alpha,
cusparse_matrix,
cusparse_input_vector, &beta,
cusparse_output_vector,
CUDA_R_32F, algorithm,
external_buffer);
assert(status == CUSPARSE_STATUS_SUCCESS);
}
// ===============
// cusparse matvec (double)
// ===============
/// \brief A wrapper for \c cusparseSpMV to perform sparse matrix-vector
/// multiplication uasing \c double precision data.
template<>
void cusparse_matvec<double>(
cusparseHandle_t cusparse_handle,
cusparseOperation_t cusparse_operation,
const double alpha,
cusparseSpMatDescr_t cusparse_matrix,
cusparseDnVecDescr_t cusparse_input_vector,
const double beta,
cusparseDnVecDescr_t cusparse_output_vector,
cusparseSpMVAlg_t algorithm,
void* external_buffer)
{
cusparseStatus_t status = cusparseSpMV(cusparse_handle,
cusparse_operation, &alpha,
cusparse_matrix,
cusparse_input_vector, &beta,
cusparse_output_vector,
CUDA_R_64F, algorithm,
external_buffer);
assert(status == CUSPARSE_STATUS_SUCCESS);
}
} // namespace cusparse_interface
|
068adccecadd93733ea4a947ea943d3e63940398.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void saxpy(int n, float, a, float *x, float *y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = a * x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(flaot));
hipMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
// perform SAXPY on 1M elements
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
hipFree(d_x);
hipFree(d_y);
free(x);
free(y);
}
| 068adccecadd93733ea4a947ea943d3e63940398.cu | #include <stdio.h>
__global__ void saxpy(int n, float, a, float *x, float *y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = a * x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(flaot));
cudaMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++)
{
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
// perform SAXPY on 1M elements
saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
}
|
5450515a8a09ffd014864b92b2d431f9191f2a52.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <type_traits>
#include <cmath>
//helper_cuda fue extraido desde "cuda samples"
#include "helper_cuda.h"
#define norm1(x, y) (sqrt(x*x + y*y))
#define norm2(x, y) (fabs(x) + fabs(y))
#define norm3(x, y) (atan(y/x))
//estructura usada para guardar una matriz compatible con cuda
//eval y h_eval son usadas dentro del filtrado
template<class T>
struct Mat
{
T
*ptr;
size_t
*pitch,
*rows,
*cols;
inline __device__ T Get(size_t row, size_t col)
{
return *((T*)((char*)ptr + row * *pitch) + col);
}
inline __device__ T Set(size_t row, size_t col, T value)
{
*((T*)((char*)ptr + row * *pitch) + col) = value;
}
__device__ T eval(Mat<T> ref, size_t i_o, size_t j_o, size_t dh)
{
T result = 0;
size_t i, j, i2, j2;
size_t row_o, col_o;
row_o = i_o - dh;
col_o = j_o - dh;
for (i = row_o, i2 = 0; i <= i_o + dh; i++, i2++)
{
for (j = col_o, j2 = 0; j <= j_o + dh; j++, j2++)
{
result += Get(i, j) * ref.Get(i2, j2);
}
}
return result;
}
inline T h_Get(size_t row, size_t col)
{
return *((T*)((char*)ptr + row * *pitch) + col);
}
inline T h_Set(size_t row, size_t col, T value)
{
*((T*)((char*)ptr + row * *pitch) + col) = value;
}
T h_eval(Mat<T> ref, size_t i_o, size_t j_o, size_t dh)
{
T result = 0;
size_t i, j, i2, j2;
size_t row_o, col_o;
row_o = i_o - dh;
col_o = j_o - dh;
for (i = row_o, i2 = 0; i <= i_o + dh; i++, i2++)
{
for (j = col_o, j2 = 0; j <= j_o + dh; j++, j2++)
{
result += h_Get(i, j) * ref.h_Get(i2, j2);
}
}
return result;
}
};
//crea una matriz "2d" en el device de tamao rows x cols e inicializa con src
template<class T>
static inline Mat<T> createDevMat2d(size_t rows, size_t cols, T *src)
{
Mat<T>
dst;
size_t
pitch,
width,
height;
width = cols * sizeof(T);
height = rows;
checkCudaErrors(hipMallocPitch(&(dst.ptr), &pitch, width, height));
checkCudaErrors(hipMalloc(&(dst.pitch), sizeof(size_t)));
checkCudaErrors(hipMalloc(&(dst.rows), sizeof(size_t)));
checkCudaErrors(hipMalloc(&(dst.cols), sizeof(size_t)));
checkCudaErrors(hipMemcpy2D(dst.ptr, pitch, src, width, width, height, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dst.pitch, &pitch, sizeof(size_t), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dst.rows, &rows, sizeof(size_t), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(dst.cols, &cols, sizeof(size_t), hipMemcpyHostToDevice));
return dst;
}
//crea una matriz "2d" en el host de tamao rows x cols e inicializa con src
template<class T>
static inline Mat<T> createHostMat2d(size_t rows, size_t cols, T *src)
{
Mat<T>
dst;
size_t
pitch,
width,
height;
width = cols * sizeof(T);
height = rows;
dst.ptr = src;
dst.pitch = new size_t[1]{width};
dst.rows = new size_t[1]{rows};
dst.cols = new size_t[1]{cols};
return dst;
}
//realiza la convolucion 2d en el device de img con el nucleo kernel y lo guarda en result
template<class T>
__global__ void conv2d2(Mat<T> img, Mat<T> kernel, T *result)
{
size_t dh;
size_t rows, cols;
size_t tid_x, tid_y, offset_x, offset_y;
rows = *(img.rows);
cols = *(img.cols);
dh = floor(static_cast<T>(*(kernel.rows) / 2));
offset_x = gridDim.x * blockDim.x;
offset_y = gridDim.y * blockDim.y;
for (tid_x = threadIdx.x + blockIdx.x * blockDim.x; tid_x < rows; tid_x += offset_x)
for (tid_y = threadIdx.y + blockIdx.y * blockDim.y; tid_y < cols; tid_y += offset_y)
result[tid_x * cols + tid_y] = img.eval(kernel, tid_x, tid_y, dh);
}
//realiza la convolucion 2d en el host de img con el nucleo kernel y lo guarda en result
template<class T>
static void cpu_conv2d2(Mat<T> img, Mat<T> kernel, T *result)
{
size_t dh;
size_t rows, cols;
size_t tid_x, tid_y;
rows = *(img.rows);
cols = *(img.cols);
dh = floor(static_cast<T>(*(kernel.rows) / 2));
for (tid_x = dh; tid_x < rows - dh; tid_x++)
for (tid_y = dh; tid_y < cols - dh; tid_y++)
result[tid_x * cols + tid_y] = img.h_eval(kernel, tid_x, tid_y, dh);
}
//realiza el filtro de Sobel en el device de img usando gradiente=norm((Gx, Gy)) y lo guada en result
template<class T>
__global__ void sobelFilter(Mat<T> img, Mat<T> Gx, Mat<T> Gy, T *result)
{
size_t dh;
size_t rows, cols;
size_t tid_x, tid_y, offset_x, offset_y;
T x, y;
rows = *(img.rows);
cols = *(img.cols);
dh = floor(static_cast<T>(*(Gx.rows) / 2));
offset_x = gridDim.x * blockDim.x;
offset_y = gridDim.y * blockDim.y;
for (tid_x = threadIdx.x + blockIdx.x * blockDim.x; tid_x < rows; tid_x += offset_x)
for (tid_y = threadIdx.y + blockIdx.y * blockDim.y; tid_y < cols; tid_y += offset_y)
{
x = img.eval(Gx, tid_x, tid_y, dh);
y = img.eval(Gy, tid_x, tid_y, dh);
result[tid_x * cols + tid_y] = static_cast<T>(norm2(x, y));
}
}
//realiza el filtro de Sobel en el host de img usando gradiente=norm((Gx, Gy)) y lo guada en result
template<class T>
static void cpu_sobelFilter(Mat<T> img, Mat<T> Gx, Mat<T> Gy, T *result)
{
size_t dh;
size_t rows, cols;
size_t tid_x, tid_y;
T x, y;
rows = *(img.rows);
cols = *(img.cols);
dh = floor(static_cast<T>(*(Gx.rows) / 2));
for (tid_x = dh; tid_x < rows - dh; tid_x++)
for (tid_y = dh; tid_y < cols - dh; tid_y++)
{
x = img.h_eval(Gx, tid_x, tid_y, dh);
y = img.h_eval(Gy, tid_x, tid_y, dh);
result[tid_x * cols + tid_y] = static_cast<T>(norm2(x, y));
}
}
//extrae el contenido de un cv::Mat_<T> a un puntero a T
template<class T>
static T *Mat2Pointer(cv::Mat img)
{
T *ptr = new T[img.rows * img.cols];
for (int i = 0; i < img.rows; i++)
memcpy(&(ptr[i*img.cols]), img.ptr<T>(i, 0), img.cols * sizeof(T));
return ptr;
}
//copia el contenido de un puntero a T hacia cv::Mat_<T>
//soporta float y uchar
template<class T>
static cv::Mat Pointer2Mat(T *ptr, size_t rows, size_t cols)
{
cv::Mat img;
if (std::is_same<T, float>::value)
img = cv::Mat::zeros(rows, cols, CV_32FC1);
else
img = cv::Mat::zeros(rows, cols, CV_8UC1);
for (int i = 0; i < img.rows; i++)
memcpy(img.ptr<T>(i, 0), &(ptr[i*img.cols]), img.cols * sizeof(T));
return img;
}
typedef float type_ref;
//define las especializaciones de conv2d2 y sobelFilter con type_ref
template __global__ void conv2d2<type_ref>(Mat<type_ref> img, Mat<type_ref> kernel, type_ref *result);
template __global__ void sobelFilter<type_ref>(Mat<type_ref> img, Mat<type_ref> Gx, Mat<type_ref> Gy, type_ref *result);
//prueba de la derivada direcional en x e y y su conbinacion en el filtro de Sobel sobre el device
static void test_gpu()
{
//hipEvent_t start, stop;
cv::Mat
in;
size_t
rows,
cols;
type_ref
*src_img,
*src_kernel_g_x,
*src_kernel_g_y;
Mat<type_ref>
d_img,
d_kernel_g_x,
d_kernel_g_y;
type_ref
*d_result,
*h_result;
dim3
Blocks,
Threads;
float
t_gx = 0;
//hipEventCreate(&start);
//hipEventCreate(&stop);
Blocks = dim3(4, 4);
Threads = dim3(5, 5);
in = cv::imread("modelos.jpg", CV_LOAD_IMAGE_GRAYSCALE);
in.convertTo(in, CV_32FC1);
rows = in.rows;
cols = in.cols;
src_img = Mat2Pointer<type_ref>(in);
d_img = createDevMat2d(rows, cols, src_img);
h_result = new type_ref[rows * cols];
checkCudaErrors(hipMalloc((void **)&d_result, rows * cols * sizeof(type_ref)));
//Gx
printf("Testing Gx-filter..\n");
src_kernel_g_x = new type_ref[3 * 3]{
-1, 0, 1,
-2, 0, 2,
-1, 0, 1};
d_kernel_g_x = createDevMat2d(3, 3, src_kernel_g_x);
//hipEventRecord(start);
conv2d2<type_ref> << <Blocks, Threads >> >(d_img, d_kernel_g_x, d_result);
//checkCudaErrors(hipDeviceSynchronize());
//hipEventRecord(stop);
//hipEventSynchronize(stop);
//hipEventElapsedTime(&t_gx, start, stop);
checkCudaErrors(hipMemcpy(h_result, d_result, rows * cols * sizeof(type_ref), hipMemcpyDeviceToHost));
in = Pointer2Mat<type_ref>(h_result, in.rows, in.cols);
in.convertTo(in, CV_8UC1);
imwrite("Gx.jpg", in);
//Gy
printf("Testing Gy-filter..\n");
src_kernel_g_y = new type_ref[3 * 3]{
1, 2, 1,
0, 0, 0,
-1, -2, -1};
d_kernel_g_y = createDevMat2d(3, 3, src_kernel_g_y);
conv2d2<type_ref> << <Blocks, Threads >> >(d_img, d_kernel_g_y, d_result);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(h_result, d_result, rows * cols * sizeof(type_ref), hipMemcpyDeviceToHost));
in = Pointer2Mat<type_ref>(h_result, in.rows, in.cols);
in.convertTo(in, CV_8UC1);
imwrite("Gy.jpg", in);
//Sobel Filter
printf("Testing gradient..\n");
sobelFilter<type_ref> << <Blocks, Threads >> >(d_img, d_kernel_g_x, d_kernel_g_y, d_result);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(h_result, d_result, rows * cols * sizeof(type_ref), hipMemcpyDeviceToHost));
in = Pointer2Mat<type_ref>(h_result, in.rows, in.cols);
in.convertTo(in, CV_8UC1);
imwrite("SobelFilter.jpg", in);
}
//prueba de la derivada direcional en x e y y su conbinacion en el filtro de Sobel sobre el host
static void test_cpu()
{
cv::Mat
in;
size_t
rows,
cols;
type_ref
*src_img,
*src_kernel_g_x,
*src_kernel_g_y;
Mat<type_ref>
h_img,
kernel_g_x,
kernel_g_y;
type_ref
*d_result,
*h_result;
dim3
Blocks,
Threads(5, 5);
Blocks = dim3(4, 4);
Threads = dim3(5, 5);
in = cv::imread("modelos.jpg", CV_LOAD_IMAGE_GRAYSCALE);
in.convertTo(in, CV_32FC1);
rows = in.rows;
cols = in.cols;
src_img = Mat2Pointer<type_ref>(in);
h_img = createHostMat2d(rows, cols, src_img);
h_result = new type_ref[rows * cols];
src_kernel_g_x = new type_ref[3 * 3]{
-1, 0, 1,
-2, 0, 2,
-1, 0, 1};
src_kernel_g_y = new type_ref[3 * 3]{
1, 2, 1,
0, 0, 0,
-1, -2, -1};
//Gx
printf("Testing Gx-filter..\n");
kernel_g_x = createHostMat2d(3, 3, src_kernel_g_x);
cpu_conv2d2<type_ref>(h_img, kernel_g_x, h_result);
in = Pointer2Mat<type_ref>(h_result, in.rows, in.cols);
in.convertTo(in, CV_8UC1);
imwrite("Gx_cpu.jpg", in);
//return;
//Gy
printf("Testing Gy-filter..\n");
kernel_g_y = createHostMat2d(3, 3, src_kernel_g_y);
cpu_conv2d2<type_ref>(h_img, kernel_g_y, h_result);
in = Pointer2Mat<type_ref>(h_result, in.rows, in.cols);
in.convertTo(in, CV_8UC1);
imwrite("Gy_cpu.jpg", in);
//Sobel Filter
printf("Testing gradient..\n");
cpu_sobelFilter<type_ref>(h_img, kernel_g_x, kernel_g_y, h_result);
in = Pointer2Mat<type_ref>(h_result, in.rows, in.cols);
in.convertTo(in, CV_8UC1);
imwrite("SobelFilter_cpu.jpg", in);
}
int main()
{
printf("******Testing cpu..\n");
test_cpu();
printf("******Testing gpu..\n");
test_gpu();
return 0;
}
| 5450515a8a09ffd014864b92b2d431f9191f2a52.cu | #pragma once
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <type_traits>
#include <cmath>
//helper_cuda fue extraido desde "cuda samples"
#include "helper_cuda.h"
#define norm1(x, y) (sqrt(x*x + y*y))
#define norm2(x, y) (fabs(x) + fabs(y))
#define norm3(x, y) (atan(y/x))
//estructura usada para guardar una matriz compatible con cuda
//eval y h_eval son usadas dentro del filtrado
template<class T>
struct Mat
{
T
*ptr;
size_t
*pitch,
*rows,
*cols;
inline __device__ T Get(size_t row, size_t col)
{
return *((T*)((char*)ptr + row * *pitch) + col);
}
inline __device__ T Set(size_t row, size_t col, T value)
{
*((T*)((char*)ptr + row * *pitch) + col) = value;
}
__device__ T eval(Mat<T> ref, size_t i_o, size_t j_o, size_t dh)
{
T result = 0;
size_t i, j, i2, j2;
size_t row_o, col_o;
row_o = i_o - dh;
col_o = j_o - dh;
for (i = row_o, i2 = 0; i <= i_o + dh; i++, i2++)
{
for (j = col_o, j2 = 0; j <= j_o + dh; j++, j2++)
{
result += Get(i, j) * ref.Get(i2, j2);
}
}
return result;
}
inline T h_Get(size_t row, size_t col)
{
return *((T*)((char*)ptr + row * *pitch) + col);
}
inline T h_Set(size_t row, size_t col, T value)
{
*((T*)((char*)ptr + row * *pitch) + col) = value;
}
T h_eval(Mat<T> ref, size_t i_o, size_t j_o, size_t dh)
{
T result = 0;
size_t i, j, i2, j2;
size_t row_o, col_o;
row_o = i_o - dh;
col_o = j_o - dh;
for (i = row_o, i2 = 0; i <= i_o + dh; i++, i2++)
{
for (j = col_o, j2 = 0; j <= j_o + dh; j++, j2++)
{
result += h_Get(i, j) * ref.h_Get(i2, j2);
}
}
return result;
}
};
//crea una matriz "2d" en el device de tamaņo rows x cols e inicializa con src
template<class T>
static inline Mat<T> createDevMat2d(size_t rows, size_t cols, T *src)
{
Mat<T>
dst;
size_t
pitch,
width,
height;
width = cols * sizeof(T);
height = rows;
checkCudaErrors(cudaMallocPitch(&(dst.ptr), &pitch, width, height));
checkCudaErrors(cudaMalloc(&(dst.pitch), sizeof(size_t)));
checkCudaErrors(cudaMalloc(&(dst.rows), sizeof(size_t)));
checkCudaErrors(cudaMalloc(&(dst.cols), sizeof(size_t)));
checkCudaErrors(cudaMemcpy2D(dst.ptr, pitch, src, width, width, height, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dst.pitch, &pitch, sizeof(size_t), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dst.rows, &rows, sizeof(size_t), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(dst.cols, &cols, sizeof(size_t), cudaMemcpyHostToDevice));
return dst;
}
//crea una matriz "2d" en el host de tamaņo rows x cols e inicializa con src
template<class T>
static inline Mat<T> createHostMat2d(size_t rows, size_t cols, T *src)
{
Mat<T>
dst;
size_t
pitch,
width,
height;
width = cols * sizeof(T);
height = rows;
dst.ptr = src;
dst.pitch = new size_t[1]{width};
dst.rows = new size_t[1]{rows};
dst.cols = new size_t[1]{cols};
return dst;
}
//realiza la convolucion 2d en el device de img con el nucleo kernel y lo guarda en result
template<class T>
__global__ void conv2d2(Mat<T> img, Mat<T> kernel, T *result)
{
size_t dh;
size_t rows, cols;
size_t tid_x, tid_y, offset_x, offset_y;
rows = *(img.rows);
cols = *(img.cols);
dh = floor(static_cast<T>(*(kernel.rows) / 2));
offset_x = gridDim.x * blockDim.x;
offset_y = gridDim.y * blockDim.y;
for (tid_x = threadIdx.x + blockIdx.x * blockDim.x; tid_x < rows; tid_x += offset_x)
for (tid_y = threadIdx.y + blockIdx.y * blockDim.y; tid_y < cols; tid_y += offset_y)
result[tid_x * cols + tid_y] = img.eval(kernel, tid_x, tid_y, dh);
}
//realiza la convolucion 2d en el host de img con el nucleo kernel y lo guarda en result
template<class T>
static void cpu_conv2d2(Mat<T> img, Mat<T> kernel, T *result)
{
size_t dh;
size_t rows, cols;
size_t tid_x, tid_y;
rows = *(img.rows);
cols = *(img.cols);
dh = floor(static_cast<T>(*(kernel.rows) / 2));
for (tid_x = dh; tid_x < rows - dh; tid_x++)
for (tid_y = dh; tid_y < cols - dh; tid_y++)
result[tid_x * cols + tid_y] = img.h_eval(kernel, tid_x, tid_y, dh);
}
//realiza el filtro de Sobel en el device de img usando gradiente=norm((Gx, Gy)) y lo guada en result
template<class T>
__global__ void sobelFilter(Mat<T> img, Mat<T> Gx, Mat<T> Gy, T *result)
{
size_t dh;
size_t rows, cols;
size_t tid_x, tid_y, offset_x, offset_y;
T x, y;
rows = *(img.rows);
cols = *(img.cols);
dh = floor(static_cast<T>(*(Gx.rows) / 2));
offset_x = gridDim.x * blockDim.x;
offset_y = gridDim.y * blockDim.y;
for (tid_x = threadIdx.x + blockIdx.x * blockDim.x; tid_x < rows; tid_x += offset_x)
for (tid_y = threadIdx.y + blockIdx.y * blockDim.y; tid_y < cols; tid_y += offset_y)
{
x = img.eval(Gx, tid_x, tid_y, dh);
y = img.eval(Gy, tid_x, tid_y, dh);
result[tid_x * cols + tid_y] = static_cast<T>(norm2(x, y));
}
}
//realiza el filtro de Sobel en el host de img usando gradiente=norm((Gx, Gy)) y lo guada en result
template<class T>
static void cpu_sobelFilter(Mat<T> img, Mat<T> Gx, Mat<T> Gy, T *result)
{
size_t dh;
size_t rows, cols;
size_t tid_x, tid_y;
T x, y;
rows = *(img.rows);
cols = *(img.cols);
dh = floor(static_cast<T>(*(Gx.rows) / 2));
for (tid_x = dh; tid_x < rows - dh; tid_x++)
for (tid_y = dh; tid_y < cols - dh; tid_y++)
{
x = img.h_eval(Gx, tid_x, tid_y, dh);
y = img.h_eval(Gy, tid_x, tid_y, dh);
result[tid_x * cols + tid_y] = static_cast<T>(norm2(x, y));
}
}
//extrae el contenido de un cv::Mat_<T> a un puntero a T
template<class T>
static T *Mat2Pointer(cv::Mat img)
{
T *ptr = new T[img.rows * img.cols];
for (int i = 0; i < img.rows; i++)
memcpy(&(ptr[i*img.cols]), img.ptr<T>(i, 0), img.cols * sizeof(T));
return ptr;
}
//copia el contenido de un puntero a T hacia cv::Mat_<T>
//soporta float y uchar
template<class T>
static cv::Mat Pointer2Mat(T *ptr, size_t rows, size_t cols)
{
cv::Mat img;
if (std::is_same<T, float>::value)
img = cv::Mat::zeros(rows, cols, CV_32FC1);
else
img = cv::Mat::zeros(rows, cols, CV_8UC1);
for (int i = 0; i < img.rows; i++)
memcpy(img.ptr<T>(i, 0), &(ptr[i*img.cols]), img.cols * sizeof(T));
return img;
}
typedef float type_ref;
//define las especializaciones de conv2d2 y sobelFilter con type_ref
template __global__ void conv2d2<type_ref>(Mat<type_ref> img, Mat<type_ref> kernel, type_ref *result);
template __global__ void sobelFilter<type_ref>(Mat<type_ref> img, Mat<type_ref> Gx, Mat<type_ref> Gy, type_ref *result);
//prueba de la derivada direcional en x e y y su conbinacion en el filtro de Sobel sobre el device
static void test_gpu()
{
//cudaEvent_t start, stop;
cv::Mat
in;
size_t
rows,
cols;
type_ref
*src_img,
*src_kernel_g_x,
*src_kernel_g_y;
Mat<type_ref>
d_img,
d_kernel_g_x,
d_kernel_g_y;
type_ref
*d_result,
*h_result;
dim3
Blocks,
Threads;
float
t_gx = 0;
//cudaEventCreate(&start);
//cudaEventCreate(&stop);
Blocks = dim3(4, 4);
Threads = dim3(5, 5);
in = cv::imread("modelos.jpg", CV_LOAD_IMAGE_GRAYSCALE);
in.convertTo(in, CV_32FC1);
rows = in.rows;
cols = in.cols;
src_img = Mat2Pointer<type_ref>(in);
d_img = createDevMat2d(rows, cols, src_img);
h_result = new type_ref[rows * cols];
checkCudaErrors(cudaMalloc((void **)&d_result, rows * cols * sizeof(type_ref)));
//Gx
printf("Testing Gx-filter..\n");
src_kernel_g_x = new type_ref[3 * 3]{
-1, 0, 1,
-2, 0, 2,
-1, 0, 1};
d_kernel_g_x = createDevMat2d(3, 3, src_kernel_g_x);
//cudaEventRecord(start);
conv2d2<type_ref> << <Blocks, Threads >> >(d_img, d_kernel_g_x, d_result);
//checkCudaErrors(cudaDeviceSynchronize());
//cudaEventRecord(stop);
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&t_gx, start, stop);
checkCudaErrors(cudaMemcpy(h_result, d_result, rows * cols * sizeof(type_ref), cudaMemcpyDeviceToHost));
in = Pointer2Mat<type_ref>(h_result, in.rows, in.cols);
in.convertTo(in, CV_8UC1);
imwrite("Gx.jpg", in);
//Gy
printf("Testing Gy-filter..\n");
src_kernel_g_y = new type_ref[3 * 3]{
1, 2, 1,
0, 0, 0,
-1, -2, -1};
d_kernel_g_y = createDevMat2d(3, 3, src_kernel_g_y);
conv2d2<type_ref> << <Blocks, Threads >> >(d_img, d_kernel_g_y, d_result);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(h_result, d_result, rows * cols * sizeof(type_ref), cudaMemcpyDeviceToHost));
in = Pointer2Mat<type_ref>(h_result, in.rows, in.cols);
in.convertTo(in, CV_8UC1);
imwrite("Gy.jpg", in);
//Sobel Filter
printf("Testing gradient..\n");
sobelFilter<type_ref> << <Blocks, Threads >> >(d_img, d_kernel_g_x, d_kernel_g_y, d_result);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(h_result, d_result, rows * cols * sizeof(type_ref), cudaMemcpyDeviceToHost));
in = Pointer2Mat<type_ref>(h_result, in.rows, in.cols);
in.convertTo(in, CV_8UC1);
imwrite("SobelFilter.jpg", in);
}
//prueba de la derivada direcional en x e y y su conbinacion en el filtro de Sobel sobre el host
static void test_cpu()
{
cv::Mat
in;
size_t
rows,
cols;
type_ref
*src_img,
*src_kernel_g_x,
*src_kernel_g_y;
Mat<type_ref>
h_img,
kernel_g_x,
kernel_g_y;
type_ref
*d_result,
*h_result;
dim3
Blocks,
Threads(5, 5);
Blocks = dim3(4, 4);
Threads = dim3(5, 5);
in = cv::imread("modelos.jpg", CV_LOAD_IMAGE_GRAYSCALE);
in.convertTo(in, CV_32FC1);
rows = in.rows;
cols = in.cols;
src_img = Mat2Pointer<type_ref>(in);
h_img = createHostMat2d(rows, cols, src_img);
h_result = new type_ref[rows * cols];
src_kernel_g_x = new type_ref[3 * 3]{
-1, 0, 1,
-2, 0, 2,
-1, 0, 1};
src_kernel_g_y = new type_ref[3 * 3]{
1, 2, 1,
0, 0, 0,
-1, -2, -1};
//Gx
printf("Testing Gx-filter..\n");
kernel_g_x = createHostMat2d(3, 3, src_kernel_g_x);
cpu_conv2d2<type_ref>(h_img, kernel_g_x, h_result);
in = Pointer2Mat<type_ref>(h_result, in.rows, in.cols);
in.convertTo(in, CV_8UC1);
imwrite("Gx_cpu.jpg", in);
//return;
//Gy
printf("Testing Gy-filter..\n");
kernel_g_y = createHostMat2d(3, 3, src_kernel_g_y);
cpu_conv2d2<type_ref>(h_img, kernel_g_y, h_result);
in = Pointer2Mat<type_ref>(h_result, in.rows, in.cols);
in.convertTo(in, CV_8UC1);
imwrite("Gy_cpu.jpg", in);
//Sobel Filter
printf("Testing gradient..\n");
cpu_sobelFilter<type_ref>(h_img, kernel_g_x, kernel_g_y, h_result);
in = Pointer2Mat<type_ref>(h_result, in.rows, in.cols);
in.convertTo(in, CV_8UC1);
imwrite("SobelFilter_cpu.jpg", in);
}
int main()
{
printf("******Testing cpu..\n");
test_cpu();
printf("******Testing gpu..\n");
test_gpu();
return 0;
}
|
aa5648c97a5500b741699065dd879e522ed61555.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// compute the square of first 64 whole numbers using 64 threads on the device
#include <stdio.h>
__global__ void square(float *d_out,float *d_in)
{
int idx = threadIdx.x;
float f = (float) d_in[idx];
d_out[idx] = f*f;
}
int main(int argc,char* argv[])
{
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
//declaration
float h_in[ARRAY_SIZE] ,h_out[ARRAY_SIZE]; //host
float *d_out ,*d_in; // device
//generate the input
for(int i=0;i<ARRAY_SIZE;i++)
h_in[i] = i;
//allocate memory on the device
hipMalloc((void**) &d_in , ARRAY_BYTES);
hipMalloc((void**) &d_out , ARRAY_BYTES);
//tranfer data host to device
hipMemcpy(d_in,h_in,ARRAY_BYTES,hipMemcpyHostToDevice);
//launch kernel
hipLaunchKernelGGL(( square), dim3(1),dim3(64), 0, 0, d_out,d_in);
//tranfer data form device to host
hipMemcpy(h_out,d_out,ARRAY_BYTES,hipMemcpyDeviceToHost);
//display results
for(int i=0;i<ARRAY_SIZE;i++)
{
printf("%f", h_out[i]);
(i%4 == 0) ? printf("\n") : printf("\t");
}
printf("\n");
} | aa5648c97a5500b741699065dd879e522ed61555.cu | // compute the square of first 64 whole numbers using 64 threads on the device
#include <stdio.h>
__global__ void square(float *d_out,float *d_in)
{
int idx = threadIdx.x;
float f = (float) d_in[idx];
d_out[idx] = f*f;
}
int main(int argc,char* argv[])
{
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
//declaration
float h_in[ARRAY_SIZE] ,h_out[ARRAY_SIZE]; //host
float *d_out ,*d_in; // device
//generate the input
for(int i=0;i<ARRAY_SIZE;i++)
h_in[i] = i;
//allocate memory on the device
cudaMalloc((void**) &d_in , ARRAY_BYTES);
cudaMalloc((void**) &d_out , ARRAY_BYTES);
//tranfer data host to device
cudaMemcpy(d_in,h_in,ARRAY_BYTES,cudaMemcpyHostToDevice);
//launch kernel
square<<<1,64>>>(d_out,d_in);
//tranfer data form device to host
cudaMemcpy(h_out,d_out,ARRAY_BYTES,cudaMemcpyDeviceToHost);
//display results
for(int i=0;i<ARRAY_SIZE;i++)
{
printf("%f", h_out[i]);
(i%4 == 0) ? printf("\n") : printf("\t");
}
printf("\n");
} |
2a0f7f038993202b34f240a764ffcde139f39255.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Travail ralis en binme par Corentin Ballot et Camille Simon
//
// Pour le bien de vos yeux, il est fortement conseill de modifier
// les couleurs avant excution
//
// Pour chaque jeu de vitesse de diffusion nous avons prvu un
// jeu de couleur assorti
// Retirez les commantaires des vitesses de diffusion pour dcouvrir
// un affichage pens rien que pour vous
#include "gpu_bitmap.h"
#define WIDTH 800
#define HEIGHT 600
#define DIM 16
#define TAUX_REACTION_A 0.04f
#define TAUX_REACTION_I (TAUX_REACTION_A / 200)
#define TAUX_RESORPTION_A 0.06f
#define TAUX_RESORPTION_I TAUX_RESORPTION_A
// Lampe lave
//#define VITESSE_DIFFUSION_A 50
//#define VITESSE_DIFFUSION_I 55
// Lopard revisit
//#define VITESSE_DIFFUSION_A 2
//#define VITESSE_DIFFUSION_I 22
// Annes yeahyeah
#define VITESSE_DIFFUSION_A 5
#define VITESSE_DIFFUSION_I 10
#define TAUX_DIFFUSION_A 0.065f
#define TAUX_DIFFUSION_I 0.04f
#define SEUIL 130
__global__ void color(float *t, uchar4 *buf) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < WIDTH && y < HEIGHT) {
int offset = y * WIDTH + x;
float t3 = t[offset];
float r, g, b;
switch(VITESSE_DIFFUSION_A){
case 5 :
if (t3 < SEUIL) {
r = 255; g = 0; b = 127;
} else {
r = 0; g = 47; b = 167;
}
break;
case 2 :
if (t3 < SEUIL) {
r = 240; g = 195; b = 0;
} else {
r = 63; g = 34; b = 4;
}
break;
case 50 :
if (t3 < SEUIL) {
r = 128; g = 0; b = 128;
} else {
r = 223; g = 109; b = 20;
}
break;
}
buf[offset].x = r;
buf[offset].y = g;
buf[offset].z = b;
buf[offset].w = 255;
}
}
__global__ void reaction(float *a, float *a1, float *i, float *i1) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < WIDTH && y < HEIGHT) {
int offset = y * WIDTH + x;
// A est catalys par A et inhib par I
a1[offset] = a[offset] + TAUX_REACTION_A * a[offset] * a[offset] / i[offset];
// I est catalys par A
i1[offset] = i[offset] + TAUX_REACTION_I * a[offset] * a[offset];
// la raction consomme une certaine quantit de A et de I
a1[offset] = (1 - TAUX_RESORPTION_A) * a1[offset];
i1[offset] = (1 - TAUX_RESORPTION_I) * i1[offset];
}
}
__global__ void diffusion(float *grille, float taux_diffusion) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < WIDTH && y < HEIGHT) {
int offset = y * WIDTH + x;
int top = y == HEIGHT - 1 ? offset : offset + WIDTH;
int bottom = y == 0 ? offset : offset - WIDTH;
int left = x == 0 ? offset : offset - 1;
int right = x == WIDTH - 1 ? offset : offset + 1;
grille[offset] = (1 - 4 * taux_diffusion) * grille[offset] +
taux_diffusion * (grille[top] + grille[bottom] + grille[left] + grille[right]);
}
}
struct Data {
float *A;
float *A_next;
float *I;
float *I_next;
dim3 blocks;
dim3 threads;
};
void render_callback(uchar4 *buf, Data *data, int ticks) {
hipLaunchKernelGGL(( reaction), dim3(data->blocks), dim3(data->threads), 0, 0, data->A, data->A_next, data->I, data->I_next);
hipLaunchKernelGGL(( reaction), dim3(data->blocks), dim3(data->threads), 0, 0, data->A_next, data->A, data->I_next, data->I);
for (int s = 0; s < VITESSE_DIFFUSION_A; s++)
hipLaunchKernelGGL(( diffusion), dim3(data->blocks), dim3(data->threads), 0, 0, data->A, TAUX_DIFFUSION_A);
for (int s = 0; s < VITESSE_DIFFUSION_I; s++)
hipLaunchKernelGGL(( diffusion), dim3(data->blocks), dim3(data->threads), 0, 0, data->I, TAUX_DIFFUSION_I);
hipLaunchKernelGGL(( color), dim3(data->blocks), dim3(data->threads), 0, 0, data->A_next, buf);
}
void clean_callback(Data *data) {
HANDLE_CUDA_ERR(hipFree(data->A));
HANDLE_CUDA_ERR(hipFree(data->I));
}
int main() {
Data data;
GPUBitmap bitmap(WIDTH, HEIGHT, &data, "Heat");
size_t size = WIDTH * HEIGHT * sizeof(float);
float *a_initial = (float *)calloc(WIDTH * HEIGHT, sizeof(float));
float *i_initial = (float *)calloc(WIDTH * HEIGHT, sizeof(float));
for (int y = 0; y < HEIGHT * WIDTH + WIDTH; y++) {
a_initial[y] = (rand() % 100) + 1;
i_initial[y] = (rand() % 100) + 1;
}
data.blocks = dim3((WIDTH + DIM - 1) / DIM, (HEIGHT + DIM - 1) / DIM);
data.threads = dim3(DIM, DIM);
HANDLE_CUDA_ERR(hipMalloc(&data.A, size));
HANDLE_CUDA_ERR(hipMalloc(&data.I, size));
HANDLE_CUDA_ERR(hipMalloc(&data.A_next, size));
HANDLE_CUDA_ERR(hipMalloc(&data.I_next, size));
HANDLE_CUDA_ERR(hipMemcpy(data.A, a_initial, size, hipMemcpyHostToDevice));
HANDLE_CUDA_ERR(hipMemcpy(data.I, i_initial, size, hipMemcpyHostToDevice));
bitmap.animate((void (*)(uchar4*, void*, int))render_callback, (void (*)(void*))clean_callback);
return 0;
} | 2a0f7f038993202b34f240a764ffcde139f39255.cu | // Travail réalisé en binôme par Corentin Ballot et Camille Simon
//
// Pour le bien de vos yeux, il est fortement conseillé de modifier
// les couleurs avant exécution
//
// Pour chaque jeu de vitesse de diffusion nous avons prévu un
// jeu de couleur assorti
// Retirez les commantaires des vitesses de diffusion pour découvrir
// un affichage pensé rien que pour vous
#include "gpu_bitmap.h"
#define WIDTH 800
#define HEIGHT 600
#define DIM 16
#define TAUX_REACTION_A 0.04f
#define TAUX_REACTION_I (TAUX_REACTION_A / 200)
#define TAUX_RESORPTION_A 0.06f
#define TAUX_RESORPTION_I TAUX_RESORPTION_A
// Lampe à lave
//#define VITESSE_DIFFUSION_A 50
//#define VITESSE_DIFFUSION_I 55
// Léopard revisité
//#define VITESSE_DIFFUSION_A 2
//#define VITESSE_DIFFUSION_I 22
// Années yeahyeah
#define VITESSE_DIFFUSION_A 5
#define VITESSE_DIFFUSION_I 10
#define TAUX_DIFFUSION_A 0.065f
#define TAUX_DIFFUSION_I 0.04f
#define SEUIL 130
__global__ void color(float *t, uchar4 *buf) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < WIDTH && y < HEIGHT) {
int offset = y * WIDTH + x;
float t3 = t[offset];
float r, g, b;
switch(VITESSE_DIFFUSION_A){
case 5 :
if (t3 < SEUIL) {
r = 255; g = 0; b = 127;
} else {
r = 0; g = 47; b = 167;
}
break;
case 2 :
if (t3 < SEUIL) {
r = 240; g = 195; b = 0;
} else {
r = 63; g = 34; b = 4;
}
break;
case 50 :
if (t3 < SEUIL) {
r = 128; g = 0; b = 128;
} else {
r = 223; g = 109; b = 20;
}
break;
}
buf[offset].x = r;
buf[offset].y = g;
buf[offset].z = b;
buf[offset].w = 255;
}
}
__global__ void reaction(float *a, float *a1, float *i, float *i1) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < WIDTH && y < HEIGHT) {
int offset = y * WIDTH + x;
// A est catalysé par A et inhibé par I
a1[offset] = a[offset] + TAUX_REACTION_A * a[offset] * a[offset] / i[offset];
// I est catalysé par A
i1[offset] = i[offset] + TAUX_REACTION_I * a[offset] * a[offset];
// la réaction consomme une certaine quantité de A et de I
a1[offset] = (1 - TAUX_RESORPTION_A) * a1[offset];
i1[offset] = (1 - TAUX_RESORPTION_I) * i1[offset];
}
}
__global__ void diffusion(float *grille, float taux_diffusion) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < WIDTH && y < HEIGHT) {
int offset = y * WIDTH + x;
int top = y == HEIGHT - 1 ? offset : offset + WIDTH;
int bottom = y == 0 ? offset : offset - WIDTH;
int left = x == 0 ? offset : offset - 1;
int right = x == WIDTH - 1 ? offset : offset + 1;
grille[offset] = (1 - 4 * taux_diffusion) * grille[offset] +
taux_diffusion * (grille[top] + grille[bottom] + grille[left] + grille[right]);
}
}
struct Data {
float *A;
float *A_next;
float *I;
float *I_next;
dim3 blocks;
dim3 threads;
};
void render_callback(uchar4 *buf, Data *data, int ticks) {
reaction<<<data->blocks, data->threads>>>(data->A, data->A_next, data->I, data->I_next);
reaction<<<data->blocks, data->threads>>>(data->A_next, data->A, data->I_next, data->I);
for (int s = 0; s < VITESSE_DIFFUSION_A; s++)
diffusion<<<data->blocks, data->threads>>>(data->A, TAUX_DIFFUSION_A);
for (int s = 0; s < VITESSE_DIFFUSION_I; s++)
diffusion<<<data->blocks, data->threads>>>(data->I, TAUX_DIFFUSION_I);
color<<<data->blocks, data->threads>>>(data->A_next, buf);
}
void clean_callback(Data *data) {
HANDLE_CUDA_ERR(cudaFree(data->A));
HANDLE_CUDA_ERR(cudaFree(data->I));
}
int main() {
Data data;
GPUBitmap bitmap(WIDTH, HEIGHT, &data, "Heat");
size_t size = WIDTH * HEIGHT * sizeof(float);
float *a_initial = (float *)calloc(WIDTH * HEIGHT, sizeof(float));
float *i_initial = (float *)calloc(WIDTH * HEIGHT, sizeof(float));
for (int y = 0; y < HEIGHT * WIDTH + WIDTH; y++) {
a_initial[y] = (rand() % 100) + 1;
i_initial[y] = (rand() % 100) + 1;
}
data.blocks = dim3((WIDTH + DIM - 1) / DIM, (HEIGHT + DIM - 1) / DIM);
data.threads = dim3(DIM, DIM);
HANDLE_CUDA_ERR(cudaMalloc(&data.A, size));
HANDLE_CUDA_ERR(cudaMalloc(&data.I, size));
HANDLE_CUDA_ERR(cudaMalloc(&data.A_next, size));
HANDLE_CUDA_ERR(cudaMalloc(&data.I_next, size));
HANDLE_CUDA_ERR(cudaMemcpy(data.A, a_initial, size, cudaMemcpyHostToDevice));
HANDLE_CUDA_ERR(cudaMemcpy(data.I, i_initial, size, cudaMemcpyHostToDevice));
bitmap.animate((void (*)(uchar4*, void*, int))render_callback, (void (*)(void*))clean_callback);
return 0;
} |
6d1560be18a5b1558b87cf7636775a3bbefaceb4.hip | // !!! This is a file automatically generated by hipify!!!
/*
* ExcuteConstraint.cu
*
* *************** Notice ***************
* Auto Generated By ATPC on:2019-03-07 17:05:30
* Author: ZhangHui
*
*/
#include <iostream>
#include "./../ConstraintParser/ConstraintParameter.cuh"
#include "./../model/Coodinate.cuh"
#include "./../model/Interval.cuh"
#include "./../model/Priority.cuh"
#include "./../model/FullCoveredInfo.cuh"
#include "./../model/PredictValue.cuh"
#include "./../model/PredictValueWithOne.cuh"
#include "./../solver/type.h"
#include "./../solver/ATG.h"
#include "./../solver/PCATG.h"
#include "./../solver/ConstantValue.h"
#include "ExcuteConstraint.cuh"
#include "HardwareStrategy.cuh"
#include "ParallelATG.cuh"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.hpp"
#include "math_functions.h"
#include <stdio.h>
using namespace std;
/*
*
* */
/*
*
* 1
* 2if
*
* :
* getRuntimeValue_i_jij
* */
/*
* 0CUDA
* */
__device__ void getRuntimeValue_0_0(FloatType DOUBLE_doubleA , FloatType DOUBLE_doubleB , Coodinate* res )
{
res->y = ( DOUBLE_doubleA ) - ( 1.1000000000000001E+0 );
res->isCovered = (bool)(res->y != 0.f);
res->isValid = (bool)(isfinite(res->y));
return ;
}
/*
* 1CUDA
* */
__device__ void getRuntimeValue_0_1(FloatType DOUBLE_doubleA , FloatType DOUBLE_doubleB , Coodinate* res )
{
res->y = ( -1.0E+0 ) - ( DOUBLE_doubleB );
res->isCovered = (bool)(res->y >= 0.f);
res->isValid = (bool)(isfinite(res->y));
return ;
}
/*
* 2CUDA
* */
__device__ void getRuntimeValue_0_2(FloatType DOUBLE_doubleA , FloatType DOUBLE_doubleB , Coodinate* res )
{
res->y = ( -1.0E+0 ) - ( DOUBLE_doubleA );
res->isCovered = (bool)(res->y > 0.f);
res->isValid = (bool)(isfinite(res->y));
return ;
}
/*
 * Launch wrappers: one kernel per (sub-constraint, search-direction) pair
 * (for n sub-constraints and m directions there are m*n kernels).
 * One thread evaluates one candidate point; `base` selects the row slice of
 * dev_predictArray belonging to the sub-constraint, `Size` is the row length.
 */
/* sub-constraint 0, direction 0: the candidate value feeds operand A */
__global__ void calaConstraint_0_0_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= Size) return;	// guard the grid tail
	Coodinate* cell = dev_predictArray + base + gid;
	getRuntimeValue_0_0(cell->x, dev_parameter[1], cell);
}
/* sub-constraint 0, direction 1: the candidate value feeds operand B */
__global__ void calaConstraint_0_0_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= Size) return;
	Coodinate* cell = dev_predictArray + base + gid;
	getRuntimeValue_0_0(dev_parameter[0], cell->x, cell);
}
/* sub-constraint 1, direction 0 */
__global__ void calaConstraint_0_1_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= Size) return;
	Coodinate* cell = dev_predictArray + base + gid;
	getRuntimeValue_0_1(cell->x, dev_parameter[1], cell);
}
/* sub-constraint 1, direction 1 */
__global__ void calaConstraint_0_1_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= Size) return;
	Coodinate* cell = dev_predictArray + base + gid;
	getRuntimeValue_0_1(dev_parameter[0], cell->x, cell);
}
/* sub-constraint 2, direction 0 */
__global__ void calaConstraint_0_2_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= Size) return;
	Coodinate* cell = dev_predictArray + base + gid;
	getRuntimeValue_0_2(cell->x, dev_parameter[1], cell);
}
/* sub-constraint 2, direction 1 */
__global__ void calaConstraint_0_2_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= Size) return;
	Coodinate* cell = dev_predictArray + base + gid;
	getRuntimeValue_0_2(dev_parameter[0], cell->x, cell);
}
/*
 * Host dispatcher: evaluates every sub-constraint of the compound constraint
 * for all `col` candidate points. Each sub-constraint's kernel is launched on
 * its own stream so the three row evaluations can overlap; synStream() joins
 * them before returning. paraIndex selects the search direction, i.e. which
 * operand of the generated evaluators the candidate values feed.
 */
void calaRuntimeValue(int paraIndex,Coodinate* dev_predictArray,FloatType* dev_parameter,const int row,const int col)
{
	Block res = HardwareStrategy::getHardwareStrategy(col);
	// dispatch on the search direction
	if(paraIndex == 0)
	{
		hipLaunchKernelGGL(( calaConstraint_0_0_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[0], dev_predictArray,dev_parameter,0*col,col);
		hipLaunchKernelGGL(( calaConstraint_0_1_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[1], dev_predictArray,dev_parameter,1*col,col);
		hipLaunchKernelGGL(( calaConstraint_0_2_0), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[2], dev_predictArray,dev_parameter,2*col,col);
	}
	else if(paraIndex == 1)
	{
		hipLaunchKernelGGL(( calaConstraint_0_0_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[0], dev_predictArray,dev_parameter,0*col,col);
		hipLaunchKernelGGL(( calaConstraint_0_1_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[1], dev_predictArray,dev_parameter,1*col,col);
		hipLaunchKernelGGL(( calaConstraint_0_2_1), dim3(res.NumOfBlock) , dim3(res.ThreadPreBlock) , 0 , ParallelATG::stream[2], dev_predictArray,dev_parameter,2*col,col);
	}
	else
	{
		cout<<"************ You Should Never Get Here. In Function Of: void calaRuntimeValue(int paraIndex,Coodinate* dev_predictArray,FloatType* dev_parameter,const int row,const int col)"<<endl;
	}
	// join the per-stream launches before the caller reads the results
	ParallelATG::synStream();
}
/*
 * Marks candidate i as a feasible solution when all three sub-constraint
 * rows report it covered; validity likewise requires all three rows finite.
 * vaildNum is the 0/1 integer form of isVaild, ready for later summation.
 */
__global__ void calaFeasibleSolution(FullCoveredInfo* dev_coveredInfo,Coodinate* dev_predictArray,const int Size)
{
	int i = threadIdx.x + blockIdx.x*blockDim.x;
	if (i < Size)
	{
		dev_coveredInfo[i].index = i;	// record the original position of this candidate
		dev_coveredInfo[i].isCovered = dev_predictArray[i + 0*Size].isCovered && dev_predictArray[i + 1*Size].isCovered && dev_predictArray[i + 2*Size].isCovered;
		dev_coveredInfo[i].isVaild = dev_predictArray[i + 0*Size].isValid && dev_predictArray[i + 1*Size].isValid && dev_predictArray[i + 2*Size].isValid;
		dev_coveredInfo[i].vaildNum = (int)(dev_coveredInfo[i].isVaild == true);
	}
}
/*
 * Intersects, per candidate i, the three per-sub-constraint intervals into
 * one final interval: left = max of the lefts, right = min of the rights,
 * valid only when all three inputs carried an interval.
 * NOTE(review): the loop starts at i==1, so element 0 is never written —
 * presumably slot 0 is reserved by the caller; confirm before reusing.
 * NOTE(review): fmaxf/fminf are single-precision; if FloatType is double the
 * bounds are silently rounded to float — confirm this is intended.
 */
__global__ void calaFinalIntervel(Interval* dev_finalIntervel,Interval* dev_interval,const int calaArraySize)
{
	int i = threadIdx.x + blockIdx.x*blockDim.x;
	bool condition = (i>=1) && (i<calaArraySize);
	if(condition)
	{
		// the three source intervals for candidate i, one per sub-constraint row
		Interval* a0 = dev_interval + i + calaArraySize * 0;
		Interval* a1 = dev_interval + i + calaArraySize * 1;
		Interval* a2 = dev_interval + i + calaArraySize * 2;
		FloatType left = a0->left;
		left = fmaxf( left , a1->left);
		left = fmaxf( left , a2->left);
		FloatType right = a0->right;
		right = fminf( right , a1->right);
		right = fminf( right , a2->right);
		bool hasIntervel = a0->hasIntervel && a1->hasIntervel && a2->hasIntervel;
		dev_finalIntervel[i].left = left;
		dev_finalIntervel[i].right = right;
		dev_finalIntervel[i].hasIntervel = hasIntervel;
		// debug print of the intersection result (kept disabled)
		//printf("(%f , %f ) (%f , %f ) (%f , %f ) Final %d (%f , %f)\n",a1->left,a1->right,a2->left,a2->right,a3->left,a3->right,hasIntervel,left,right);
	}
}
/*
 * Broadcasts each predicted value into the x-coordinate of all three
 * sub-constraint rows of the prediction matrix (row stride = Size).
 */
__global__ void generatePredictMat(Coodinate* dev_predictArray,PredictValueWithOne* dev_finalAllPredictValue,const int Size)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= Size) return;	// guard the grid tail
	Coodinate* col = dev_predictArray + gid;
	col[0 * Size].x = dev_finalAllPredictValue[gid].value;
	col[1 * Size].x = dev_finalAllPredictValue[gid].value;
	col[2 * Size].x = dev_finalAllPredictValue[gid].value;
}
/*
 * Computes a search priority for every predicted candidate in parallel.
 * Per sub-constraint: a covered constraint contributes 1; an uncovered but
 * valid one contributes 1/(1+|residual|) (larger the closer the residual is
 * to the boundary); the sum is normalised by `row`. The printf flags fully
 * covered points as "Wrong" — presumably those should have been filtered out
 * before this kernel runs; confirm against the caller.
 */
__global__ void calaPriority(Priority* dev_priority,Coodinate* dev_calaArray,const int row,const int Size)
{
	int i = threadIdx.x + blockIdx.x*blockDim.x;
	if( i < Size )
	{
		FloatType pri = 0.0;
		// the candidate's entry in each of the three sub-constraint rows
		Coodinate* a0 = dev_calaArray + i + 0 * Size;
		Coodinate* a1 = dev_calaArray + i + 1 * Size;
		Coodinate* a2 = dev_calaArray + i + 2 * Size;
		if(a0->isCovered==true) pri = pri + 1.f;
		else if(a0->isValid==true) pri = pri + 1.f/(1.f+fabsf(a0->y));
		if(a1->isCovered==true) pri = pri + 1.f;
		else if(a1->isValid==true) pri = pri + 1.f/(1.f+fabsf(a1->y));
		if(a2->isCovered==true) pri = pri + 1.f;
		else if(a2->isValid==true) pri = pri + 1.f/(1.f+fabsf(a2->y));
		dev_priority[i].priority = pri / (FloatType)row;
		dev_priority[i].x = a0->x;
		// diagnostic code below
		bool isOne = (a0->x == a1->x) && (a1->x == a2->x);
		bool isCovered = a0->isCovered && a1->isCovered && a2->isCovered;
		bool isValid= a0->isValid && a1->isValid && a2->isValid;
		if(isCovered == true)
		{
			printf("Cala Prioruty Wrong,index:%d: (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , isOne:%d , isCovered:%d , isValid:%d \n",i,a0->x,a0->y,a0->isCovered,a0->isValid,a1->x,a1->y,a1->isCovered,a1->isValid,a2->x,a2->y,a2->isCovered,a2->isValid,isOne,isCovered,isValid);
		}
	}
}
| 6d1560be18a5b1558b87cf7636775a3bbefaceb4.cu | /*
* ExcuteConstraint.cu
*
* *************** Notice ***************
* Auto Generated By ATPC on:2019-03-07 17:05:30
* Author: ZhangHui
*
*/
#include <iostream>
#include "./../ConstraintParser/ConstraintParameter.cuh"
#include "./../model/Coodinate.cuh"
#include "./../model/Interval.cuh"
#include "./../model/Priority.cuh"
#include "./../model/FullCoveredInfo.cuh"
#include "./../model/PredictValue.cuh"
#include "./../model/PredictValueWithOne.cuh"
#include "./../solver/type.h"
#include "./../solver/ATG.h"
#include "./../solver/PCATG.h"
#include "./../solver/ConstantValue.h"
#include "ExcuteConstraint.cuh"
#include "HardwareStrategy.cuh"
#include "ParallelATG.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.hpp"
#include "math_functions.h"
#include <stdio.h>
using namespace std;
/*
* 这个文件是函数获取运行时刻的各种函数的声明
* */
/*
* 注意这里计算有两部的计算优化:
* 1)在计算运行时刻值的时候,顺便把子约束满足情况计算了
* 2)计算子约束的满足情况的时候没有使用if等判断分支结构,
* 同时使用到已经计算好的运行时刻值去减少复杂的浮点数计算过程
* 这里采用的编码函数命名编码规则是这样的:
* getRuntimeValue_i_j表示计算第i个析取范式的第j个约束的运行时刻值
* */
/*
 * CUDA evaluator for sub-constraint 0 of disjunct 0: "doubleA != 1.1".
 * Writes the residual (doubleA - 1.1) into res->y, the satisfaction flag of
 * the "!=" relation into res->isCovered, and finiteness (no NaN/Inf) into
 * res->isValid. DOUBLE_doubleB is unused; all generated evaluators share one
 * signature so they are interchangeable.
 */
__device__ void getRuntimeValue_0_0(FloatType DOUBLE_doubleA , FloatType DOUBLE_doubleB , Coodinate* res )
{
	res->y = ( DOUBLE_doubleA ) - ( 1.1000000000000001E+0 );
	res->isCovered = (bool)(res->y != 0.f);
	res->isValid = (bool)(isfinite(res->y));
	return ;
}
/*
 * CUDA evaluator for sub-constraint 1: "-1.0 - doubleB >= 0", i.e.
 * doubleB <= -1. DOUBLE_doubleA is unused.
 */
__device__ void getRuntimeValue_0_1(FloatType DOUBLE_doubleA , FloatType DOUBLE_doubleB , Coodinate* res )
{
	res->y = ( -1.0E+0 ) - ( DOUBLE_doubleB );
	res->isCovered = (bool)(res->y >= 0.f);
	res->isValid = (bool)(isfinite(res->y));
	return ;
}
/*
 * CUDA evaluator for sub-constraint 2: "-1.0 - doubleA > 0", i.e.
 * doubleA < -1. DOUBLE_doubleB is unused.
 */
__device__ void getRuntimeValue_0_2(FloatType DOUBLE_doubleA , FloatType DOUBLE_doubleB , Coodinate* res )
{
	res->y = ( -1.0E+0 ) - ( DOUBLE_doubleA );
	res->isCovered = (bool)(res->y > 0.f);
	res->isValid = (bool)(isfinite(res->y));
	return ;
}
/*
 * Launch wrappers: one kernel per (sub-constraint, search-direction) pair
 * (for n sub-constraints and m directions there are m*n kernels).
 * One thread evaluates one candidate point; `base` selects the row slice of
 * dev_predictArray belonging to the sub-constraint, `Size` is the row length.
 */
/* sub-constraint 0, direction 0: the candidate value feeds operand A */
__global__ void calaConstraint_0_0_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= Size) return;	// guard the grid tail
	Coodinate* cell = dev_predictArray + base + gid;
	getRuntimeValue_0_0(cell->x, dev_parameter[1], cell);
}
/* sub-constraint 0, direction 1: the candidate value feeds operand B */
__global__ void calaConstraint_0_0_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= Size) return;
	Coodinate* cell = dev_predictArray + base + gid;
	getRuntimeValue_0_0(dev_parameter[0], cell->x, cell);
}
/* sub-constraint 1, direction 0 */
__global__ void calaConstraint_0_1_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= Size) return;
	Coodinate* cell = dev_predictArray + base + gid;
	getRuntimeValue_0_1(cell->x, dev_parameter[1], cell);
}
/* sub-constraint 1, direction 1 */
__global__ void calaConstraint_0_1_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= Size) return;
	Coodinate* cell = dev_predictArray + base + gid;
	getRuntimeValue_0_1(dev_parameter[0], cell->x, cell);
}
/* sub-constraint 2, direction 0 */
__global__ void calaConstraint_0_2_0(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= Size) return;
	Coodinate* cell = dev_predictArray + base + gid;
	getRuntimeValue_0_2(cell->x, dev_parameter[1], cell);
}
/* sub-constraint 2, direction 1 */
__global__ void calaConstraint_0_2_1(Coodinate* dev_predictArray,FloatType* dev_parameter,const int base,const int Size)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= Size) return;
	Coodinate* cell = dev_predictArray + base + gid;
	getRuntimeValue_0_2(dev_parameter[0], cell->x, cell);
}
/*
 * Host dispatcher: evaluates every sub-constraint of the compound constraint
 * for all `col` candidate points. Each sub-constraint's kernel is launched on
 * its own CUDA stream so the three row evaluations can overlap; synStream()
 * joins them before returning. paraIndex selects the search direction, i.e.
 * which operand of the generated evaluators the candidate values feed.
 */
void calaRuntimeValue(int paraIndex,Coodinate* dev_predictArray,FloatType* dev_parameter,const int row,const int col)
{
	Block res = HardwareStrategy::getHardwareStrategy(col);
	// dispatch on the search direction
	if(paraIndex == 0)
	{
		calaConstraint_0_0_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[0]>>>(dev_predictArray,dev_parameter,0*col,col);
		calaConstraint_0_1_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[1]>>>(dev_predictArray,dev_parameter,1*col,col);
		calaConstraint_0_2_0<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[2]>>>(dev_predictArray,dev_parameter,2*col,col);
	}
	else if(paraIndex == 1)
	{
		calaConstraint_0_0_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[0]>>>(dev_predictArray,dev_parameter,0*col,col);
		calaConstraint_0_1_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[1]>>>(dev_predictArray,dev_parameter,1*col,col);
		calaConstraint_0_2_1<<<res.NumOfBlock , res.ThreadPreBlock , 0 , ParallelATG::stream[2]>>>(dev_predictArray,dev_parameter,2*col,col);
	}
	else
	{
		cout<<"************ You Should Never Get Here. In Function Of: void calaRuntimeValue(int paraIndex,Coodinate* dev_predictArray,FloatType* dev_parameter,const int row,const int col)"<<endl;
	}
	// join the per-stream launches before the caller reads the results
	ParallelATG::synStream();
}
/*
 * Marks candidate i as a feasible solution when all three sub-constraint
 * rows report it covered; validity likewise requires all three rows finite.
 * vaildNum is the 0/1 integer form of isVaild, ready for later summation.
 */
__global__ void calaFeasibleSolution(FullCoveredInfo* dev_coveredInfo,Coodinate* dev_predictArray,const int Size)
{
	int i = threadIdx.x + blockIdx.x*blockDim.x;
	if (i < Size)
	{
		dev_coveredInfo[i].index = i;	// record the original position of this candidate
		dev_coveredInfo[i].isCovered = dev_predictArray[i + 0*Size].isCovered && dev_predictArray[i + 1*Size].isCovered && dev_predictArray[i + 2*Size].isCovered;
		dev_coveredInfo[i].isVaild = dev_predictArray[i + 0*Size].isValid && dev_predictArray[i + 1*Size].isValid && dev_predictArray[i + 2*Size].isValid;
		dev_coveredInfo[i].vaildNum = (int)(dev_coveredInfo[i].isVaild == true);
	}
}
/*
 * Intersects, per candidate i, the three per-sub-constraint intervals into
 * one final interval: left = max of the lefts, right = min of the rights,
 * valid only when all three inputs carried an interval.
 * NOTE(review): the loop starts at i==1, so element 0 is never written —
 * presumably slot 0 is reserved by the caller; confirm before reusing.
 * NOTE(review): fmaxf/fminf are single-precision; if FloatType is double the
 * bounds are silently rounded to float — confirm this is intended.
 */
__global__ void calaFinalIntervel(Interval* dev_finalIntervel,Interval* dev_interval,const int calaArraySize)
{
	int i = threadIdx.x + blockIdx.x*blockDim.x;
	bool condition = (i>=1) && (i<calaArraySize);
	if(condition)
	{
		// the three source intervals for candidate i, one per sub-constraint row
		Interval* a0 = dev_interval + i + calaArraySize * 0;
		Interval* a1 = dev_interval + i + calaArraySize * 1;
		Interval* a2 = dev_interval + i + calaArraySize * 2;
		FloatType left = a0->left;
		left = fmaxf( left , a1->left);
		left = fmaxf( left , a2->left);
		FloatType right = a0->right;
		right = fminf( right , a1->right);
		right = fminf( right , a2->right);
		bool hasIntervel = a0->hasIntervel && a1->hasIntervel && a2->hasIntervel;
		dev_finalIntervel[i].left = left;
		dev_finalIntervel[i].right = right;
		dev_finalIntervel[i].hasIntervel = hasIntervel;
		// add a print here to inspect the intersection result (kept disabled)
		//printf("(%f , %f ) (%f , %f ) (%f , %f ) Final %d (%f , %f)\n",a1->left,a1->right,a2->left,a2->right,a3->left,a3->right,hasIntervel,left,right);
	}
}
/*
 * Broadcasts each predicted value into the x-coordinate of all three
 * sub-constraint rows of the prediction matrix (row stride = Size).
 */
__global__ void generatePredictMat(Coodinate* dev_predictArray,PredictValueWithOne* dev_finalAllPredictValue,const int Size)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	if (gid >= Size) return;	// guard the grid tail
	Coodinate* col = dev_predictArray + gid;
	col[0 * Size].x = dev_finalAllPredictValue[gid].value;
	col[1 * Size].x = dev_finalAllPredictValue[gid].value;
	col[2 * Size].x = dev_finalAllPredictValue[gid].value;
}
/*
 * Computes a search priority for every predicted candidate in parallel.
 * Per sub-constraint: a covered constraint contributes 1; an uncovered but
 * valid one contributes 1/(1+|residual|) (larger the closer the residual is
 * to the boundary); the sum is normalised by `row`. The printf flags fully
 * covered points as "Wrong" — presumably those should have been filtered out
 * before this kernel runs; confirm against the caller.
 */
__global__ void calaPriority(Priority* dev_priority,Coodinate* dev_calaArray,const int row,const int Size)
{
	int i = threadIdx.x + blockIdx.x*blockDim.x;
	if( i < Size )
	{
		FloatType pri = 0.0;
		// the candidate's entry in each of the three sub-constraint rows
		Coodinate* a0 = dev_calaArray + i + 0 * Size;
		Coodinate* a1 = dev_calaArray + i + 1 * Size;
		Coodinate* a2 = dev_calaArray + i + 2 * Size;
		if(a0->isCovered==true) pri = pri + 1.f;
		else if(a0->isValid==true) pri = pri + 1.f/(1.f+fabsf(a0->y));
		if(a1->isCovered==true) pri = pri + 1.f;
		else if(a1->isValid==true) pri = pri + 1.f/(1.f+fabsf(a1->y));
		if(a2->isCovered==true) pri = pri + 1.f;
		else if(a2->isValid==true) pri = pri + 1.f/(1.f+fabsf(a2->y));
		dev_priority[i].priority = pri / (FloatType)row;
		dev_priority[i].x = a0->x;
		// diagnostic code below
		bool isOne = (a0->x == a1->x) && (a1->x == a2->x);
		bool isCovered = a0->isCovered && a1->isCovered && a2->isCovered;
		bool isValid= a0->isValid && a1->isValid && a2->isValid;
		if(isCovered == true)
		{
			printf("Cala Prioruty Wrong,index:%d: (%f,%f,%d,%d) , (%f,%f,%d,%d) , (%f,%f,%d,%d) , isOne:%d , isCovered:%d , isValid:%d \n",i,a0->x,a0->y,a0->isCovered,a0->isValid,a1->x,a1->y,a1->isCovered,a1->isValid,a2->x,a2->y,a2->isCovered,a2->isValid,isOne,isCovered,isValid);
		}
	}
}
|
1fd4416aac8b35d7723d059168c87ee4a611517d.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <iostream>
#include <cmath>
#include "float32.h"
#define IN_DATA_BYTES (IN_SIZE*sizeof(dtype))
#define OUT_DATA_BYTES (OUT_SIZE*sizeof(dtype))
//function to print out error message from cuDNN calls
//(and abort the process with EXIT_FAILURE on any non-success status)
#define checkCUDNN(exp)                             \
  {                                                 \
    cudnnStatus_t status = (exp);                   \
    if(status != CUDNN_STATUS_SUCCESS) {            \
      std::cerr << "Error on line " << __LINE__ << ": "     \
                << cudnnGetErrorString(status) << std::endl; \
      std::exit(EXIT_FAILURE);                      \
    }                                               \
  }
// Difference between a device-computed value and its reference: relative
// error when |ref| > 1, absolute error otherwise (avoids dividing by tiny
// references).
float getError(float dev, float ref) {
    const bool useRelative = (ref > 1.0) || (ref < -1.0);
    const float diff = dev - ref;
    return useRelative ? diff / ref : diff;
}
/*
 * Smoke test for cudnnActivationForward with RELU on a 1x10x1x1 tensor:
 * runs the forward pass on the GPU and compares each output element against
 * the expected values from float32.h within a 1e-5 tolerance (via getError).
 * Prints the error count and returns 0.
 */
int main() {
    cudnnHandle_t cudnn;
    checkCUDNN(cudnnCreate(&cudnn));
    // RELU activation, propagating NaNs; the coef argument (0.0) is unused by RELU.
    cudnnActivationDescriptor_t activDesc;
    checkCUDNN(cudnnCreateActivationDescriptor(&activDesc));
    checkCUDNN(cudnnSetActivationDescriptor(activDesc,
                                            CUDNN_ACTIVATION_RELU,
                                            CUDNN_PROPAGATE_NAN,
                                            0.0));
    // Input and output tensors share the same NCHW 1x10x1x1 shape.
    cudnnTensorDescriptor_t in_desc;
    checkCUDNN(cudnnCreateTensorDescriptor(&in_desc));
    checkCUDNN(cudnnSetTensor4dDescriptor(in_desc,
                                          CUDNN_TENSOR_NCHW,
                                          CUDNN_DTYPE,
                                          1, 10,
                                          1,
                                          1));
    cudnnTensorDescriptor_t out_desc;
    checkCUDNN(cudnnCreateTensorDescriptor(&out_desc));
    checkCUDNN(cudnnSetTensor4dDescriptor(out_desc,
                                          CUDNN_TENSOR_NCHW,
                                          CUDNN_DTYPE,
                                          1, 10,
                                          1,
                                          1));
    stype alpha = 1.0f;   // out = 1*relu(in) + 0*out
    stype beta = 0.0f;
    //GPU data pointers
    dtype *in_data, *out_data;
    //allocate arrays on GPU
    hipMalloc(&in_data, IN_DATA_BYTES);
    hipMalloc(&out_data, OUT_DATA_BYTES);
    //copy input data to GPU array
    hipMemcpy(in_data, input, IN_DATA_BYTES, hipMemcpyHostToDevice);
    //initize output data on GPU
    hipMemset(out_data, 0, OUT_DATA_BYTES);
    checkCUDNN(cudnnActivationForward(cudnn,
                                      activDesc,
                                      &alpha,
                                      in_desc,
                                      in_data,
                                      &beta,
                                      out_desc,
                                      out_data));
    //allocate array on CPU for output tensor data
    dtype *result = (dtype *) malloc(OUT_DATA_BYTES);
    //copy output data from GPU (synchronous, so the forward pass has completed)
    hipMemcpy(result, out_data, OUT_DATA_BYTES, hipMemcpyDeviceToHost);
    //loop over and check that the forward pass outputs match expected results (exactly)
    int err = 0;
    for (int i = 0; i < OUT_SIZE; i++) {
        float diff = getError(result[i], output[i]);
        if (diff < 0) diff = -diff;
        if (diff > 1e-05) {
            std::cout << "Error! Expected " << output[i] << " got " << result[i] << " for idx " << i
                      << std::endl;
            std::cout << "diff " << diff << std::endl;
            err++;
        }
    }
    std::cout << "Forward finished with " << err << " errors" << std::endl;
    //free CPU arrays
    free(result);
    //free GPU arrays
    hipFree(in_data);
    hipFree(out_data);
    //free cuDNN descriptors
    cudnnDestroyTensorDescriptor(in_desc);
    cudnnDestroyTensorDescriptor(out_desc);
    // fix: the activation descriptor was created but never destroyed (leak)
    cudnnDestroyActivationDescriptor(activDesc);
    cudnnDestroy(cudnn);
    return 0;
}
| 1fd4416aac8b35d7723d059168c87ee4a611517d.cu | #include <cudnn.h>
#include <stdio.h>
#include <iostream>
#include <cmath>
#include "float32.h"
#define IN_DATA_BYTES (IN_SIZE*sizeof(dtype))
#define OUT_DATA_BYTES (OUT_SIZE*sizeof(dtype))
//function to print out error message from cuDNN calls
//(and abort the process with EXIT_FAILURE on any non-success status)
#define checkCUDNN(exp)                             \
  {                                                 \
    cudnnStatus_t status = (exp);                   \
    if(status != CUDNN_STATUS_SUCCESS) {            \
      std::cerr << "Error on line " << __LINE__ << ": "     \
                << cudnnGetErrorString(status) << std::endl; \
      std::exit(EXIT_FAILURE);                      \
    }                                               \
  }
// Difference between a device-computed value and its reference: relative
// error when |ref| > 1, absolute error otherwise (avoids dividing by tiny
// references).
float getError(float dev, float ref) {
    const bool useRelative = (ref > 1.0) || (ref < -1.0);
    const float diff = dev - ref;
    return useRelative ? diff / ref : diff;
}
/*
 * Smoke test for cudnnActivationForward with RELU on a 1x10x1x1 tensor:
 * runs the forward pass on the GPU and compares each output element against
 * the expected values from float32.h within a 1e-5 tolerance (via getError).
 * Prints the error count and returns 0.
 */
int main() {
    cudnnHandle_t cudnn;
    checkCUDNN(cudnnCreate(&cudnn));
    // RELU activation, propagating NaNs; the coef argument (0.0) is unused by RELU.
    cudnnActivationDescriptor_t activDesc;
    checkCUDNN(cudnnCreateActivationDescriptor(&activDesc));
    checkCUDNN(cudnnSetActivationDescriptor(activDesc,
                                            CUDNN_ACTIVATION_RELU,
                                            CUDNN_PROPAGATE_NAN,
                                            0.0));
    // Input and output tensors share the same NCHW 1x10x1x1 shape.
    cudnnTensorDescriptor_t in_desc;
    checkCUDNN(cudnnCreateTensorDescriptor(&in_desc));
    checkCUDNN(cudnnSetTensor4dDescriptor(in_desc,
                                          CUDNN_TENSOR_NCHW,
                                          CUDNN_DTYPE,
                                          1, 10,
                                          1,
                                          1));
    cudnnTensorDescriptor_t out_desc;
    checkCUDNN(cudnnCreateTensorDescriptor(&out_desc));
    checkCUDNN(cudnnSetTensor4dDescriptor(out_desc,
                                          CUDNN_TENSOR_NCHW,
                                          CUDNN_DTYPE,
                                          1, 10,
                                          1,
                                          1));
    stype alpha = 1.0f;   // out = 1*relu(in) + 0*out
    stype beta = 0.0f;
    //GPU data pointers
    dtype *in_data, *out_data;
    //allocate arrays on GPU
    cudaMalloc(&in_data, IN_DATA_BYTES);
    cudaMalloc(&out_data, OUT_DATA_BYTES);
    //copy input data to GPU array
    cudaMemcpy(in_data, input, IN_DATA_BYTES, cudaMemcpyHostToDevice);
    //initize output data on GPU
    cudaMemset(out_data, 0, OUT_DATA_BYTES);
    checkCUDNN(cudnnActivationForward(cudnn,
                                      activDesc,
                                      &alpha,
                                      in_desc,
                                      in_data,
                                      &beta,
                                      out_desc,
                                      out_data));
    //allocate array on CPU for output tensor data
    dtype *result = (dtype *) malloc(OUT_DATA_BYTES);
    //copy output data from GPU (synchronous, so the forward pass has completed)
    cudaMemcpy(result, out_data, OUT_DATA_BYTES, cudaMemcpyDeviceToHost);
    //loop over and check that the forward pass outputs match expected results (exactly)
    int err = 0;
    for (int i = 0; i < OUT_SIZE; i++) {
        float diff = getError(result[i], output[i]);
        if (diff < 0) diff = -diff;
        if (diff > 1e-05) {
            std::cout << "Error! Expected " << output[i] << " got " << result[i] << " for idx " << i
                      << std::endl;
            std::cout << "diff " << diff << std::endl;
            err++;
        }
    }
    std::cout << "Forward finished with " << err << " errors" << std::endl;
    //free CPU arrays
    free(result);
    //free GPU arrays
    cudaFree(in_data);
    cudaFree(out_data);
    //free cuDNN descriptors
    cudnnDestroyTensorDescriptor(in_desc);
    cudnnDestroyTensorDescriptor(out_desc);
    // fix: the activation descriptor was created but never destroyed (leak)
    cudnnDestroyActivationDescriptor(activDesc);
    cudnnDestroy(cudnn);
    return 0;
}
|
062937e3c62c48937e129fcab68f554f4fc69ded.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cfloat>
#include "common.hpp"
using math_ops::Exp;
using math_ops::Log;
using math_ops::Log1p;
using math_ops::Pow;
using math_ops::Abs;
/*
 * Elementwise sigmoid focal loss (forward), grid-stride loop over nthreads.
 * For p = sigmoid(logit) and soft target lb:
 *   loss[i] = -|lb - p|^gamma * (lb*alpha*log(p) + (1-lb)*(1-alpha)*log(1-p))
 * log(p)/log(1-p) are computed via the branch on the sign of the logit using
 * Log1p, which avoids overflow of exp for large |logit|.
 */
template<typename scalar_t>
__global__ void FocalLossForward(const int nthreads,
                            const scalar_t *logits,
                            const scalar_t *labels,
                            scalar_t *loss,
                            const scalar_t gamma, const scalar_t alpha) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    const scalar_t one(1.);
    const scalar_t zero(0.);
    for (int i{tid}; i < nthreads; i+=stride) {
        scalar_t lgt = logits[i];
        scalar_t lb = labels[i];
        scalar_t prob = one / (one + Exp(-lgt));   // sigmoid(logit)
        scalar_t log_p, log_1_p;
        if (lgt >= zero) {
            // log_p = -Log(one + Exp(-lgt));
            log_p = -Log1p(Exp(-lgt));
            log_1_p = -lgt + log_p;
        } else {
            // log_1_p = -Log(one + Exp(lgt));
            log_1_p = -Log1p(Exp(lgt));
            log_p = lgt + log_1_p;
        }
        scalar_t coeff = - Pow(Abs(lb - prob), gamma);   // focal modulation factor
        scalar_t ce = lb * alpha * log_p + (one - lb) * (one - alpha) * log_1_p;
        loss[i] = coeff * ce;
    }
}
/*
 * Gradient of the focal loss w.r.t. the logits: applies the product rule to
 * coeff*ce (d_coeff * ce + d_ce * coeff) and scales by the upstream gradient.
 * Same numerically stable log computation as the forward kernel.
 */
template<typename scalar_t>
__global__ void FocalLossBackward(const int nthreads,
                            const scalar_t *logits,
                            const scalar_t *labels,
                            const scalar_t *grad_loss,
                            scalar_t *grad_logits,
                            const scalar_t gamma, const scalar_t alpha) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    const scalar_t one(1.);
    const scalar_t zero(0.);
    for (int i{tid}; i < nthreads; i+=stride) {
        scalar_t lgt = logits[i];
        scalar_t lb = labels[i];
        scalar_t prob = one / (one + Exp(-lgt));   // sigmoid(logit)
        scalar_t log_p, log_1_p;
        if (lgt >= zero) {
            // log_p = -Log(one + Exp(-lgt));
            log_p = -Log1p(Exp(-lgt));
            log_1_p = -lgt + log_p;
        } else {
            /* log_1_p = -Log(one + Exp(lgt)); */
            log_1_p = -Log1p(Exp(lgt));
            log_p = lgt + log_1_p;
        }
        scalar_t ce = lb * alpha * log_p + (one - lb) * (one - alpha) * log_1_p;
        scalar_t coeff = - Pow(Abs(lb - prob), gamma);
        scalar_t d_ce = lb * alpha - prob * (one - lb - alpha + scalar_t(2) * lb * alpha);
        scalar_t d_coeff = gamma * Pow(Abs(lb - prob), gamma - one) * prob * (one - prob);
        if (lb < prob) {   // sign of d|lb - prob|/d(logit)
            d_coeff = - d_coeff;
        }
        scalar_t grad = d_coeff * ce + d_ce * coeff;
        grad_logits[i] = grad * grad_loss[i];
    }
}
/*
 * CUDA forward pass for the sigmoid focal loss: allocates a loss tensor
 * shaped like `logits` and launches FocalLossForward with 512 threads/block
 * and at most 4096 blocks (the kernel's grid-stride loop covers any excess).
 * Dispatches over half/float/double based on logits' dtype.
 */
at::Tensor FocalLoss_forward_cuda(const at::Tensor &logits,
                             const at::Tensor &labels,
                             const float gamma,
                             const float alpha) {
    // CHECK type and shape
    AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
    AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
    AT_ASSERTM(labels.scalar_type() == logits.scalar_type(), "labels and logits should be half/float/double");
    // allocate memory and cuda grid/block
    auto losses = at::empty_like(logits);
    const int num_samples = logits.numel();
    dim3 grid(::min(
        THCCeilDiv((int64_t)num_samples, (int64_t)512), (int64_t)4096
    ));
    dim3 block(512);
    if (losses.numel() == 0) {
        AT_CUDA_CHECK(hipGetLastError());
        return losses;
    }
    // call kernel
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "focal forward", [&] {
        hipLaunchKernelGGL(( FocalLossForward<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 
            num_samples,
            logits.contiguous().data_ptr<scalar_t>(),
            labels.contiguous().data_ptr<scalar_t>(),
            losses.contiguous().data_ptr<scalar_t>(),
            scalar_t(gamma), scalar_t(alpha)
        );
    });
    AT_CUDA_CHECK(hipGetLastError());   // surface any launch error
    return losses;
}
/*
 * CUDA backward pass for the sigmoid focal loss: returns d(loss)/d(logits),
 * shaped like `logits`, given the upstream gradient `grad`. Launch config
 * mirrors the forward pass (512 threads/block, at most 4096 blocks; the
 * kernel's grid-stride loop covers any excess).
 */
at::Tensor FocalLoss_backward_cuda(const at::Tensor &grad,
                             const at::Tensor &logits,
                             const at::Tensor &labels,
                             const float gamma,
                             const float alpha) {
    // CHECK type and shape
    AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
    AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
    // Consistency with FocalLoss_forward_cuda: the dispatch below derives the
    // element type from `logits` alone, so mismatched dtypes would silently
    // reinterpret the raw bytes of `labels`/`grad`.
    AT_ASSERTM(labels.scalar_type() == logits.scalar_type(), "labels and logits should be half/float/double");
    AT_ASSERTM(grad.scalar_type() == logits.scalar_type(), "grad and logits should be half/float/double");
    /* allocate memory and cuda grid/block */
    auto grad_logits = at::empty_like(logits);
    const int num_samples = logits.numel();
    dim3 grid(::min(
        THCCeilDiv((int64_t)num_samples, (int64_t)512), (int64_t)4096
    ));
    dim3 block(512);
    if (grad_logits.numel() == 0) {
        AT_CUDA_CHECK(hipGetLastError());
        return grad_logits;
    }
    // call kernel
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "focal backwrd", [&] {
        hipLaunchKernelGGL(( FocalLossBackward<scalar_t>), dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 
            num_samples,
            logits.contiguous().data_ptr<scalar_t>(),
            labels.contiguous().data_ptr<scalar_t>(),
            grad.contiguous().data_ptr<scalar_t>(),
            grad_logits.contiguous().data_ptr<scalar_t>(),
            scalar_t(gamma), scalar_t(alpha)
        );
    });
    AT_CUDA_CHECK(hipGetLastError());   // surface any launch error
    return grad_logits;
}
// ---------------- python interface ----------------
// Thin device-checking wrappers around the CUDA implementations. Both raise
// (AT_ERROR) when either tensor lives off-GPU, and pin the current device to
// that of `logits` for the duration of the call.
at::Tensor FocalLoss_forward(const at::Tensor &logits,
                             const at::Tensor &labels,
                             const float gamma,
                             const float alpha) {
    const bool on_gpu = (logits.device().type() == c10::kCUDA)
                        && (labels.device().type() == c10::kCUDA);
    if (!on_gpu) {
        AT_ERROR("this focal loss only support gpu mode\n");
    }
    at::DeviceGuard guard(logits.device());
    return FocalLoss_forward_cuda(logits, labels, gamma, alpha);
}
at::Tensor FocalLoss_backward(const at::Tensor &grad,
                              const at::Tensor &logits,
                              const at::Tensor &labels,
                              const float gamma,
                              const float alpha) {
    // TODO: try AT_ASSERTM
    const bool on_gpu = (logits.device().type() == c10::kCUDA)
                        && (labels.device().type() == c10::kCUDA);
    if (!on_gpu) {
        AT_ERROR("this focal loss only support gpu mode\n");
    }
    at::DeviceGuard guard(logits.device());
    return FocalLoss_backward_cuda(grad, logits, labels, gamma, alpha);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("focalloss_forward", &FocalLoss_forward, "focal loss forward");
    m.def("focalloss_backward", &FocalLoss_backward, "focal loss backward");
}
| 062937e3c62c48937e129fcab68f554f4fc69ded.cu |
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cfloat>
#include "common.hpp"
using math_ops::Exp;
using math_ops::Log;
using math_ops::Log1p;
using math_ops::Pow;
using math_ops::Abs;
/*
 * Elementwise sigmoid focal loss (forward), grid-stride loop over nthreads.
 * For p = sigmoid(logit) and soft target lb:
 *   loss[i] = -|lb - p|^gamma * (lb*alpha*log(p) + (1-lb)*(1-alpha)*log(1-p))
 * log(p)/log(1-p) are computed via the branch on the sign of the logit using
 * Log1p, which avoids overflow of exp for large |logit|.
 */
template<typename scalar_t>
__global__ void FocalLossForward(const int nthreads,
                            const scalar_t *logits,
                            const scalar_t *labels,
                            scalar_t *loss,
                            const scalar_t gamma, const scalar_t alpha) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    const scalar_t one(1.);
    const scalar_t zero(0.);
    for (int i{tid}; i < nthreads; i+=stride) {
        scalar_t lgt = logits[i];
        scalar_t lb = labels[i];
        scalar_t prob = one / (one + Exp(-lgt));   // sigmoid(logit)
        scalar_t log_p, log_1_p;
        if (lgt >= zero) {
            // log_p = -Log(one + Exp(-lgt));
            log_p = -Log1p(Exp(-lgt));
            log_1_p = -lgt + log_p;
        } else {
            // log_1_p = -Log(one + Exp(lgt));
            log_1_p = -Log1p(Exp(lgt));
            log_p = lgt + log_1_p;
        }
        scalar_t coeff = - Pow(Abs(lb - prob), gamma);   // focal modulation factor
        scalar_t ce = lb * alpha * log_p + (one - lb) * (one - alpha) * log_1_p;
        loss[i] = coeff * ce;
    }
}
/*
 * Gradient of the focal loss w.r.t. the logits: applies the product rule to
 * coeff*ce (d_coeff * ce + d_ce * coeff) and scales by the upstream gradient.
 * Same numerically stable log computation as the forward kernel.
 */
template<typename scalar_t>
__global__ void FocalLossBackward(const int nthreads,
                            const scalar_t *logits,
                            const scalar_t *labels,
                            const scalar_t *grad_loss,
                            scalar_t *grad_logits,
                            const scalar_t gamma, const scalar_t alpha) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    const scalar_t one(1.);
    const scalar_t zero(0.);
    for (int i{tid}; i < nthreads; i+=stride) {
        scalar_t lgt = logits[i];
        scalar_t lb = labels[i];
        scalar_t prob = one / (one + Exp(-lgt));   // sigmoid(logit)
        scalar_t log_p, log_1_p;
        if (lgt >= zero) {
            // log_p = -Log(one + Exp(-lgt));
            log_p = -Log1p(Exp(-lgt));
            log_1_p = -lgt + log_p;
        } else {
            /* log_1_p = -Log(one + Exp(lgt)); */
            log_1_p = -Log1p(Exp(lgt));
            log_p = lgt + log_1_p;
        }
        scalar_t ce = lb * alpha * log_p + (one - lb) * (one - alpha) * log_1_p;
        scalar_t coeff = - Pow(Abs(lb - prob), gamma);
        scalar_t d_ce = lb * alpha - prob * (one - lb - alpha + scalar_t(2) * lb * alpha);
        scalar_t d_coeff = gamma * Pow(Abs(lb - prob), gamma - one) * prob * (one - prob);
        if (lb < prob) {   // sign of d|lb - prob|/d(logit)
            d_coeff = - d_coeff;
        }
        scalar_t grad = d_coeff * ce + d_ce * coeff;
        grad_logits[i] = grad * grad_loss[i];
    }
}
/*
 * CUDA forward pass for the sigmoid focal loss: allocates a loss tensor
 * shaped like `logits` and launches FocalLossForward with 512 threads/block
 * and at most 4096 blocks (the kernel's grid-stride loop covers any excess).
 * Dispatches over half/float/double based on logits' dtype.
 */
at::Tensor FocalLoss_forward_cuda(const at::Tensor &logits,
                             const at::Tensor &labels,
                             const float gamma,
                             const float alpha) {
    // CHECK type and shape
    AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
    AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
    AT_ASSERTM(labels.scalar_type() == logits.scalar_type(), "labels and logits should be half/float/double");
    // allocate memory and cuda grid/block
    auto losses = at::empty_like(logits);
    const int num_samples = logits.numel();
    dim3 grid(std::min(
        THCCeilDiv((int64_t)num_samples, (int64_t)512), (int64_t)4096
    ));
    dim3 block(512);
    if (losses.numel() == 0) {
        AT_CUDA_CHECK(cudaGetLastError());
        return losses;
    }
    // call kernel
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "focal forward", [&] {
        FocalLossForward<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
            num_samples,
            logits.contiguous().data_ptr<scalar_t>(),
            labels.contiguous().data_ptr<scalar_t>(),
            losses.contiguous().data_ptr<scalar_t>(),
            scalar_t(gamma), scalar_t(alpha)
        );
    });
    AT_CUDA_CHECK(cudaGetLastError());   // surface any launch error
    return losses;
}
/*
 * CUDA backward pass for the sigmoid focal loss: returns d(loss)/d(logits),
 * shaped like `logits`, given the upstream gradient `grad`. Launch config
 * mirrors the forward pass (512 threads/block, at most 4096 blocks; the
 * kernel's grid-stride loop covers any excess).
 */
at::Tensor FocalLoss_backward_cuda(const at::Tensor &grad,
                             const at::Tensor &logits,
                             const at::Tensor &labels,
                             const float gamma,
                             const float alpha) {
    // CHECK type and shape
    AT_ASSERTM(logits.device().type() == c10::kCUDA, "logits should be cuda");
    AT_ASSERTM(labels.device().type() == c10::kCUDA, "labels should be cuda");
    // Consistency with FocalLoss_forward_cuda: the dispatch below derives the
    // element type from `logits` alone, so mismatched dtypes would silently
    // reinterpret the raw bytes of `labels`/`grad`.
    AT_ASSERTM(labels.scalar_type() == logits.scalar_type(), "labels and logits should be half/float/double");
    AT_ASSERTM(grad.scalar_type() == logits.scalar_type(), "grad and logits should be half/float/double");
    /* allocate memory and cuda grid/block */
    auto grad_logits = at::empty_like(logits);
    const int num_samples = logits.numel();
    dim3 grid(std::min(
        THCCeilDiv((int64_t)num_samples, (int64_t)512), (int64_t)4096
    ));
    dim3 block(512);
    if (grad_logits.numel() == 0) {
        AT_CUDA_CHECK(cudaGetLastError());
        return grad_logits;
    }
    // call kernel
    AT_DISPATCH_FLOATING_TYPES_AND_HALF(logits.scalar_type(), "focal backwrd", [&] {
        FocalLossBackward<scalar_t><<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
            num_samples,
            logits.contiguous().data_ptr<scalar_t>(),
            labels.contiguous().data_ptr<scalar_t>(),
            grad.contiguous().data_ptr<scalar_t>(),
            grad_logits.contiguous().data_ptr<scalar_t>(),
            scalar_t(gamma), scalar_t(alpha)
        );
    });
    AT_CUDA_CHECK(cudaGetLastError());   // surface any launch error
    return grad_logits;
}
// ---------------- python interface ----------------
// Thin device-checking wrappers around the CUDA implementations. Both raise
// (AT_ERROR) when either tensor lives off-GPU, and pin the current device to
// that of `logits` for the duration of the call.
at::Tensor FocalLoss_forward(const at::Tensor &logits,
                             const at::Tensor &labels,
                             const float gamma,
                             const float alpha) {
    const bool on_gpu = (logits.device().type() == c10::kCUDA)
                        && (labels.device().type() == c10::kCUDA);
    if (!on_gpu) {
        AT_ERROR("this focal loss only support gpu mode\n");
    }
    at::DeviceGuard guard(logits.device());
    return FocalLoss_forward_cuda(logits, labels, gamma, alpha);
}
at::Tensor FocalLoss_backward(const at::Tensor &grad,
                              const at::Tensor &logits,
                              const at::Tensor &labels,
                              const float gamma,
                              const float alpha) {
    // TODO: try AT_ASSERTM
    const bool on_gpu = (logits.device().type() == c10::kCUDA)
                        && (labels.device().type() == c10::kCUDA);
    if (!on_gpu) {
        AT_ERROR("this focal loss only support gpu mode\n");
    }
    at::DeviceGuard guard(logits.device());
    return FocalLoss_backward_cuda(grad, logits, labels, gamma, alpha);
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
    m.def("focalloss_forward", &FocalLoss_forward, "focal loss forward");
    m.def("focalloss_backward", &FocalLoss_backward, "focal loss backward");
}
|
1125e5a543b9bfdece7d50bf9254612839bc3778.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <vector>
#include <mpi.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <chrono>
/*
 * MPI_Alltoall bandwidth comparison: host (CPU) buffers vs. device (GPU)
 * buffers (the latter exercises GPU-aware MPI). Each of the n ranks
 * exchanges size_tot/n doubles with every other rank and reports elapsed
 * wall-clock time per phase.
 */
int main()
{
    MPI_Init(NULL, NULL);
    int n, id;
    MPI_Comm_size(MPI_COMM_WORLD, &n);
    MPI_Comm_rank(MPI_COMM_WORLD, &id);
    const size_t size_tot = 1024*1024*1024;   // 2^30 doubles => 8 GiB per buffer
    const size_t size_max = size_tot / n;     // per-peer chunk size
    // CPU TEST
    std::vector<double> a_cpu_in (size_tot);
    std::vector<double> a_cpu_out(size_tot);
    std::fill(a_cpu_in.begin(), a_cpu_in.end(), id);
    std::cout << id << ": Starting CPU all-to-all\n";
    auto time_start = std::chrono::high_resolution_clock::now();
    MPI_Alltoall(
        a_cpu_in .data(), size_max, MPI_DOUBLE,
        a_cpu_out.data(), size_max, MPI_DOUBLE,
        MPI_COMM_WORLD);
    auto time_end = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration<double, std::milli>(time_end-time_start).count();
    std::cout << id << ": Finished CPU all-to-all in " << std::to_string(duration) << " (ms)\n";
    // GPU TEST — assumes 4 GPUs per node, ranks mapped round-robin
    int id_local = id % 4;
    hipSetDevice(id_local);
    double* a_gpu_in;
    double* a_gpu_out;
    hipMalloc((void **)&a_gpu_in , size_tot * sizeof(double));
    hipMalloc((void **)&a_gpu_out, size_tot * sizeof(double));
    hipMemcpy(a_gpu_in, a_cpu_in.data(), size_tot*sizeof(double), hipMemcpyHostToDevice);
    int id_gpu;
    hipGetDevice(&id_gpu);
    std::cout << id << ", " << id_local << ", " << id_gpu << ": Starting GPU all-to-all\n";
    time_start = std::chrono::high_resolution_clock::now();
    MPI_Alltoall(
        a_gpu_in , size_max, MPI_DOUBLE,
        a_gpu_out, size_max, MPI_DOUBLE,
        MPI_COMM_WORLD);
    time_end = std::chrono::high_resolution_clock::now();
    duration = std::chrono::duration<double, std::milli>(time_end-time_start).count();
    std::cout << id << ", " << id_local << ", " << id_gpu << ": Finished GPU all-to-all in " << std::to_string(duration) << " (ms)\n";
    // fix: release the device buffers (previously leaked until process exit)
    hipFree(a_gpu_in);
    hipFree(a_gpu_out);
    MPI_Finalize();
    return 0;
}
| 1125e5a543b9bfdece7d50bf9254612839bc3778.cu | #include <iostream>
#include <vector>
#include <mpi.h>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <chrono>
int main()
{
    // Micro-benchmark: time MPI_Alltoall on host buffers, then on device
    // buffers.  The device path hands raw GPU pointers to MPI_Alltoall,
    // which requires a CUDA-aware MPI build — confirm before running.
    MPI_Init(NULL, NULL);
    int n, id;
    MPI_Comm_size(MPI_COMM_WORLD, &n);
    MPI_Comm_rank(MPI_COMM_WORLD, &id);
    const size_t size_tot = 1024*1024*1024;
    // Elements exchanged with each peer; assumes n divides size_tot
    // evenly — TODO confirm for rank counts that are not powers of two.
    const size_t size_max = size_tot / n;
    // CPU TEST
    std::vector<double> a_cpu_in (size_tot);
    std::vector<double> a_cpu_out(size_tot);
    std::fill(a_cpu_in.begin(), a_cpu_in.end(), id);
    std::cout << id << ": Starting CPU all-to-all\n";
    auto time_start = std::chrono::high_resolution_clock::now();
    MPI_Alltoall(
        a_cpu_in .data(), size_max, MPI_DOUBLE,
        a_cpu_out.data(), size_max, MPI_DOUBLE,
        MPI_COMM_WORLD);
    auto time_end = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration<double, std::milli>(time_end-time_start).count();
    std::cout << id << ": Finished CPU all-to-all in " << std::to_string(duration) << " (ms)\n";
    // GPU TEST
    // Round-robin ranks onto the node's GPUs; assumes 4 GPUs per node —
    // TODO confirm against the actual job layout.
    int id_local = id % 4;
    cudaSetDevice(id_local);
    double* a_gpu_in;
    double* a_gpu_out;
    // NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked
    // here; a failed allocation would surface only as a crash inside MPI.
    cudaMalloc((void **)&a_gpu_in , size_tot * sizeof(double));
    cudaMalloc((void **)&a_gpu_out, size_tot * sizeof(double));
    cudaMemcpy(a_gpu_in, a_cpu_in.data(), size_tot*sizeof(double), cudaMemcpyHostToDevice);
    int id_gpu;
    cudaGetDevice(&id_gpu);
    std::cout << id << ", " << id_local << ", " << id_gpu << ": Starting GPU all-to-all\n";
    time_start = std::chrono::high_resolution_clock::now();
    MPI_Alltoall(
        a_gpu_in , size_max, MPI_DOUBLE,
        a_gpu_out, size_max, MPI_DOUBLE,
        MPI_COMM_WORLD);
    time_end = std::chrono::high_resolution_clock::now();
    duration = std::chrono::duration<double, std::milli>(time_end-time_start).count();
    std::cout << id << ", " << id_local << ", " << id_gpu << ": Finished GPU all-to-all in " << std::to_string(duration) << " (ms)\n";
    // BUG FIX: release the device buffers (previously leaked until exit).
    cudaFree(a_gpu_in);
    cudaFree(a_gpu_out);
    MPI_Finalize();
    return 0;
}
|
473ac28c160016cdc09ea226b202712499f95dbc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
//Otimizando o uso de memria
// Tiled matrix multiply C = A * B for square N x N matrices, staging
// N_sub x N_sub sub-blocks in shared memory.
// Expects a 2D launch with blockDim = (N_sub, N_sub) and
// gridDim = (N/N_sub, N/N_sub); N_sub must be #defined before this kernel
// (in this file it is defined below the kernel — it must be moved above).
__global__ void matrix_mult(float *a, float *b, float *c, float N){
    // N arrives as float to keep the original signature; take an integral
    // copy for indexing, since float array subscripts are ill-formed.
    const int n = (int)N;
    // Shared-memory tiles, visible to every thread of the block.
    __shared__ float sub_a[N_sub][N_sub];
    __shared__ float sub_b[N_sub][N_sub];
    int block_x = blockIdx.x;
    int block_y = blockIdx.y;
    int thread_x = threadIdx.x;
    int thread_y = threadIdx.y;
    // Row and column of the C element this thread computes.
    int index_x = block_y * N_sub + thread_y; // row
    int index_y = block_x * N_sub + thread_x; // column
    float index_z = 0;
    // Walk the tiles along the shared (inner) dimension.
    for(int sub_block = 0; sub_block < n/N_sub; sub_block++){
        // Stage one tile of A and one tile of B into shared memory.
        sub_a[thread_y][thread_x] = a[index_x * n + sub_block * N_sub + thread_x];
        // BUG FIX: the B tile must be addressed by this thread's output
        // *column* (index_y); the original used the row (index_x).
        sub_b[thread_y][thread_x] = b[(sub_block * N_sub + thread_y) * n + index_y];
        // Barrier: the whole tile must be written before anyone reads it.
        __syncthreads();
        for(int i = 0; i < N_sub; i++){
            // BUG FIX: matrix multiplication accumulates *products*;
            // the original added the two operands instead.
            index_z += sub_a[thread_y][i] * sub_b[i][thread_x];
        }
        // Barrier: finish reading before the next iteration overwrites.
        __syncthreads();
    }
    // Write the finished dot product to its slot in C.
    c[index_x * n + index_y] = index_z;
}
#define N_sub 32
#define N (1024*1024)
#define THREADS_PER_BLOCK 512 //Definimos 512 threads por bloco. Essa informao varia conforme a placa, ese valor padro pr definido pelo tipo da placa
int main(){
    // Host-side driver for the tiled matrix multiply above.
    float *a, *b, *c;       // host (CPU) buffers
    float *d_a, *d_b, *d_c; // device (GPU) buffers
    // BUG FIX: the byte count must be computed in size_t; the original
    // used a float, and N*N in int arithmetic overflows for large N.
    size_t size = (size_t)N * N * sizeof(float);
    // NOTE(review): with N = 1024*1024 each buffer is 2^40 elements
    // (~4 TB) — N almost certainly needs to be far smaller; confirm.
    hipMalloc((void **) &d_a, size);
    hipMalloc((void **) &d_b, size);
    hipMalloc((void **) &d_c, size);
    a = (float *) malloc(size);
    b = (float *) malloc(size);
    c = (float *) malloc(size);
    for(size_t i = 0; i < (size_t)N * N; i++){
        a[i] = b[i] = (float)i;
        c[i] = 0;
    }
    // Copy the inputs from host memory to device memory.
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
    // BUG FIX: the kernel indexes blockIdx/threadIdx in both x and y, so
    // it needs a 2D launch of N_sub x N_sub threads per block; the
    // original launched a 1D grid of 512-thread blocks.
    dim3 block_dim(N_sub, N_sub);
    dim3 grid_dim((N + N_sub - 1) / N_sub, (N + N_sub - 1) / N_sub);
    hipLaunchKernelGGL(( matrix_mult), grid_dim, block_dim, 0, 0, d_a, d_b, d_c, (float)N);
    // Copy the result back to the host.
    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
    // BUG FIX: c holds floats, so print with %f (the original used %d
    // for c[0], which is undefined behavior).
    printf( "c[0] = %f\n",c[0] );
    printf( "c[%d] = %f\n",N-1, c[N-1] );
    free(a);
    free(b);
    free(c);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    return 0;
}
| 473ac28c160016cdc09ea226b202712499f95dbc.cu | #include <stdio.h>
#include <stdlib.h>
//Otimizando o uso de memória
// Tiled matrix multiply C = A * B for square N x N matrices, staging
// N_sub x N_sub sub-blocks in shared memory.
// Expects a 2D launch with blockDim = (N_sub, N_sub) and
// gridDim = (N/N_sub, N/N_sub); N_sub must be #defined before this kernel
// (in this file it is defined below the kernel — it must be moved above).
__global__ void matrix_mult(float *a, float *b, float *c, float N){
    // N arrives as float to keep the original signature; take an integral
    // copy for indexing, since float array subscripts are ill-formed.
    const int n = (int)N;
    // Shared-memory tiles, visible to every thread of the block.
    __shared__ float sub_a[N_sub][N_sub];
    __shared__ float sub_b[N_sub][N_sub];
    int block_x = blockIdx.x;
    int block_y = blockIdx.y;
    int thread_x = threadIdx.x;
    int thread_y = threadIdx.y;
    // Row and column of the C element this thread computes.
    int index_x = block_y * N_sub + thread_y; // row
    int index_y = block_x * N_sub + thread_x; // column
    float index_z = 0;
    // Walk the tiles along the shared (inner) dimension.
    for(int sub_block = 0; sub_block < n/N_sub; sub_block++){
        // Stage one tile of A and one tile of B into shared memory.
        sub_a[thread_y][thread_x] = a[index_x * n + sub_block * N_sub + thread_x];
        // BUG FIX: the B tile must be addressed by this thread's output
        // *column* (index_y); the original used the row (index_x).
        sub_b[thread_y][thread_x] = b[(sub_block * N_sub + thread_y) * n + index_y];
        // Barrier: the whole tile must be written before anyone reads it.
        __syncthreads();
        for(int i = 0; i < N_sub; i++){
            // BUG FIX: matrix multiplication accumulates *products*;
            // the original added the two operands instead.
            index_z += sub_a[thread_y][i] * sub_b[i][thread_x];
        }
        // Barrier: finish reading before the next iteration overwrites.
        __syncthreads();
    }
    // Write the finished dot product to its slot in C.
    c[index_x * n + index_y] = index_z;
}
#define N_sub 32
#define N (1024*1024)
#define THREADS_PER_BLOCK 512 //Definimos 512 threads por bloco. Essa informação varia conforme a placa, ese valor é padrão pré definido pelo tipo da placa
int main(){
    // Host-side driver for the tiled matrix multiply above.
    float *a, *b, *c;       // host (CPU) buffers
    float *d_a, *d_b, *d_c; // device (GPU) buffers
    // BUG FIX: the byte count must be computed in size_t; the original
    // used a float, and N*N in int arithmetic overflows for large N.
    size_t size = (size_t)N * N * sizeof(float);
    // NOTE(review): with N = 1024*1024 each buffer is 2^40 elements
    // (~4 TB) — N almost certainly needs to be far smaller; confirm.
    cudaMalloc((void **) &d_a, size);
    cudaMalloc((void **) &d_b, size);
    cudaMalloc((void **) &d_c, size);
    a = (float *) malloc(size);
    b = (float *) malloc(size);
    c = (float *) malloc(size);
    for(size_t i = 0; i < (size_t)N * N; i++){
        a[i] = b[i] = (float)i;
        c[i] = 0;
    }
    // Copy the inputs from host memory to device memory.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // BUG FIX: the kernel indexes blockIdx/threadIdx in both x and y, so
    // it needs a 2D launch of N_sub x N_sub threads per block; the
    // original launched a 1D grid of 512-thread blocks.
    dim3 block_dim(N_sub, N_sub);
    dim3 grid_dim((N + N_sub - 1) / N_sub, (N + N_sub - 1) / N_sub);
    matrix_mult<<< grid_dim, block_dim >>>(d_a, d_b, d_c, (float)N);
    // Copy the result back to the host.
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    // BUG FIX: c holds floats, so print with %f (the original used %d
    // for c[0], which is undefined behavior).
    printf( "c[0] = %f\n",c[0] );
    printf( "c[%d] = %f\n",N-1, c[N-1] );
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
a391bfed98285442790d1a761662f74a0c719c5f.hip | // !!! This is a file automatically generated by hipify!!!
#if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::GemmSplitKParallel<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
| a391bfed98285442790d1a761662f74a0c719c5f.cu | #if __CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ >= 2)
// generated by gen_cutlass_matrix_mul_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wuninitialized"
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#include "src/cuda/matrix_mul/fp32_simt/matrix_mul_float_simt_cutlass_wrapper.cuinl"
using LayoutA = cutlass::layout::RowMajor;
using LayoutB = cutlass::layout::ColumnMajor;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 32, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 32, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<float, 1, float, float>;
using Gemm = cutlass::gemm::device::GemmSplitKParallel<
float, LayoutA,
float, LayoutB,
float, cutlass::layout::RowMajor, float,
cutlass::arch::OpClassSimt, cutlass::arch::Sm50,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp>;
template void megdnn::cuda::cutlass_wrapper::cutlass_matrix_mul_wrapper<Gemm>(
const typename Gemm::ElementA* d_A, size_t lda,
const typename Gemm::ElementB* d_B, size_t ldb,
typename Gemm::ElementC* d_C, size_t ldc,
int* workspace,
cutlass::gemm::GemmCoord const& problem_size,
typename Gemm::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream, int split_k_slices);
#pragma GCC diagnostic pop
#endif
|
18c4d8cb85ae29a8843d5533ddec142fa4376bfe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
constexpr int THREADS = 1024;
// Element-wise kernel: for every index below `size`, writes the sum,
// product and quotient of the two inputs into the three output buffers.
// One thread per element; threads past the end simply return.
__global__ void CustomAddMulDivKernel(float *input1, float *input2, float *output1, float *output2, float *output3,
                                      size_t size) {
  size_t pos = static_cast<size_t>(blockIdx.x) * THREADS + threadIdx.x;
  if (pos >= size) {
    return;
  }
  const float lhs = input1[pos];
  const float rhs = input2[pos];
  output1[pos] = lhs + rhs;
  output2[pos] = lhs * rhs;
  output3[pos] = lhs / rhs;
}
// Entry point invoked by MindSpore's aot Custom-op mechanism.
// params holds the 5 device pointers (2 inputs followed by 3 outputs);
// ndims/shapes describe each tensor; dtypes gives each tensor's dtype name;
// stream is the HIP stream the kernel must be enqueued on.
// Returns 0 on success; any non-zero value makes MindSpore stop computing.
extern "C" int CustomAddMulDiv(int nparam, void **params, int *ndims, int64_t **shapes, const char **dtypes,
void *stream, void *extra) {
hipStream_t custream = static_cast<hipStream_t>(stream);
constexpr int OUTPUT_INDEX = 2;
constexpr int TOTAL_PARAM_NUM = 5;
// Users can add any check on their need. If check fails, user can return any value larger than 0 to safely exit.
// Return value not equal to 0 will cause MindSpore to stop computing and safely exit.
// This is to check if the num of parameters the same as what the user wants.
// There are two inputs and three outputs, so the nparam should be 5.
if (nparam != TOTAL_PARAM_NUM) {
return 1;
}
// This is to check if the type of parameters the same as what the user wants.
// NOTE(review): strcmp requires <cstring>/<string.h>, which this file does
// not include directly — presumably pulled in transitively; confirm.
for (int i = 0; i < nparam; i++) {
if (strcmp(dtypes[i], "float32") != 0) {
return 2;
}
}
// input1's index is 0, input2's index is 1, output1's index is 2, output2's index is 3 and output3's index is 4
void *input1 = params[0];
void *input2 = params[1];
void *output1 = params[2];
void *output2 = params[3];
void *output3 = params[4];
size_t size = 1;
// Cumprod of output's shape to compute elements' num
for (int i = 0; i < ndims[OUTPUT_INDEX]; i++) {
size *= shapes[OUTPUT_INDEX][i];
}
// n + 1 blocks cover the remainder when size is not a multiple of THREADS;
// the kernel's bounds check discards the surplus threads.
int n = size / THREADS;
// Do the computation
hipLaunchKernelGGL(( CustomAddMulDivKernel), dim3(n + 1), dim3(THREADS), 0, custream, static_cast<float *>(input1), static_cast<float *>(input2),
static_cast<float *>(output1), static_cast<float *>(output2),
static_cast<float *>(output3), size);
// When return 0, MindSpore will continue to run if this kernel could launch successfully.
return 0;
}
| 18c4d8cb85ae29a8843d5533ddec142fa4376bfe.cu | /**
* Copyright 2021-2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
constexpr int THREADS = 1024;
// Element-wise kernel: for every index below `size`, writes the sum,
// product and quotient of the two inputs into the three output buffers.
// One thread per element; threads past the end simply return.
__global__ void CustomAddMulDivKernel(float *input1, float *input2, float *output1, float *output2, float *output3,
                                      size_t size) {
  size_t pos = static_cast<size_t>(blockIdx.x) * THREADS + threadIdx.x;
  if (pos >= size) {
    return;
  }
  const float lhs = input1[pos];
  const float rhs = input2[pos];
  output1[pos] = lhs + rhs;
  output2[pos] = lhs * rhs;
  output3[pos] = lhs / rhs;
}
// Entry point invoked by MindSpore's aot Custom-op mechanism.
// params holds the 5 device pointers (2 inputs followed by 3 outputs);
// ndims/shapes describe each tensor; dtypes gives each tensor's dtype name;
// stream is the CUDA stream the kernel must be enqueued on.
// Returns 0 on success; any non-zero value makes MindSpore stop computing.
extern "C" int CustomAddMulDiv(int nparam, void **params, int *ndims, int64_t **shapes, const char **dtypes,
void *stream, void *extra) {
cudaStream_t custream = static_cast<cudaStream_t>(stream);
constexpr int OUTPUT_INDEX = 2;
constexpr int TOTAL_PARAM_NUM = 5;
// Users can add any check on their need. If check fails, user can return any value larger than 0 to safely exit.
// Return value not equal to 0 will cause MindSpore to stop computing and safely exit.
// This is to check if the num of parameters the same as what the user wants.
// There are two inputs and three outputs, so the nparam should be 5.
if (nparam != TOTAL_PARAM_NUM) {
return 1;
}
// This is to check if the type of parameters the same as what the user wants.
// NOTE(review): strcmp requires <cstring>/<string.h>, which this file does
// not include directly — presumably pulled in transitively; confirm.
for (int i = 0; i < nparam; i++) {
if (strcmp(dtypes[i], "float32") != 0) {
return 2;
}
}
// input1's index is 0, input2's index is 1, output1's index is 2, output2's index is 3 and output3's index is 4
void *input1 = params[0];
void *input2 = params[1];
void *output1 = params[2];
void *output2 = params[3];
void *output3 = params[4];
size_t size = 1;
// Cumprod of output's shape to compute elements' num
for (int i = 0; i < ndims[OUTPUT_INDEX]; i++) {
size *= shapes[OUTPUT_INDEX][i];
}
// n + 1 blocks cover the remainder when size is not a multiple of THREADS;
// the kernel's bounds check discards the surplus threads.
int n = size / THREADS;
// Do the computation
CustomAddMulDivKernel<<<n + 1, THREADS, 0, custream>>>(static_cast<float *>(input1), static_cast<float *>(input2),
static_cast<float *>(output1), static_cast<float *>(output2),
static_cast<float *>(output3), size);
// When return 0, MindSpore will continue to run if this kernel could launch successfully.
return 0;
}
|
49c1be14f0a445f0e30d77119ce6d2f11334bfb7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
// Strided, offset element-wise square root:
//   y[offset_y + i*stride_y] = sqrt(x[offset_x + i*stride_x]) for i in [0, n).
// CAST(sqrt) resolves to sqrtf when REAL is float (see the macros above).
__global__ void vector_sqrt (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n) {
        return;
    }
    y[offset_y + gid * stride_y] = CAST(sqrt)(x[offset_x + gid * stride_x]);
} | 49c1be14f0a445f0e30d77119ce6d2f11334bfb7.cu | #include "includes.h"
extern "C" {
#ifndef REAL
#define REAL float
#endif
#ifndef CAST
#define CAST(fun) fun ## f
#endif
#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif
#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif
}
// Strided, offset element-wise square root:
//   y[offset_y + i*stride_y] = sqrt(x[offset_x + i*stride_x]) for i in [0, n).
// CAST(sqrt) resolves to sqrtf when REAL is float (see the macros above).
__global__ void vector_sqrt (const int n, const REAL* x, const int offset_x, const int stride_x, REAL* y, const int offset_y, const int stride_y) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n) {
        return;
    }
    y[offset_y + gid * stride_y] = CAST(sqrt)(x[offset_x + gid * stride_x]);
} |
4047395dbc355a43990c8fc5b6156d9fb5ba629a.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*
* Slightly modified to provide timing support
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// A helper macro to simplify handling cuda error checking
#define CUDA_ERROR( err, msg ) { \
if (err != hipSuccess) {\
printf( "%s: %s in %s at line %d\n", msg, hipGetErrorString( err ), __FILE__, __LINE__);\
exit( EXIT_FAILURE );\
}\
}
/**
* Host version of vectorAdd
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
/**
 * Host (CPU) reference implementation of the element-wise vector sum
 * C[i] = A[i] + B[i] for i in [0, numElements).
 */
__host__ void
vectorAdd_HOST(const float *A, const float *B, float *C, int numElements)
{
    for (int idx = 0; idx < numElements; ++idx)
    {
        C[idx] = A[idx] + B[idx];
    }
}
/**
* SINGLE thread code that works on Device
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
/**
 * Deliberately serial device version: a single thread walks the whole
 * array, used only as a timing baseline against the parallel kernel.
 * Launch with <<<1, 1>>>.
 */
__global__ void
vectorAdd_SINGLE_THREAD(const float *A, const float *B, float *C, int numElements)
{
    for (int idx = 0; idx < numElements; ++idx)
    {
        C[idx] = A[idx] + B[idx];
    }
}
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
/**
 * Parallel element-wise vector addition: one thread per element, with a
 * bounds guard for the grid tail when numElements is not a multiple of
 * the block size.
 */
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;
    C[idx] = A[idx] + B[idx];
}
// Abort the whole program if the two vectors differ by more than 1e-5
// at any index; returns normally when they match element-wise.
static void compare_results(const float *vector1, const float *vector2, int numElements)
{
    const float tolerance = 1e-5f;
    for (int idx = 0; idx < numElements; ++idx)
    {
        if (fabs(vector1[idx] - vector2[idx]) <= tolerance)
            continue;
        fprintf(stderr, "Result verification failed at element %d!\n", idx);
        exit(EXIT_FAILURE);
    }
}
/**
* Host main routine
*/
/**
 * Host main routine: runs the vector add on the host, on the device with a
 * single thread, and on the device fully parallel, timing each variant and
 * verifying the device results against the host reference.
 */
int
main(void)
{
    // Error code to check return values for CUDA calls
    hipError_t err = hipSuccess;
    // Create Host stopwatch timer
    StopWatchInterface * timer = NULL ;
    sdkCreateTimer (& timer );
    double h_msecs ;
    // Create Device timer event objects
    hipEvent_t start , stop ;
    float d_msecs ;
    hipEventCreate (& start );
    hipEventCreate (& stop ) ;
    // Print the vector length to be used, and compute its size
    // int numElements = 50000; // original
    // int numElements = 50000000; // 50m
    // int numElements = 100000000; // 100m
    int numElements = 170000000; // 170m
    size_t size = numElements * sizeof(float);
    printf("[Vector addition of %d elements]\n", numElements);
    // Allocate the host input vector A B and C
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);
    // Allocate the host output vector that will contain the sum calculate by the Host
    float *h_SUM = (float *)malloc(size);
    // Verify that allocations succeeded
    if (h_A == NULL || h_B == NULL || h_C == NULL || h_SUM == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    // Initialise the host input vectors
    for (int i = 0; i < numElements; ++i)
    {
        h_A[i] = rand()/(float)RAND_MAX;
        h_B[i] = rand()/(float)RAND_MAX;
    }
    //
    // Execute the vector addition on the Host and time it:
    //
    sdkStartTimer (& timer );
    vectorAdd_HOST(h_A, h_B, h_SUM, numElements);
    sdkStopTimer (& timer );
    h_msecs = sdkGetTimerValue (& timer );
    printf("Executed vector add of %d elements on the Host in = %.5fmSecs\n", numElements, h_msecs);
    // Allocate the device input vector A, B and C
    float *d_A = NULL;
    err = hipMalloc((void **)&d_A, size);
    CUDA_ERROR(err, "Failed to allocate device vector A");
    float *d_B = NULL;
    err = hipMalloc((void **)&d_B, size);
    CUDA_ERROR(err, "Failed to allocate device vector B");
    float *d_C = NULL;
    err = hipMalloc((void **)&d_C, size);
    CUDA_ERROR(err, "Failed to allocate device vector C");
    // Copy the host input vectors A and B in host memory to the device input vectors in device memory
    printf("Copy input data from the host memory to the CUDA device\n");
    err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
    CUDA_ERROR(err, "Failed to copy vector A from host to device");
    err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
    CUDA_ERROR(err, "Failed to copy vector B from host to device");
    //
    // Execute the vector addition on the Device IN A SINGLE THREAD and time it:
    //
    hipEventRecord( start, 0 );
    hipLaunchKernelGGL(( vectorAdd_SINGLE_THREAD), dim3(1), dim3(1), 0, 0, d_A, d_B, d_C, numElements);
    hipEventRecord( stop, 0 );
    hipEventSynchronize( stop );
    // wait for device to finish
    hipDeviceSynchronize();
    err = hipGetLastError();
    CUDA_ERROR(err, "Failed to launch vectorAdd kernel");
    err = hipEventElapsedTime( &d_msecs, start, stop );
    CUDA_ERROR(err, "Failed to get elapsed time");
    printf("Executed vector add of %d elements on the Device in a SINGLE THREAD in = %.5fmSecs\n", numElements, d_msecs);
    float d_msecs_single = d_msecs;
    printf("d_msecs_single: %.5fmSecs\n", d_msecs_single);
    // Copy the device result vector in device memory to the host result vector
    // in host memory.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
    CUDA_ERROR(err, "Failed to copy vector C from device to host");
    // Verify that the result vector is correct
    compare_results(h_SUM, h_C, numElements);
    printf("Test PASSED\n");
    // Launch the Vector Add CUDA Kernel
    // NOTE(review): 32 threads/block is a full warp but low for occupancy;
    // 128-256 is the usual starting point — worth profiling.
    int threadsPerBlock = 32;
    // Note this pattern, based on integer division, for rounding up
    int blocksPerGrid = 1 + ((numElements - 1) / threadsPerBlock);
    printf("Launching the CUDA kernel with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    //
    // Execute the vector addition on the Device in multiple threads and time it:
    //
    hipEventRecord( start, 0 );
    hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
    hipEventRecord( stop, 0 );
    hipEventSynchronize( stop );
    // wait for device to finish
    hipDeviceSynchronize();
    err = hipGetLastError();
    CUDA_ERROR(err, "Failed to launch vectorAdd kernel");
    err = hipEventElapsedTime( &d_msecs, start, stop );
    CUDA_ERROR(err, "Failed to get elapsed time");
    printf("Executed vector add of %d elements on the Device in %d blocks of %d threads in = %.5fmSecs\n",
           numElements, blocksPerGrid, threadsPerBlock, d_msecs);
    float d_msecs_parallel = d_msecs;
    printf("d_msecs_parallel: %.5fmSecs\n", d_msecs_parallel);
    printf("Task B:\n");
    printf("Speedup rel. to host version: %.3fx\n", (h_msecs / d_msecs_parallel));
    printf("Speedup rel. to single threaded device version: %.3fx\n", (d_msecs_single / d_msecs_parallel));
    // Copy the device result vector in device memory to the host result vector
    // in host memory.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
    CUDA_ERROR(err, "Failed to copy vector C from device to host");
    // Verify that the result vector is correct
    compare_results(h_SUM, h_C, numElements);
    printf("Test PASSED\n");
    // Free device global memory
    err = hipFree(d_A);
    CUDA_ERROR(err, "Failed to free device vector A");
    err = hipFree(d_B);
    CUDA_ERROR(err, "Failed to free device vector B");
    err = hipFree(d_C);
    CUDA_ERROR(err, "Failed to free device vector C");
    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    // BUG FIX: h_SUM was allocated but never released.
    free(h_SUM);
    // Clean up the Host timer
    sdkDeleteTimer (& timer );
    // Clean up the Device timer event objects
    hipEventDestroy ( start );
    hipEventDestroy ( stop );
    // Reset the device and exit
    err = hipDeviceReset();
    CUDA_ERROR(err, "Failed to reset the device");
    printf("Done\n");
    return 0;
}
| 4047395dbc355a43990c8fc5b6156d9fb5ba629a.cu | /**
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*
* Slightly modified to provide timing support
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
// A helper macro to simplify handling cuda error checking
#define CUDA_ERROR( err, msg ) { \
if (err != cudaSuccess) {\
printf( "%s: %s in %s at line %d\n", msg, cudaGetErrorString( err ), __FILE__, __LINE__);\
exit( EXIT_FAILURE );\
}\
}
/**
* Host version of vectorAdd
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
/**
 * Host (CPU) reference implementation of the element-wise vector sum
 * C[i] = A[i] + B[i] for i in [0, numElements).
 */
__host__ void
vectorAdd_HOST(const float *A, const float *B, float *C, int numElements)
{
    for (int idx = 0; idx < numElements; ++idx)
    {
        C[idx] = A[idx] + B[idx];
    }
}
/**
* SINGLE thread code that works on Device
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
/**
 * Deliberately serial device version: a single thread walks the whole
 * array, used only as a timing baseline against the parallel kernel.
 * Launch with <<<1, 1>>>.
 */
__global__ void
vectorAdd_SINGLE_THREAD(const float *A, const float *B, float *C, int numElements)
{
    for (int idx = 0; idx < numElements; ++idx)
    {
        C[idx] = A[idx] + B[idx];
    }
}
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
/**
 * Parallel element-wise vector addition: one thread per element, with a
 * bounds guard for the grid tail when numElements is not a multiple of
 * the block size.
 */
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;
    C[idx] = A[idx] + B[idx];
}
// Abort the whole program if the two vectors differ by more than 1e-5
// at any index; returns normally when they match element-wise.
static void compare_results(const float *vector1, const float *vector2, int numElements)
{
    const float tolerance = 1e-5f;
    for (int idx = 0; idx < numElements; ++idx)
    {
        if (fabs(vector1[idx] - vector2[idx]) <= tolerance)
            continue;
        fprintf(stderr, "Result verification failed at element %d!\n", idx);
        exit(EXIT_FAILURE);
    }
}
/**
* Host main routine
*/
/**
 * Host main routine: runs the vector add on the host, on the device with a
 * single thread, and on the device fully parallel, timing each variant and
 * verifying the device results against the host reference.
 */
int
main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    // Create Host stopwatch timer
    StopWatchInterface * timer = NULL ;
    sdkCreateTimer (& timer );
    double h_msecs ;
    // Create Device timer event objects
    cudaEvent_t start , stop ;
    float d_msecs ;
    cudaEventCreate (& start );
    cudaEventCreate (& stop ) ;
    // Print the vector length to be used, and compute its size
    // int numElements = 50000; // original
    // int numElements = 50000000; // 50m
    // int numElements = 100000000; // 100m
    int numElements = 170000000; // 170m
    size_t size = numElements * sizeof(float);
    printf("[Vector addition of %d elements]\n", numElements);
    // Allocate the host input vector A B and C
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);
    // Allocate the host output vector that will contain the sum calculate by the Host
    float *h_SUM = (float *)malloc(size);
    // Verify that allocations succeeded
    if (h_A == NULL || h_B == NULL || h_C == NULL || h_SUM == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    // Initialise the host input vectors
    for (int i = 0; i < numElements; ++i)
    {
        h_A[i] = rand()/(float)RAND_MAX;
        h_B[i] = rand()/(float)RAND_MAX;
    }
    //
    // Execute the vector addition on the Host and time it:
    //
    sdkStartTimer (& timer );
    vectorAdd_HOST(h_A, h_B, h_SUM, numElements);
    sdkStopTimer (& timer );
    h_msecs = sdkGetTimerValue (& timer );
    printf("Executed vector add of %d elements on the Host in = %.5fmSecs\n", numElements, h_msecs);
    // Allocate the device input vector A, B and C
    float *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);
    CUDA_ERROR(err, "Failed to allocate device vector A");
    float *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size);
    CUDA_ERROR(err, "Failed to allocate device vector B");
    float *d_C = NULL;
    err = cudaMalloc((void **)&d_C, size);
    CUDA_ERROR(err, "Failed to allocate device vector C");
    // Copy the host input vectors A and B in host memory to the device input vectors in device memory
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    CUDA_ERROR(err, "Failed to copy vector A from host to device");
    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    CUDA_ERROR(err, "Failed to copy vector B from host to device");
    //
    // Execute the vector addition on the Device IN A SINGLE THREAD and time it:
    //
    cudaEventRecord( start, 0 );
    vectorAdd_SINGLE_THREAD<<<1, 1>>>(d_A, d_B, d_C, numElements);
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    // wait for device to finish
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    CUDA_ERROR(err, "Failed to launch vectorAdd kernel");
    err = cudaEventElapsedTime( &d_msecs, start, stop );
    CUDA_ERROR(err, "Failed to get elapsed time");
    printf("Executed vector add of %d elements on the Device in a SINGLE THREAD in = %.5fmSecs\n", numElements, d_msecs);
    float d_msecs_single = d_msecs;
    printf("d_msecs_single: %.5fmSecs\n", d_msecs_single);
    // Copy the device result vector in device memory to the host result vector
    // in host memory.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    CUDA_ERROR(err, "Failed to copy vector C from device to host");
    // Verify that the result vector is correct
    compare_results(h_SUM, h_C, numElements);
    printf("Test PASSED\n");
    // Launch the Vector Add CUDA Kernel
    // NOTE(review): 32 threads/block is a full warp but low for occupancy;
    // 128-256 is the usual starting point — worth profiling.
    int threadsPerBlock = 32;
    // Note this pattern, based on integer division, for rounding up
    int blocksPerGrid = 1 + ((numElements - 1) / threadsPerBlock);
    printf("Launching the CUDA kernel with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    //
    // Execute the vector addition on the Device in multiple threads and time it:
    //
    cudaEventRecord( start, 0 );
    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    // wait for device to finish
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    CUDA_ERROR(err, "Failed to launch vectorAdd kernel");
    err = cudaEventElapsedTime( &d_msecs, start, stop );
    CUDA_ERROR(err, "Failed to get elapsed time");
    printf("Executed vector add of %d elements on the Device in %d blocks of %d threads in = %.5fmSecs\n",
           numElements, blocksPerGrid, threadsPerBlock, d_msecs);
    float d_msecs_parallel = d_msecs;
    printf("d_msecs_parallel: %.5fmSecs\n", d_msecs_parallel);
    printf("Task B:\n");
    printf("Speedup rel. to host version: %.3fx\n", (h_msecs / d_msecs_parallel));
    printf("Speedup rel. to single threaded device version: %.3fx\n", (d_msecs_single / d_msecs_parallel));
    // Copy the device result vector in device memory to the host result vector
    // in host memory.
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    CUDA_ERROR(err, "Failed to copy vector C from device to host");
    // Verify that the result vector is correct
    compare_results(h_SUM, h_C, numElements);
    printf("Test PASSED\n");
    // Free device global memory
    err = cudaFree(d_A);
    CUDA_ERROR(err, "Failed to free device vector A");
    err = cudaFree(d_B);
    CUDA_ERROR(err, "Failed to free device vector B");
    err = cudaFree(d_C);
    CUDA_ERROR(err, "Failed to free device vector C");
    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    // BUG FIX: h_SUM was allocated but never released.
    free(h_SUM);
    // Clean up the Host timer
    sdkDeleteTimer (& timer );
    // Clean up the Device timer event objects
    cudaEventDestroy ( start );
    cudaEventDestroy ( stop );
    // Reset the device and exit
    err = cudaDeviceReset();
    CUDA_ERROR(err, "Failed to reset the device");
    printf("Done\n");
    return 0;
}
|
670cfb857f39f68612ba15a6f531aee950995d12.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
#include <math.h>
//#include <GL/glut.h>
//#include <GL/gl.h>
#include<stdio.h>
#include<stdlib.h>
#include <time.h>
#include <algorithm>
#include "timer.h"
static int imagewidth;
static int imageheight;
static int pixellength;
static unsigned char* pixeldata;
static int imagewidth1;
static int imageheight1;
static int pixellength1;
static unsigned char* pixeldata1;
#define N 710*512 //
#define blocks 710 //
#define threads 512 //
//CUDA kernel********************************************************************
// Fill the color fields (elements 0..2) of each 6-float point record in gc.
// One thread per pixel; the launch is exactly <<<710, 512>>> so i spans
// [0, N) with no tail block -- hence no explicit bounds guard for that
// configuration. Records start after a 5120-record prefix in gc
// (presumably reserved for the host-built point list 'c' -- TODO confirm).
// 0.00390625 == 1/256, i.e. scales an 8-bit channel into [0, 1).
// Parameter 'a' (depth) is unused here; fields 3..5 (x, y, depth) are
// written by the companion kernel add2.
__global__ void add(int *a, int *r, int *g, int *b, float *gc)
{
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
gc[5120 * 6 + i * 6 ] = b[i] * 0.00390625;
//gc[5120 * 6 + i * 6 ] = float(b[i]) / 256;
gc[5120 * 6 + i * 6 + 1] = g[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 1] = float(g[i]) / 256;
gc[5120 * 6 + i * 6 + 2] = r[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 2] = float(r[i]) / 256;
// gc[5120 * 6 + i * 6 + 3] = float(i - ((i>>9)<<9) ); // i%512
//gc[5120 * 6 + i * 6 + 3] = float(i % 512);
// gc[5120 * 6 + i * 6 + 4] = float( i >> 9);
//gc[5120 * 6 + i * 6 + 4] = float((i - (i % 512)) / 512);
// gc[5120 * 6 + i * 6 + 5] = float(a[i]);
}
// Fill the coordinate/depth fields (elements 3..5) of each 6-float point
// record in gc: x = i % 512 (via bit ops, since the row width is 512),
// y = i / 512, z = the depth value a[i]. Complements kernel 'add', which
// writes the color fields 0..2. Parameters r, g, b are unused here.
// Launch must be exactly <<<710, 512>>> (no bounds guard).
__global__ void add2(int *a, int *r, int *g, int *b, float *gc)
{
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
// gc[5120 * 6 + i * 6 ] = b[i] * 0.00390625;
//gc[5120 * 6 + i * 6 ] = float(b[i]) / 256;
// gc[5120 * 6 + i * 6 + 1] = g[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 1] = float(g[i]) / 256;
// gc[5120 * 6 + i * 6 + 2] = r[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 2] = float(r[i]) / 256;
gc[5120 * 6 + i * 6 + 3] = float(i - ((i>>9)<<9) ); // i%512
//gc[5120 * 6 + i * 6 + 3] = float(i % 512);
gc[5120 * 6 + i * 6 + 4] = float( i >> 9);
//gc[5120 * 6 + i * 6 + 4] = float((i - (i % 512)) / 512);
gc[5120 * 6 + i * 6 + 5] = float(a[i]);
}
float c[6 * N + 5120 * 6] = { 0.0 };
float f[6 * N + 5120 * 6] = { 0.0 };
// Read a depth map (1.bmp) and a color image (2.bmp), repair small holes in
// the depth map, build a 6-float-per-point cloud on the host, then fill the
// color fields on GPU_A (kernel 'add') and the coordinate fields on GPU_B
// via peer access (kernel 'add2'), timing compute and transfers separately.
int main(int argc, char* argv[]) {
    clock_t start = clock();  // program start time ('finish' was declared but never used)

    //******************************************************************************
    // Load the depth map (1.bmp)
    FILE* pfile = fopen("1.bmp", "rb");
    if (pfile == 0) exit(0);
    // BMP header: width/height are 32-bit ints at offset 0x12
    fseek(pfile, 0x0012, SEEK_SET);
    fread(&imagewidth, sizeof(imagewidth), 1, pfile);
    fread(&imageheight, sizeof(imageheight), 1, pfile);
    // Each BMP row is 3 bytes/pixel, padded up to a multiple of 4 bytes
    pixellength = imagewidth * 3;
    while (pixellength % 4 != 0) pixellength++;
    pixellength *= imageheight;
    // Pixel data begins at byte 54 (standard BMP header size)
    pixeldata = (unsigned char*)malloc(pixellength);
    if (pixeldata == 0) exit(0);
    fseek(pfile, 54, SEEK_SET);
    fread(pixeldata, pixellength, 1, pfile);
    // 'static' keeps this ~1.4 MB array off the stack; four automatics of
    // this size would approach the typical 8 MB stack limit.
    static int shen[N];
    // FIX: loop previously ran i <= N, writing one element past the array.
    for (int i = 0; i < N; i++)
        shen[i] = pixeldata[3 * i];
    fclose(pfile);

    //******************************************************************************
    // Load the color/brightness image (2.bmp) the same way
    FILE* pfile1 = fopen("2.bmp", "rb");
    if (pfile1 == 0) exit(0);
    fseek(pfile1, 0x0012, SEEK_SET);
    fread(&imagewidth1, sizeof(imagewidth1), 1, pfile1);
    fread(&imageheight1, sizeof(imageheight1), 1, pfile1);
    pixellength1 = imagewidth1 * 3;
    while (pixellength1 % 4 != 0) pixellength1++;
    pixellength1 *= imageheight1;
    pixeldata1 = (unsigned char*)malloc(pixellength1);
    if (pixeldata1 == 0) exit(0);
    fseek(pfile1, 54, SEEK_SET);
    fread(pixeldata1, pixellength1, 1, pfile1);
    static int red[N];
    static int green[N];
    static int blue[N];
    // NOTE(review): BMP stores pixels as B,G,R, so "red" here likely holds
    // the blue channel (and vice versa); kept as-is to preserve behavior.
    // FIX: loop previously ran i <= N (one-past-the-end writes).
    for (int i = 0; i < N; i++)
    {
        red[i] = pixeldata1[3 * i];
        green[i] = pixeldata1[3 * i + 1];
        blue[i] = pixeldata1[3 * i + 2];
    }
    fclose(pfile1);

    //******************************************************************************
    // Repair holes (depth == 0) inside the region of interest by filling
    // gaps of up to 20 pixels from the left and upper neighbors.
    for (int yo = 220; yo <= 390; yo++)
    {
        for (int xo = 212; xo <= 292; xo++)
        {
            if (shen[512 * yo + xo] == 0)
            {
                // FIX: 'num' previously accumulated across all gaps instead
                // of being reset per gap, growing the fill run unboundedly.
                int num = 0;
                for (int a = xo; a <= xo + 20; a++)
                {
                    num++;  // count pixels in this gap
                    if (shen[a + 512 * yo] != 0)
                    {
                        break;
                    }
                }
                for (int r = 0; r < num; r++)  // linear fill of the gap
                    shen[512 * yo + xo + r] = (shen[512 * yo + xo + r - 1] + shen[512 * yo + xo + r - 512]) / 2;
            }
        }
    }

    //******************************************************************************
    // Locate the maximum depth value in the region of interest
    int z1 = 0; int xbz = 0; int ybz = 0;
    for (int y0 = 220; y0 <= 390; y0++)
    {
        for (int x0 = 212; x0 <= 292; x0++)
        {
            if (shen[y0 * 512 + x0] > z1)
            {
                xbz = x0;
                ybz = y0;
                z1 = shen[y0 * 512 + x0];
            }
        }
    }
    // 90-pixel window centered on the peak
    int x1 = xbz - 90;
    int x2 = xbz + 90;
    int y1 = ybz - 90;
    int y2 = ybz + 90;
    cout << xbz << " " << 711 - ybz << endl;
    // Build point records on the host: wherever horizontally adjacent depths
    // differ by >= 4, emit one 6-float record (B,G,R,x,y,z) for every
    // intermediate depth level between them.
    int s = 0; int n = 0, m = 0, j = 0, q = 0, k = 0;
    for (int y = y1; y <= y2; y++)
    {
        for (int x = x1; x <= x2; x++)
        {
            n = shen[y * 512 + x];
            m = shen[y * 512 + x + 1];
            j = blue[y * 512 + x + 1024];   // color sampled 2 rows (1024 = 2*512) below -- TODO confirm offset
            q = green[y * 512 + x + 1024];
            k = red[y * 512 + x + 1024];
            if (abs(n - m) >= 4)
            {
                for (int p = 1; p <= (abs(n - m) - 1); p++)
                {
                    c[s * 6] = float(j) / 256;
                    c[s * 6 + 1] = float(q) / 256;
                    c[s * 6 + 2] = float(k) / 256;
                    c[s * 6 + 3] = float(x);
                    c[s * 6 + 4] = float(y);
                    c[s * 6 + 5] = float(max(n, m) - p);
                    s++;
                }
            }
        }
    }

    //CUDA part*********************************************************************
    struct stopwatch_t* timerA1 = NULL;
    struct stopwatch_t* timerA2 = NULL;
    struct stopwatch_t* timerA3 = NULL;
    long double compA;   // kernel time on GPU_A
    long double commA1;  // host->device transfer time
    long double commA2;  // total communication time (both directions)
    /* initialize timer */
    stopwatch_init ();
    timerA1 = stopwatch_create ();
    timerA2 = stopwatch_create ();
    timerA3 = stopwatch_create ();
    int *dev_a = 0;
    int *dev_r = 0;
    int *dev_g = 0;
    int *dev_b = 0;
    float *dev_c = 0;
    int GPU_A = 0;
    int GPU_B = 1;
    hipSetDevice(GPU_A);
    hipMalloc((void**)&dev_a, (N) * sizeof(int));
    stopwatch_start (timerA1);
    hipMalloc((void**)&dev_r, (N) * sizeof(int));
    hipMalloc((void**)&dev_g, (N) * sizeof(int));
    hipMalloc((void**)&dev_b, (N) * sizeof(int));
    hipMalloc((void**)&dev_c, 6 * (N + 5120) * sizeof(float));
    hipMemcpy(dev_a, shen, (N) * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_r, red, (N) * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_g, green, (N) * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, blue, (N) * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_c, c, (N + 5120) * 6 * sizeof(float), hipMemcpyHostToDevice);
    commA1 = stopwatch_stop (timerA1);
    stopwatch_start (timerA2);
    hipLaunchKernelGGL(( add) , dim3(blocks), dim3(threads) , 0, 0, dev_a, dev_r, dev_g, dev_b, dev_c);
    compA = stopwatch_stop (timerA2);
    stopwatch_start (timerA3);
    hipMemcpy(f, dev_c, 6 * (N + 5120) * sizeof(float), hipMemcpyDeviceToHost);
    commA2 = stopwatch_stop (timerA3) + commA1;
    // Run the coordinate/depth kernel on GPU_B through peer access, if available.
    int accessible = 0;
    hipDeviceCanAccessPeer(&accessible, GPU_B, GPU_A);
    if(accessible){
        hipSetDevice(GPU_B);
        hipDeviceEnablePeerAccess(GPU_A,0);
        hipLaunchKernelGGL(( add2) , dim3(blocks), dim3(threads) , 0, 0, dev_a, dev_r, dev_g, dev_b, dev_c);
        hipDeviceSynchronize();  // let the peer kernel finish before freeing
    }
    // FIX: device buffers were previously freed *before* the peer-access
    // kernel above consumed them (use-after-free); free them last instead.
    hipSetDevice(GPU_A);
    hipFree(dev_a);
    hipFree(dev_r);
    hipFree(dev_g);
    hipFree(dev_b);
    hipFree(dev_c);
    printf ("Computation time on GPU_A is: %Lg secs\n", compA);
    printf ("Communication time on GPU_A is: %Lg secs\n", commA2);
    return 0;
}
| 670cfb857f39f68612ba15a6f531aee950995d12.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
#include <math.h>
//#include <GL/glut.h>
//#include <GL/gl.h>
#include<stdio.h>
#include<stdlib.h>
#include <time.h>
#include <algorithm>
#include "timer.h"
static int imagewidth;
static int imageheight;
static int pixellength;
static unsigned char* pixeldata;
static int imagewidth1;
static int imageheight1;
static int pixellength1;
static unsigned char* pixeldata1;
#define N 710*512 //图像分辨率
#define blocks 710 //圖像高度
#define threads 512 //圖像寬度
//CUDA kernel函數********************************************************************
// Fill the color fields (elements 0..2) of each 6-float point record in gc.
// One thread per pixel; the launch is exactly <<<710, 512>>> so i spans
// [0, N) with no tail block -- hence no explicit bounds guard for that
// configuration. Records start after a 5120-record prefix in gc
// (presumably reserved for the host-built point list 'c' -- TODO confirm).
// 0.00390625 == 1/256, i.e. scales an 8-bit channel into [0, 1).
// Parameter 'a' (depth) is unused here; fields 3..5 (x, y, depth) are
// written by the companion kernel add2.
__global__ void add(int *a, int *r, int *g, int *b, float *gc)
{
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
gc[5120 * 6 + i * 6 ] = b[i] * 0.00390625;
//gc[5120 * 6 + i * 6 ] = float(b[i]) / 256;
gc[5120 * 6 + i * 6 + 1] = g[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 1] = float(g[i]) / 256;
gc[5120 * 6 + i * 6 + 2] = r[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 2] = float(r[i]) / 256;
// gc[5120 * 6 + i * 6 + 3] = float(i - ((i>>9)<<9) ); // i%512
//gc[5120 * 6 + i * 6 + 3] = float(i % 512);
// gc[5120 * 6 + i * 6 + 4] = float( i >> 9);
//gc[5120 * 6 + i * 6 + 4] = float((i - (i % 512)) / 512);
// gc[5120 * 6 + i * 6 + 5] = float(a[i]);
}
// Fill the coordinate/depth fields (elements 3..5) of each 6-float point
// record in gc: x = i % 512 (via bit ops, since the row width is 512),
// y = i / 512, z = the depth value a[i]. Complements kernel 'add', which
// writes the color fields 0..2. Parameters r, g, b are unused here.
// Launch must be exactly <<<710, 512>>> (no bounds guard).
__global__ void add2(int *a, int *r, int *g, int *b, float *gc)
{
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
// gc[5120 * 6 + i * 6 ] = b[i] * 0.00390625;
//gc[5120 * 6 + i * 6 ] = float(b[i]) / 256;
// gc[5120 * 6 + i * 6 + 1] = g[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 1] = float(g[i]) / 256;
// gc[5120 * 6 + i * 6 + 2] = r[i] * 0.00390625;
//gc[5120 * 6 + i * 6 + 2] = float(r[i]) / 256;
gc[5120 * 6 + i * 6 + 3] = float(i - ((i>>9)<<9) ); // i%512
//gc[5120 * 6 + i * 6 + 3] = float(i % 512);
gc[5120 * 6 + i * 6 + 4] = float( i >> 9);
//gc[5120 * 6 + i * 6 + 4] = float((i - (i % 512)) / 512);
gc[5120 * 6 + i * 6 + 5] = float(a[i]);
}
float c[6 * N + 5120 * 6] = { 0.0 };
float f[6 * N + 5120 * 6] = { 0.0 };
// Read a depth map (1.bmp) and a color image (2.bmp), repair small holes in
// the depth map, build a 6-float-per-point cloud on the host, then fill the
// color fields on GPU_A (kernel 'add') and the coordinate fields on GPU_B
// via peer access (kernel 'add2'), timing compute and transfers separately.
int main(int argc, char* argv[]) {
    clock_t start = clock();  // program start time ('finish' was declared but never used)

    //******************************************************************************
    // Load the depth map (1.bmp)
    FILE* pfile = fopen("1.bmp", "rb");
    if (pfile == 0) exit(0);
    // BMP header: width/height are 32-bit ints at offset 0x12
    fseek(pfile, 0x0012, SEEK_SET);
    fread(&imagewidth, sizeof(imagewidth), 1, pfile);
    fread(&imageheight, sizeof(imageheight), 1, pfile);
    // Each BMP row is 3 bytes/pixel, padded up to a multiple of 4 bytes
    pixellength = imagewidth * 3;
    while (pixellength % 4 != 0) pixellength++;
    pixellength *= imageheight;
    // Pixel data begins at byte 54 (standard BMP header size)
    pixeldata = (unsigned char*)malloc(pixellength);
    if (pixeldata == 0) exit(0);
    fseek(pfile, 54, SEEK_SET);
    fread(pixeldata, pixellength, 1, pfile);
    // 'static' keeps this ~1.4 MB array off the stack; four automatics of
    // this size would approach the typical 8 MB stack limit.
    static int shen[N];
    // FIX: loop previously ran i <= N, writing one element past the array.
    for (int i = 0; i < N; i++)
        shen[i] = pixeldata[3 * i];
    fclose(pfile);

    //******************************************************************************
    // Load the color/brightness image (2.bmp) the same way
    FILE* pfile1 = fopen("2.bmp", "rb");
    if (pfile1 == 0) exit(0);
    fseek(pfile1, 0x0012, SEEK_SET);
    fread(&imagewidth1, sizeof(imagewidth1), 1, pfile1);
    fread(&imageheight1, sizeof(imageheight1), 1, pfile1);
    pixellength1 = imagewidth1 * 3;
    while (pixellength1 % 4 != 0) pixellength1++;
    pixellength1 *= imageheight1;
    pixeldata1 = (unsigned char*)malloc(pixellength1);
    if (pixeldata1 == 0) exit(0);
    fseek(pfile1, 54, SEEK_SET);
    fread(pixeldata1, pixellength1, 1, pfile1);
    static int red[N];
    static int green[N];
    static int blue[N];
    // NOTE(review): BMP stores pixels as B,G,R, so "red" here likely holds
    // the blue channel (and vice versa); kept as-is to preserve behavior.
    // FIX: loop previously ran i <= N (one-past-the-end writes).
    for (int i = 0; i < N; i++)
    {
        red[i] = pixeldata1[3 * i];
        green[i] = pixeldata1[3 * i + 1];
        blue[i] = pixeldata1[3 * i + 2];
    }
    fclose(pfile1);

    //******************************************************************************
    // Repair holes (depth == 0) inside the region of interest by filling
    // gaps of up to 20 pixels from the left and upper neighbors.
    for (int yo = 220; yo <= 390; yo++)
    {
        for (int xo = 212; xo <= 292; xo++)
        {
            if (shen[512 * yo + xo] == 0)
            {
                // FIX: 'num' previously accumulated across all gaps instead
                // of being reset per gap, growing the fill run unboundedly.
                int num = 0;
                for (int a = xo; a <= xo + 20; a++)
                {
                    num++;  // count pixels in this gap
                    if (shen[a + 512 * yo] != 0)
                    {
                        break;
                    }
                }
                for (int r = 0; r < num; r++)  // linear fill of the gap
                    shen[512 * yo + xo + r] = (shen[512 * yo + xo + r - 1] + shen[512 * yo + xo + r - 512]) / 2;
            }
        }
    }

    //******************************************************************************
    // Locate the maximum depth value in the region of interest
    int z1 = 0; int xbz = 0; int ybz = 0;
    for (int y0 = 220; y0 <= 390; y0++)
    {
        for (int x0 = 212; x0 <= 292; x0++)
        {
            if (shen[y0 * 512 + x0] > z1)
            {
                xbz = x0;
                ybz = y0;
                z1 = shen[y0 * 512 + x0];
            }
        }
    }
    // 90-pixel window centered on the peak
    int x1 = xbz - 90;
    int x2 = xbz + 90;
    int y1 = ybz - 90;
    int y2 = ybz + 90;
    cout << xbz << " " << 711 - ybz << endl;
    // Build point records on the host: wherever horizontally adjacent depths
    // differ by >= 4, emit one 6-float record (B,G,R,x,y,z) for every
    // intermediate depth level between them.
    int s = 0; int n = 0, m = 0, j = 0, q = 0, k = 0;
    for (int y = y1; y <= y2; y++)
    {
        for (int x = x1; x <= x2; x++)
        {
            n = shen[y * 512 + x];
            m = shen[y * 512 + x + 1];
            j = blue[y * 512 + x + 1024];   // color sampled 2 rows (1024 = 2*512) below -- TODO confirm offset
            q = green[y * 512 + x + 1024];
            k = red[y * 512 + x + 1024];
            if (abs(n - m) >= 4)
            {
                for (int p = 1; p <= (abs(n - m) - 1); p++)
                {
                    c[s * 6] = float(j) / 256;
                    c[s * 6 + 1] = float(q) / 256;
                    c[s * 6 + 2] = float(k) / 256;
                    c[s * 6 + 3] = float(x);
                    c[s * 6 + 4] = float(y);
                    c[s * 6 + 5] = float(max(n, m) - p);
                    s++;
                }
            }
        }
    }

    //CUDA part*********************************************************************
    struct stopwatch_t* timerA1 = NULL;
    struct stopwatch_t* timerA2 = NULL;
    struct stopwatch_t* timerA3 = NULL;
    long double compA;   // kernel time on GPU_A
    long double commA1;  // host->device transfer time
    long double commA2;  // total communication time (both directions)
    /* initialize timer */
    stopwatch_init ();
    timerA1 = stopwatch_create ();
    timerA2 = stopwatch_create ();
    timerA3 = stopwatch_create ();
    int *dev_a = 0;
    int *dev_r = 0;
    int *dev_g = 0;
    int *dev_b = 0;
    float *dev_c = 0;
    int GPU_A = 0;
    int GPU_B = 1;
    cudaSetDevice(GPU_A);
    cudaMalloc((void**)&dev_a, (N) * sizeof(int));
    stopwatch_start (timerA1);
    cudaMalloc((void**)&dev_r, (N) * sizeof(int));
    cudaMalloc((void**)&dev_g, (N) * sizeof(int));
    cudaMalloc((void**)&dev_b, (N) * sizeof(int));
    cudaMalloc((void**)&dev_c, 6 * (N + 5120) * sizeof(float));
    cudaMemcpy(dev_a, shen, (N) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_r, red, (N) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_g, green, (N) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, blue, (N) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, c, (N + 5120) * 6 * sizeof(float), cudaMemcpyHostToDevice);
    commA1 = stopwatch_stop (timerA1);
    stopwatch_start (timerA2);
    add <<< blocks, threads >>>(dev_a, dev_r, dev_g, dev_b, dev_c);
    compA = stopwatch_stop (timerA2);
    stopwatch_start (timerA3);
    cudaMemcpy(f, dev_c, 6 * (N + 5120) * sizeof(float), cudaMemcpyDeviceToHost);
    commA2 = stopwatch_stop (timerA3) + commA1;
    // Run the coordinate/depth kernel on GPU_B through peer access, if available.
    int accessible = 0;
    cudaDeviceCanAccessPeer(&accessible, GPU_B, GPU_A);
    if(accessible){
        cudaSetDevice(GPU_B);
        cudaDeviceEnablePeerAccess(GPU_A,0);
        add2 <<< blocks, threads >>>(dev_a, dev_r, dev_g, dev_b, dev_c);
        cudaDeviceSynchronize();  // let the peer kernel finish before freeing
    }
    // FIX: device buffers were previously freed *before* the peer-access
    // kernel above consumed them (use-after-free); free them last instead.
    cudaSetDevice(GPU_A);
    cudaFree(dev_a);
    cudaFree(dev_r);
    cudaFree(dev_g);
    cudaFree(dev_b);
    cudaFree(dev_c);
    printf ("Computation time on GPU_A is: %Lg secs\n", compA);
    printf ("Communication time on GPU_A is: %Lg secs\n", commA2);
    return 0;
}
|
86ed352cc0e77e5fd56449f75ff5b6d1003ef8f4.hip | // !!! This is a file automatically generated by hipify!!!
/**********************************************************************************
Filename : cuda_ProcFunctions.cu
Authors : Kevin Wong, Yifan Jian, Marinko Sarunic
Published : January 30th, 2014
Copyright (C) 2014 Biomedical Optics Research Group - Simon Fraser University
This software contains source code provided by NVIDIA Corporation.
This file is part of a free software. Details of this software has been described
in the paper titled:
"GPU Accelerated OCT Processing at Megahertz Axial Scan Rate and High Resolution Video
Rate Volumetric Rendering"
Please refer to this paper for further information about this software. Redistribution
and modification of this code is restricted to academic purposes ONLY, provided that
the following conditions are met:
- Redistribution of this code must retain the above copyright notice, this list of
conditions and the following disclaimer
- Any use, disclosure, reproduction, or redistribution of this software outside of
academic purposes is strictly prohibited
*DISCLAIMER*
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
**********************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h> //Include the general CUDA Header file
#include <hipfft.h> //This is to perform FFT using CUDA
#include <cutil_inline.h> //This is to perform CUDA safecall functions
#include <hip/hip_runtime.h>
#include "cuda_ProcKernels.cu"
typedef float2 Complex;
int numThreadsPerBlock = 256;
bool mallocArrays = false;
bool dcAcquired = true;
bool lambdaCalculated = false;
bool dispPhaseCalculated = false;
enum bufferType { A, B };
bufferType processBufferID = A;
int frameWidth = 2048;
int frameHeight = 512;
int framesPerBuffer = 1;
int frameCounter = 0;
int bufferSize = frameWidth*frameHeight*framesPerBuffer;
int fftLengthMult = 1;
float minVal;
float maxVal;
float fundusCoeff = 4.0f;
float *dev_tempBuffer;
unsigned short *dev_uShortBufferA;
unsigned short *dev_uShortBufferB;
float *dcArray;
Complex *dev_FFTCompBuffer;
hipfftHandle fft_plan;
hipStream_t memcpyStream;
hipStream_t kernelStream;
/*************************************************************************************************************************/
/*************************************************************************************************************************/
// Run the in-place, batched forward FFT over the interleaved complex
// spectrum buffer. The plan (fft_plan) is created in initProcCuda(), where
// it is also bound to kernelStream.
// NOTE(review): the processStream parameter is accepted but never used --
// the plan's stream is NOT re-bound here; confirm callers always pass
// kernelStream (cudaPipeline does).
void batchFFT(Complex *d_ComplexArray, hipStream_t processStream)
{
cufftSafeCall(
hipfftExecC2C(fft_plan,
(hipfftComplex *)d_ComplexArray,
(hipfftComplex *)d_ComplexArray,
HIPFFT_FORWARD)
);
}
/*************************************************************************************************************************
*************************************************************************************************************************/
// Allocate all device-side buffers, create the copy/compute streams, and
// build the batched 1-D C2C FFT plan. Invoked lazily from cudaPipeline()
// on the first buffer (guarded by the 'mallocArrays' flag).
void initProcCuda()
{
// Separate streams let the host->device copy overlap the processing kernels
hipStreamCreate(&memcpyStream);
hipStreamCreate(&kernelStream);
cutilSafeCall( hipMalloc((void**)&dev_tempBuffer, bufferSize * sizeof(float)));
// Ping-pong buffers for the raw 16-bit capture data (dual-buffer pipeline)
cutilSafeCall( hipMalloc((void**)&dev_uShortBufferA, bufferSize * sizeof(unsigned short)));
cutilSafeCall( hipMalloc((void**)&dev_uShortBufferB, bufferSize * sizeof(unsigned short)));
// One DC value per sample position along an A-scan; zeroed until acquired
cutilSafeCall( hipMalloc((void**)&dcArray, frameWidth * sizeof(float)));
hipMemset(dcArray, 0, frameWidth * sizeof(float));
cutilSafeCall( hipMalloc((void**)&dev_FFTCompBuffer, bufferSize * fftLengthMult * sizeof(Complex)));
//Be sure to have the fft_width size be dynamic
// Batched FFT: one transform per A-scan, frameHeight*framesPerBuffer batches
cufftSafeCall( hipfftPlan1d( &fft_plan, fftLengthMult*frameWidth, HIPFFT_C2C, frameHeight * framesPerBuffer));
cufftSafeCall( hipfftSetStream(fft_plan, kernelStream));
}
// Record the acquisition geometry (A-scan width, B-scan height, frames per
// buffer, FFT zero-pad multiplier) in the module-level globals and pick a
// block size small enough that the grid fits the launch limits.
// Must be called before the first cudaPipeline() call.
extern "C" void initCudaProcVar( int frameWid,
int frameHei,
int framesPerBuff,
int fftLenMult)
{
frameWidth = frameWid;
frameHeight = frameHei;
framesPerBuff = framesPerBuff;
bufferSize = frameWid*frameHei*framesPerBuff;
fftLengthMult = fftLenMult;
numThreadsPerBlock = 256;
//65535 is currently the maximum number of threads per kernel
// Grow the block size (halving the grid) until the grid fits; abort if
// that would exceed the 1024 threads-per-block hardware limit.
while (bufferSize/numThreadsPerBlock > 65535) {
numThreadsPerBlock <<= 1;
if (numThreadsPerBlock>1024) {
printf("Error, Buffer Size is too large, CUDA is unable to handle this kernel size!\n");
printf("Exiting Program...");
exit(1);
}
}
// Default log-magnitude display window (adjustable via the min/max setters)
minVal = 9.5;
maxVal = 12.5;
}
// Release every device resource allocated in initProcCuda() and mark the
// module so the next cudaPipeline() call re-initializes from scratch.
extern "C" void cleanUpCUDABuffers()
{
    //Clean up all CUDA Buffers and arryays
    cutilSafeCall(hipFree(dcArray));
    cutilSafeCall(hipFree(dev_FFTCompBuffer));
    cutilSafeCall(hipFree(dev_tempBuffer));
    // FIX: the two ping-pong raw-capture buffers allocated in initProcCuda()
    // were previously leaked here.
    cutilSafeCall(hipFree(dev_uShortBufferA));
    cutilSafeCall(hipFree(dev_uShortBufferB));
    //Clean up FFT plans created
    cufftSafeCall(hipfftDestroy(fft_plan));
    //Clean up the streams created
    hipStreamDestroy(memcpyStream);
    hipStreamDestroy(kernelStream);
    // Force initProcCuda() to run again on the next pipeline invocation
    mallocArrays = false;
}
// Subtract the stored DC spectrum from the raw capture buffer and zero-pad
// each A-scan (frameWidth samples) into an fftLengthMult*frameWidth complex
// line of the FFT input buffer.
void subDC_and_PadComplex(unsigned short *dev_memcpyBuffer, Complex *dev_dstCompBuffer, float *dcArray, hipStream_t processStream)
{
dim3 dimBlockX(numThreadsPerBlock);
dim3 dimGridX( (bufferSize) / dimBlockX.x);
// Re-measure the DC spectrum from this buffer once after acquireDC()
// clears the flag; subsequent buffers reuse the stored dcArray.
if (!dcAcquired) {
hipLaunchKernelGGL(( dcAcquireKernel), dim3(frameWidth/numThreadsPerBlock), dim3(numThreadsPerBlock), 0,processStream,
dev_memcpyBuffer, dcArray, frameWidth, frameHeight);
dcAcquired = true;
}
hipLaunchKernelGGL(( subDC_PadComplex), dim3(dimGridX), dim3(dimBlockX), 0, processStream,
dev_memcpyBuffer, dev_dstCompBuffer, dcArray, frameWidth, fftLengthMult*frameWidth);
}
// Post-FFT path A: take the modulus, log-scale into [minVal, maxVal], and
// downsize each frame by 'reduction' in both dimensions while copying into
// the display buffer at frame slot 'frameIdx'.
void postFFTDownsize(Complex *d_ComplexArray, float *dev_processBuffer, int frames, int frameIdx, int reduction, hipStream_t processStream)
{
int newWidth = frameWidth/reduction;
int newHeight = frameHeight/reduction;
// Precomputed normalization factor for mapping [minVal, maxVal] -> [0, 1]
float coeff = 1.0f/(maxVal-minVal);
dim3 dimBlockX(numThreadsPerBlock);
dim3 dimGridX(newWidth*newHeight*frames/ dimBlockX.x);
//Downsizing ModLogScale Kernel
//MLS = Modulus, Log, and Scaling
//This method of post FFT downsizes the data, and copies into buffer
//This allows a faster copy rate, full volume viewing range, but lower resolution
//As opposed to the other method which crops a portion of the whole volume
hipLaunchKernelGGL(( downsizeMLS), dim3(dimGridX), dim3(dimBlockX), 0, processStream,
dev_processBuffer, d_ComplexArray, frameWidth, frameHeight,
fftLengthMult*frameWidth, minVal, maxVal, coeff, frameIdx, reduction);
}
//The range var is a portion of the width far, eg width = 1024, a quarter of the width would be the range = 256
// Post-FFT path B: modulus + log-scale, then crop [offset, offset+range)
// of each A-scan at full resolution into the display buffer.
void postFFTCrop(Complex *d_ComplexArray, float *dev_processBuffer, int frames, int frameIdx, int offset, int range, hipStream_t processStream)
{
// Precomputed normalization factor for mapping [minVal, maxVal] -> [0, 1]
float coeff = 1.0f/(maxVal-minVal);
dim3 dimBlockX(numThreadsPerBlock);
dim3 dimGridX(range*frameHeight*frames/ dimBlockX.x);
//MLS = Modulus, Log, and Scaling
//This method of post FFT crops out a certain portion of the data, and copies into buffer
//This method preserves resolution, but reduces the viewing range
//As opposed to the other method which downsizes the whole volume
hipLaunchKernelGGL(( cropMLS), dim3(dimGridX), dim3(dimBlockX), 0, processStream,
dev_processBuffer, d_ComplexArray, frameWidth, frameHeight,
fftLengthMult*frameWidth, minVal, maxVal, coeff, frameIdx, offset, range);
}
//This Function calls the kernel which averages the given number of frames into a single frame (B-scan)
// 'frameNum' selects the first frame of the averaging window; a NULL source
// falls back to the internal temp buffer.
extern "C" void frameAvg(float *dev_multiFrameBuff, float *dev_displayBuff, int width, int height, int numberOfFrames, int frameNum)
{
// NULL means "use the module's scratch buffer" (only rebinds the local
// parameter, which is all the code below reads)
if (dev_multiFrameBuff==NULL) {
dev_multiFrameBuff = dev_tempBuffer;
}
dim3 dimBlockX(numThreadsPerBlock);
dim3 dimGridX(width*height/ dimBlockX.x);
hipLaunchKernelGGL(( avgKernel), dim3(dimGridX), dim3(dimBlockX), 0, kernelStream,
dev_multiFrameBuff, dev_displayBuff, frameNum, numberOfFrames, width*height);
}
//This Kernel will copy one single frame to the display buffer
// Copies frame 'frameNum' out of the multi-frame buffer; a NULL source
// falls back to the internal temp buffer.
extern "C" void copySingleFrame(float *dev_multiFrameBuff, float *dev_displayBuff, int width, int height, int frameNum)
{
// NULL means "use the module's scratch buffer"
if (dev_multiFrameBuff==NULL) {
dev_multiFrameBuff = dev_tempBuffer;
}
dim3 dimBlockX(numThreadsPerBlock);
dim3 dimGridX(width*height / dimBlockX.x);
hipLaunchKernelGGL(( copySingleFrameFloat), dim3(dimGridX), dim3(dimBlockX), 0, kernelStream,
dev_multiFrameBuff, dev_displayBuff, frameNum, width*height);
}
// Main OCT processing pipeline for one host buffer: copy the raw capture
// into one of two ping-pong device buffers while processing the previously
// copied buffer (DC subtraction + zero-pad, batched FFT, then either a
// full-resolution crop or a downsized copy into dev_frameBuff).
// reduction == 1 selects the crop path (offset/range used); reduction > 1
// selects the downsize path.
extern "C" void cudaPipeline( unsigned short *h_buffer,
float *dev_frameBuff,
int frameIdx,
int reduction, //This is used only for Downsizing
int offset, //This is used only for Cropping
int range) //This is used only for Cropping
{
//This kernel acts as a GPU synchronization kernel
//This synchronization prevents any data race conflicts
//This method of GPU Synchronization has proven to be
// the most effective method of synchronization
//DO NOT remove this kernel!
hipLaunchKernelGGL(( syncKernel), dim3(1),dim3(1), 0, 0, );
////
unsigned short *processBuffer;
unsigned short *memcpyBuffer;
// Lazy one-time device initialization on the first buffer
if (!mallocArrays) {
initProcCuda();
mallocArrays = true;
}
// NULL destination means "process into the internal scratch buffer"
if (dev_frameBuff==NULL) {
dev_frameBuff = dev_tempBuffer;
}
//Performing dual buffer processing
//One buffer for memcpy
//The other buffer for processing
// Swap roles each call: the buffer filled last call is processed now,
// while the other receives the incoming host data.
if (processBufferID==A) {
processBuffer = dev_uShortBufferA;
memcpyBuffer = dev_uShortBufferB;
processBufferID = B;
} else if (processBufferID==B) {
processBuffer = dev_uShortBufferB;
memcpyBuffer = dev_uShortBufferA;
processBufferID = A;
}
//Memcpy data into one buffer
// Async copy on memcpyStream overlaps with the kernels on kernelStream
cutilSafeCall( hipMemcpyAsync((void *) memcpyBuffer, h_buffer, bufferSize*sizeof(unsigned short), hipMemcpyHostToDevice, memcpyStream));
subDC_and_PadComplex(processBuffer, dev_FFTCompBuffer, dcArray, kernelStream);
batchFFT(dev_FFTCompBuffer, kernelStream);
//This kernel must be general for 2D OCT, 3D OCT reduce and crop!
if (reduction==1) {
postFFTCrop(dev_FFTCompBuffer, dev_frameBuff, framesPerBuffer, frameIdx, offset, range, kernelStream);
} else {
postFFTDownsize(dev_FFTCompBuffer, dev_frameBuff, framesPerBuffer, frameIdx, reduction, kernelStream);
}
//Another synchronization call explicitly for the streams only
//This synchronization is a second safety measure over the syncKernel call
// Ensure the host buffer copy has completed before the caller reuses it
hipStreamSynchronize(memcpyStream);
}
// Produce an en-face (fundus) projection of the volume by summing each
// A-scan, scaled by fundusCoeff/frameWidth. With partialFundus set only the
// B-scan at 'idx' is rendered; otherwise the whole depth is processed in
// grid-sized chunks (grid limited to 65535 blocks).
extern "C" void cudaRenderFundus( float *dev_fundus, float *dev_volume, int width, int height, int depth, int idx, bool partialFundus)
{
//Can be up to 1024, but incredibly inefficient at 1024
//128 is the most optimum size for this kernel
int inputIdx = height*idx;
// NULL volume means "project the internal scratch buffer" (single frame,
// so the input offset resets to 0)
if (dev_volume == NULL) {
dev_volume = dev_tempBuffer;
inputIdx = 0;
}
const int blockSize = 128;
float scaleCoeff = fundusCoeff/(float)frameWidth;
// Halve the per-launch depth until the grid fits the 65535-block limit
int increment = depth;
while (height*increment>65535) {
increment >>= 1;
}
dim3 dimBlockX(blockSize);
dim3 dimGridX(height*increment);
if (partialFundus) {
hipLaunchKernelGGL(( renderFundus<blockSize>), dim3(dimGridX), dim3(dimBlockX), 0, kernelStream,
dev_volume, dev_fundus, width, scaleCoeff, inputIdx, height*idx);
} else {
// Full projection: launch once per chunk of 'increment' B-scans
for (int i=0; i<depth; i+=increment) {
hipLaunchKernelGGL(( renderFundus<blockSize>), dim3(dimGridX), dim3(dimBlockX), 0, kernelStream,
dev_volume, dev_fundus, width, scaleCoeff, height*i, height*i);
}
}
}
/*****************************************************************************************************************************/
/****************************************** Miscellaneous Functions For Adjustments *****************************************/
/*****************************************************************************************************************************/
/*****************************************************************************************************************************/
// Request re-acquisition of the DC (background) spectrum: the next buffer
// processed by subDC_and_PadComplex() re-measures dcArray and sets the
// flag back to true.
extern "C" void acquireDC()
{
dcAcquired = false;
}
/*****************************************************************************************************************************/
/*****************************************************************************************************************************/
/*****************************************************************************************************************************/
// Lower the display-window floor by half a unit and report the new value.
// No lower guard is needed: decreasing minVal cannot cross maxVal.
extern "C" void decreaseMinVal()
{
    minVal -= 0.5f;
    printf("New minVal is %0.1f\n", minVal);
}
/*****************************************************************************************************************************/
// Raise the display-window floor by half a unit, refusing to close the
// window: minVal must stay at least 1 below maxVal.
extern "C" void increaseMinVal()
{
    if (minVal==maxVal-1) {
        printf("Error: minVal cannot be equal or greater than maxVal!\n");
        printf("minVal is: %f, maxVal is: %f \n", minVal, maxVal);
        return;
    }
    minVal += 0.5f;
    printf("New minVal is %0.1f\n", minVal);
}
/*****************************************************************************************************************************/
// Raise the display-window ceiling by half a unit and report the new value.
// No upper guard is needed: increasing maxVal cannot cross minVal.
extern "C" void increaseMaxVal()
{
    maxVal += 0.5f;
    printf("New maxVal is %0.1f\n", maxVal);
}
/*****************************************************************************************************************************/
// Lower the display-window ceiling by half a unit, refusing to close the
// window: maxVal must stay at least 1 above minVal.
extern "C" void decreaseMaxVal()
{
    if (maxVal==minVal+1) {
        printf("Error: maxVal cannot be equal or less than than minVal!\n");
        printf("minVal is: %f, maxVal is: %f \n", minVal, maxVal);
        return;
    }
    maxVal -= 0.5f;
    printf("New maxVal is %0.1f\n", maxVal);
}
/*****************************************************************************************************************************/
// Brighten the fundus (en-face) projection by raising its scale coefficient.
extern "C" void increaseFundusCoeff()
{
    fundusCoeff += 0.5f;
    printf("New fundusCoeff is %0.1f\n", fundusCoeff);
}
/*****************************************************************************************************************************/
// Dim the fundus (en-face) projection, never letting the coefficient go
// below zero.
extern "C" void decreaseFundusCoeff()
{
    if (fundusCoeff<=0) {
        printf("Error: fundusCoeff cannot be less than 0!\n");
        return;
    }
    fundusCoeff -= 0.5f;
    printf("New fundusCoeff is %0.1f\n", fundusCoeff);
}
/*****************************************************************************************************************************/
| 86ed352cc0e77e5fd56449f75ff5b6d1003ef8f4.cu | /**********************************************************************************
Filename : cuda_ProcFunctions.cu
Authors : Kevin Wong, Yifan Jian, Marinko Sarunic
Published : January 30th, 2014
Copyright (C) 2014 Biomedical Optics Research Group - Simon Fraser University
This software contains source code provided by NVIDIA Corporation.
This file is part of a free software. Details of this software has been described
in the paper titled:
"GPU Accelerated OCT Processing at Megahertz Axial Scan Rate and High Resolution Video
Rate Volumetric Rendering"
Please refer to this paper for further information about this software. Redistribution
and modification of this code is restricted to academic purposes ONLY, provided that
the following conditions are met:
- Redistribution of this code must retain the above copyright notice, this list of
conditions and the following disclaimer
- Any use, disclosure, reproduction, or redistribution of this software outside of
academic purposes is strictly prohibited
*DISCLAIMER*
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
**********************************************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h> //Include the general CUDA Header file
#include <cufft.h> //This is to perform FFT using CUDA
#include <cutil_inline.h> //This is to perform CUDA safecall functions
#include <cuda_runtime.h>
#include "cuda_ProcKernels.cu"
typedef float2 Complex;
int numThreadsPerBlock = 256;
bool mallocArrays = false;
bool dcAcquired = true;
bool lambdaCalculated = false;
bool dispPhaseCalculated = false;
enum bufferType { A, B };
bufferType processBufferID = A;
int frameWidth = 2048;
int frameHeight = 512;
int framesPerBuffer = 1;
int frameCounter = 0;
int bufferSize = frameWidth*frameHeight*framesPerBuffer;
int fftLengthMult = 1;
float minVal;
float maxVal;
float fundusCoeff = 4.0f;
float *dev_tempBuffer;
unsigned short *dev_uShortBufferA;
unsigned short *dev_uShortBufferB;
float *dcArray;
Complex *dev_FFTCompBuffer;
cufftHandle fft_plan;
cudaStream_t memcpyStream;
cudaStream_t kernelStream;
/*************************************************************************************************************************/
/*************************************************************************************************************************/
void batchFFT(Complex *d_ComplexArray, cudaStream_t processStream)
{
cufftSafeCall(
cufftExecC2C(fft_plan,
(cufftComplex *)d_ComplexArray,
(cufftComplex *)d_ComplexArray,
CUFFT_FORWARD)
);
}
/*************************************************************************************************************************
*************************************************************************************************************************/
void initProcCuda()
{
cudaStreamCreate(&memcpyStream);
cudaStreamCreate(&kernelStream);
cutilSafeCall( cudaMalloc((void**)&dev_tempBuffer, bufferSize * sizeof(float)));
cutilSafeCall( cudaMalloc((void**)&dev_uShortBufferA, bufferSize * sizeof(unsigned short)));
cutilSafeCall( cudaMalloc((void**)&dev_uShortBufferB, bufferSize * sizeof(unsigned short)));
cutilSafeCall( cudaMalloc((void**)&dcArray, frameWidth * sizeof(float)));
cudaMemset(dcArray, 0, frameWidth * sizeof(float));
cutilSafeCall( cudaMalloc((void**)&dev_FFTCompBuffer, bufferSize * fftLengthMult * sizeof(Complex)));
//Be sure to have the fft_width size be dynamic
cufftSafeCall( cufftPlan1d( &fft_plan, fftLengthMult*frameWidth, CUFFT_C2C, frameHeight * framesPerBuffer));
cufftSafeCall( cufftSetStream(fft_plan, kernelStream));
}
extern "C" void initCudaProcVar( int frameWid,
int frameHei,
int framesPerBuff,
int fftLenMult)
{
frameWidth = frameWid;
frameHeight = frameHei;
framesPerBuffer = framesPerBuff;
bufferSize = frameWid*frameHei*framesPerBuff;
fftLengthMult = fftLenMult;
numThreadsPerBlock = 256;
//65535 is currently the maximum number of threads per kernel
while (bufferSize/numThreadsPerBlock > 65535) {
numThreadsPerBlock <<= 1;
if (numThreadsPerBlock>1024) {
printf("Error, Buffer Size is too large, CUDA is unable to handle this kernel size!\n");
printf("Exiting Program...");
exit(1);
}
}
minVal = 9.5;
maxVal = 12.5;
}
extern "C" void cleanUpCUDABuffers()
{
//Clean up all CUDA Buffers and arryays
cutilSafeCall(cudaFree(dcArray));
cutilSafeCall(cudaFree(dev_FFTCompBuffer));
cutilSafeCall(cudaFree(dev_tempBuffer));
//Clean up FFT plans created
cufftSafeCall(cufftDestroy(fft_plan));
//Clean up the streams created
cudaStreamDestroy(memcpyStream);
cudaStreamDestroy(kernelStream);
mallocArrays = false;
}
void subDC_and_PadComplex(unsigned short *dev_memcpyBuffer, Complex *dev_dstCompBuffer, float *dcArray, cudaStream_t processStream)
{
dim3 dimBlockX(numThreadsPerBlock);
dim3 dimGridX( (bufferSize) / dimBlockX.x);
if (!dcAcquired) {
dcAcquireKernel<<<frameWidth/numThreadsPerBlock, numThreadsPerBlock, 0,processStream>>>
(dev_memcpyBuffer, dcArray, frameWidth, frameHeight);
dcAcquired = true;
}
subDC_PadComplex<<<dimGridX, dimBlockX, 0, processStream>>>
(dev_memcpyBuffer, dev_dstCompBuffer, dcArray, frameWidth, fftLengthMult*frameWidth);
}
void postFFTDownsize(Complex *d_ComplexArray, float *dev_processBuffer, int frames, int frameIdx, int reduction, cudaStream_t processStream)
{
int newWidth = frameWidth/reduction;
int newHeight = frameHeight/reduction;
float coeff = 1.0f/(maxVal-minVal);
dim3 dimBlockX(numThreadsPerBlock);
dim3 dimGridX(newWidth*newHeight*frames/ dimBlockX.x);
//Downsizing ModLogScale Kernel
//MLS = Modulus, Log, and Scaling
//This method of post FFT downsizes the data, and copies into buffer
//This allows a faster copy rate, full volume viewing range, but lower resolution
//As opposed to the other method which crops a portion of the whole volume
downsizeMLS<<<dimGridX, dimBlockX, 0, processStream>>>
(dev_processBuffer, d_ComplexArray, frameWidth, frameHeight,
fftLengthMult*frameWidth, minVal, maxVal, coeff, frameIdx, reduction);
}
//The range var is a portion of the width far, eg width = 1024, a quarter of the width would be the range = 256
void postFFTCrop(Complex *d_ComplexArray, float *dev_processBuffer, int frames, int frameIdx, int offset, int range, cudaStream_t processStream)
{
float coeff = 1.0f/(maxVal-minVal);
dim3 dimBlockX(numThreadsPerBlock);
dim3 dimGridX(range*frameHeight*frames/ dimBlockX.x);
//MLS = Modulus, Log, and Scaling
//This method of post FFT crops out a certain portion of the data, and copies into buffer
//This method preserves resolution, but reduces the viewing range
//As opposed to the other method which downsizes the whole volume
cropMLS<<<dimGridX, dimBlockX, 0, processStream>>>
(dev_processBuffer, d_ComplexArray, frameWidth, frameHeight,
fftLengthMult*frameWidth, minVal, maxVal, coeff, frameIdx, offset, range);
}
//This Function calls the kernel which averages the given number of frames into a single frame (B-scan)
extern "C" void frameAvg(float *dev_multiFrameBuff, float *dev_displayBuff, int width, int height, int numberOfFrames, int frameNum)
{
if (dev_multiFrameBuff==NULL) {
dev_multiFrameBuff = dev_tempBuffer;
}
dim3 dimBlockX(numThreadsPerBlock);
dim3 dimGridX(width*height/ dimBlockX.x);
avgKernel<<<dimGridX, dimBlockX, 0, kernelStream>>>
(dev_multiFrameBuff, dev_displayBuff, frameNum, numberOfFrames, width*height);
}
//This Kernel will copy one single frame to the display buffer
extern "C" void copySingleFrame(float *dev_multiFrameBuff, float *dev_displayBuff, int width, int height, int frameNum)
{
if (dev_multiFrameBuff==NULL) {
dev_multiFrameBuff = dev_tempBuffer;
}
dim3 dimBlockX(numThreadsPerBlock);
dim3 dimGridX(width*height / dimBlockX.x);
copySingleFrameFloat<<<dimGridX, dimBlockX, 0, kernelStream>>>
(dev_multiFrameBuff, dev_displayBuff, frameNum, width*height);
}
extern "C" void cudaPipeline( unsigned short *h_buffer,
float *dev_frameBuff,
int frameIdx,
int reduction, //This is used only for Downsizing
int offset, //This is used only for Cropping
int range) //This is used only for Cropping
{
//This kernel acts as a GPU synchronization kernel
//This synchronization prevents any data race conflicts
//This method of GPU Synchronization has proven to be
// the most effective method of synchronization
//DO NOT remove this kernel!
syncKernel<<<1,1>>>();
////
unsigned short *processBuffer;
unsigned short *memcpyBuffer;
if (!mallocArrays) {
initProcCuda();
mallocArrays = true;
}
if (dev_frameBuff==NULL) {
dev_frameBuff = dev_tempBuffer;
}
//Performing dual buffer processing
//One buffer for memcpy
//The other buffer for processing
if (processBufferID==A) {
processBuffer = dev_uShortBufferA;
memcpyBuffer = dev_uShortBufferB;
processBufferID = B;
} else if (processBufferID==B) {
processBuffer = dev_uShortBufferB;
memcpyBuffer = dev_uShortBufferA;
processBufferID = A;
}
//Memcpy data into one buffer
cutilSafeCall( cudaMemcpyAsync((void *) memcpyBuffer, h_buffer, bufferSize*sizeof(unsigned short), cudaMemcpyHostToDevice, memcpyStream));
subDC_and_PadComplex(processBuffer, dev_FFTCompBuffer, dcArray, kernelStream);
batchFFT(dev_FFTCompBuffer, kernelStream);
//This kernel must be general for 2D OCT, 3D OCT reduce and crop!
if (reduction==1) {
postFFTCrop(dev_FFTCompBuffer, dev_frameBuff, framesPerBuffer, frameIdx, offset, range, kernelStream);
} else {
postFFTDownsize(dev_FFTCompBuffer, dev_frameBuff, framesPerBuffer, frameIdx, reduction, kernelStream);
}
//Another synchronization call explicitly for the streams only
//This synchronization is a second safety measure over the syncKernel call
cudaStreamSynchronize(memcpyStream);
}
extern "C" void cudaRenderFundus( float *dev_fundus, float *dev_volume, int width, int height, int depth, int idx, bool partialFundus)
{
//Can be up to 1024, but incredibly inefficient at 1024
//128 is the most optimum size for this kernel
int inputIdx = height*idx;
if (dev_volume == NULL) {
dev_volume = dev_tempBuffer;
inputIdx = 0;
}
const int blockSize = 128;
float scaleCoeff = fundusCoeff/(float)frameWidth;
int increment = depth;
while (height*increment>65535) {
increment >>= 1;
}
dim3 dimBlockX(blockSize);
dim3 dimGridX(height*increment);
if (partialFundus) {
renderFundus<blockSize><<<dimGridX, dimBlockX, 0, kernelStream>>>
(dev_volume, dev_fundus, width, scaleCoeff, inputIdx, height*idx);
} else {
for (int i=0; i<depth; i+=increment) {
renderFundus<blockSize><<<dimGridX, dimBlockX, 0, kernelStream>>>
(dev_volume, dev_fundus, width, scaleCoeff, height*i, height*i);
}
}
}
/*****************************************************************************************************************************/
/****************************************** Miscellaneous Functions For Adjustments *****************************************/
/*****************************************************************************************************************************/
/*****************************************************************************************************************************/
extern "C" void acquireDC()
{
dcAcquired = false;
}
/*****************************************************************************************************************************/
/*****************************************************************************************************************************/
/*****************************************************************************************************************************/
extern "C" void decreaseMinVal()
{
minVal -= 0.5f;
printf("New minVal is %0.1f", minVal);
printf("\n");
}
/*****************************************************************************************************************************/
extern "C" void increaseMinVal()
{
if (minVal==maxVal-1) {
printf("Error: minVal cannot be equal or greater than maxVal!\n");
printf("minVal is: %f, maxVal is: %f \n", minVal, maxVal);
} else {
minVal += 0.5f;
printf("New minVal is %0.1f", minVal);
printf("\n");
}
}
/*****************************************************************************************************************************/
extern "C" void increaseMaxVal()
{
maxVal += 0.5f;
printf("New maxVal is %0.1f", maxVal);
printf("\n");
}
/*****************************************************************************************************************************/
extern "C" void decreaseMaxVal()
{
if (maxVal==minVal+1) {
printf("Error: maxVal cannot be equal or less than than minVal!\n");
printf("minVal is: %f, maxVal is: %f \n", minVal, maxVal);
} else {
maxVal -= 0.5f;
printf("New maxVal is %0.1f", maxVal);
printf("\n");
}
}
/*****************************************************************************************************************************/
extern "C" void increaseFundusCoeff()
{
fundusCoeff += 0.5f;
printf("New fundusCoeff is %0.1f", fundusCoeff);
printf("\n");
}
/*****************************************************************************************************************************/
extern "C" void decreaseFundusCoeff()
{
if (fundusCoeff<=0) {
printf("Error: fundusCoeff cannot be less than 0!\n");
} else {
fundusCoeff -= 0.5f;
printf("New fundusCoeff is %0.1f", fundusCoeff);
printf("\n");
}
}
/*****************************************************************************************************************************/
|
63f4236a3d9beeafd8bc4c8acc99714b35d9c9a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
const int Nthreads = 1024, NrankMax = 3, nt0max = 71, NchanMax = 1024;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void getW(const double *Params, double *wtw, double *W){
int Nfilt, nt0, tid, bid, i, t, Nrank,k, tmax;
double x, x0, xmax;
volatile __shared__ double sW[nt0max*NrankMax], swtw[nt0max*nt0max], xN[1];
nt0 = (int) Params[4];
Nrank = (int) Params[6];
Nfilt = (int) Params[1];
tmax = (int) Params[11];
tid = threadIdx.x;
bid = blockIdx.x;
for (k=0;k<nt0;k++)
swtw[tid + k*nt0] = wtw[tid + k*nt0 + bid * nt0 * nt0];
for (k=0;k<Nrank;k++)
sW[tid + k*nt0] = W[tid + bid * nt0 + k * nt0*Nfilt];
__syncthreads();
// for each svd
for(k=0;k<Nrank;k++){
for (i=0;i<100;i++){
// compute projection of wtw
x = 0.0f;
for (t=0;t<nt0;t++)
x+= swtw[tid + t*nt0] * sW[t + k*nt0];
__syncthreads();
if (i<99){
sW[tid + k*nt0] = x;
__syncthreads();
if (tid==0){
x0 = 0.00001f;
for(t=0;t<nt0;t++)
x0+= sW[t + k*nt0] * sW[t + k*nt0];
xN[0] = sqrt(x0);
}
__syncthreads();
sW[tid + k*nt0] = x/xN[0];
__syncthreads();
}
}
// now subtract off this svd from wtw
for (t=0;t<nt0;t++)
swtw[tid + t*nt0] -= sW[t+k*nt0] * x;
__syncthreads();
}
xmax = sW[tmax];
__syncthreads();
sW[tid] = - sW[tid] * copysign(1.0, xmax);
// now write W back
for (k=0;k<Nrank;k++)
W[tid + bid * nt0 + k * nt0*Nfilt] = sW[tid + k*nt0];
} | 63f4236a3d9beeafd8bc4c8acc99714b35d9c9a2.cu | #include "includes.h"
const int Nthreads = 1024, NrankMax = 3, nt0max = 71, NchanMax = 1024;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void getW(const double *Params, double *wtw, double *W){
int Nfilt, nt0, tid, bid, i, t, Nrank,k, tmax;
double x, x0, xmax;
volatile __shared__ double sW[nt0max*NrankMax], swtw[nt0max*nt0max], xN[1];
nt0 = (int) Params[4];
Nrank = (int) Params[6];
Nfilt = (int) Params[1];
tmax = (int) Params[11];
tid = threadIdx.x;
bid = blockIdx.x;
for (k=0;k<nt0;k++)
swtw[tid + k*nt0] = wtw[tid + k*nt0 + bid * nt0 * nt0];
for (k=0;k<Nrank;k++)
sW[tid + k*nt0] = W[tid + bid * nt0 + k * nt0*Nfilt];
__syncthreads();
// for each svd
for(k=0;k<Nrank;k++){
for (i=0;i<100;i++){
// compute projection of wtw
x = 0.0f;
for (t=0;t<nt0;t++)
x+= swtw[tid + t*nt0] * sW[t + k*nt0];
__syncthreads();
if (i<99){
sW[tid + k*nt0] = x;
__syncthreads();
if (tid==0){
x0 = 0.00001f;
for(t=0;t<nt0;t++)
x0+= sW[t + k*nt0] * sW[t + k*nt0];
xN[0] = sqrt(x0);
}
__syncthreads();
sW[tid + k*nt0] = x/xN[0];
__syncthreads();
}
}
// now subtract off this svd from wtw
for (t=0;t<nt0;t++)
swtw[tid + t*nt0] -= sW[t+k*nt0] * x;
__syncthreads();
}
xmax = sW[tmax];
__syncthreads();
sW[tid] = - sW[tid] * copysign(1.0, xmax);
// now write W back
for (k=0;k<Nrank;k++)
W[tid + bid * nt0 + k * nt0*Nfilt] = sW[tid + k*nt0];
} |
c6082050bfdb4b46745b35125e93f9738e36e7fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/features2d/features2d.hpp>
#include "cuda_akaze.h"
#include "cudautils.h"
#define CONVROW_W 160
#define CONVCOL_W 32
#define CONVCOL_H 40
#define CONVCOL_S 8
#define SCHARR_W 32
#define SCHARR_H 16
#define NLDSTEP_W 32
#define NLDSTEP_H 13
#define ORIENT_S (13 * 16)
#define EXTRACT_S 64
__device__ __constant__ float d_Kernel[21];
__device__ unsigned int d_PointCounter[1];
__device__ unsigned int d_ExtremaIdx[16];
__device__ __constant__ int comp_idx_1[61 * 8];
__device__ __constant__ int comp_idx_2[61 * 8];
hipStream_t copyStream;
//__device__ __constant__ float norm_factors[29];
#if 1
#define CHK
#else
#define CHK hipDeviceSynchronize(); \
{ \
hipError_t cuerr = hipGetLastError(); \
if (cuerr) { \
std::cout << "Cuda error " << hipGetErrorString(cuerr) << ". at " << __FILE__ << ":" << __LINE__ << std::endl; \
} \
}
#endif
void WaitCuda() {
hipStreamSynchronize(copyStream);
}
struct Conv_t {
float *d_Result;
float *d_Data;
int width;
int pitch;
int height;
};
template <int RADIUS>
__global__ void ConvRowGPU(struct Conv_t s) {
//__global__ void ConvRowGPU(float *d_Result, float *d_Data, int width, int
//pitch, int height) {
__shared__ float data[CONVROW_W + 2 * RADIUS];
const int tx = threadIdx.x;
const int minx = blockIdx.x * CONVROW_W;
const int maxx = min(minx + CONVROW_W, s.width);
const int yptr = blockIdx.y * s.pitch;
const int loadPos = minx + tx - RADIUS;
const int writePos = minx + tx;
if (loadPos < 0)
data[tx] = s.d_Data[yptr];
else if (loadPos >= s.width)
data[tx] = s.d_Data[yptr + s.width - 1];
else
data[tx] = s.d_Data[yptr + loadPos];
__syncthreads();
if (writePos < maxx && tx < CONVROW_W) {
float sum = 0.0f;
for (int i = 0; i <= (2 * RADIUS); i++) sum += data[tx + i] * d_Kernel[i];
s.d_Result[yptr + writePos] = sum;
}
}
///////////////////////////////////////////////////////////////////////////////
// Column convolution filter
///////////////////////////////////////////////////////////////////////////////
template <int RADIUS>
__global__ void ConvColGPU(struct Conv_t s) {
//__global__ void ConvColGPU(float *d_Result, float *d_Data, int width, int
//pitch, int height) {
__shared__ float data[CONVCOL_W * (CONVCOL_H + 2 * RADIUS)];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int miny = blockIdx.y * CONVCOL_H;
const int maxy = min(miny + CONVCOL_H, s.height) - 1;
const int totStart = miny - RADIUS;
const int totEnd = maxy + RADIUS;
const int colStart = blockIdx.x * CONVCOL_W + tx;
const int colEnd = colStart + (s.height - 1) * s.pitch;
const int smemStep = CONVCOL_W * CONVCOL_S;
const int gmemStep = s.pitch * CONVCOL_S;
if (colStart < s.width) {
int smemPos = ty * CONVCOL_W + tx;
int gmemPos = colStart + (totStart + ty) * s.pitch;
for (int y = totStart + ty; y <= totEnd; y += blockDim.y) {
if (y < 0)
data[smemPos] = s.d_Data[colStart];
else if (y >= s.height)
data[smemPos] = s.d_Data[colEnd];
else
data[smemPos] = s.d_Data[gmemPos];
smemPos += smemStep;
gmemPos += gmemStep;
}
}
__syncthreads();
if (colStart < s.width) {
int smemPos = ty * CONVCOL_W + tx;
int gmemPos = colStart + (miny + ty) * s.pitch;
for (int y = miny + ty; y <= maxy; y += blockDim.y) {
float sum = 0.0f;
for (int i = 0; i <= 2 * RADIUS; i++)
sum += data[smemPos + i * CONVCOL_W] * d_Kernel[i];
s.d_Result[gmemPos] = sum;
smemPos += smemStep;
gmemPos += gmemStep;
}
}
}
template <int RADIUS>
double SeparableFilter(CudaImage &inimg, CudaImage &outimg, CudaImage &temp,
float *h_Kernel) {
int width = inimg.width;
int pitch = inimg.pitch;
int height = inimg.height;
float *d_DataA = inimg.d_data;
float *d_DataB = outimg.d_data;
float *d_Temp = temp.d_data;
if (d_DataA == NULL || d_DataB == NULL || d_Temp == NULL) {
printf("SeparableFilter: missing data\n");
return 0.0;
}
// TimerGPU timer0(0);
const unsigned int kernelSize = (2 * RADIUS + 1) * sizeof(float);
safeCall(hipMemcpyToSymbolAsync(d_Kernel, h_Kernel, kernelSize));
dim3 blockGridRows(iDivUp(width, CONVROW_W), height);
dim3 threadBlockRows(CONVROW_W + 2 * RADIUS);
struct Conv_t s;
s.d_Result = d_Temp;
s.d_Data = d_DataA;
s.width = width;
s.pitch = pitch;
s.height = height;
ConvRowGPU<RADIUS> << <blockGridRows, threadBlockRows>>> (s);
// checkMsg("ConvRowGPU() execution failed\n");
// safeCall(hipDeviceSynchronize());
dim3 blockGridColumns(iDivUp(width, CONVCOL_W), iDivUp(height, CONVCOL_H));
dim3 threadBlockColumns(CONVCOL_W, CONVCOL_S);
s.d_Result = d_DataB;
s.d_Data = d_Temp;
ConvColGPU<RADIUS> << <blockGridColumns, threadBlockColumns>>> (s);
// checkMsg("ConvColGPU() execution failed\n");
// safeCall(hipDeviceSynchronize());
double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
printf("SeparableFilter time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
template <int RADIUS>
double LowPass(CudaImage &inimg, CudaImage &outimg, CudaImage &temp,
double var) {
float kernel[2 * RADIUS + 1];
float kernelSum = 0.0f;
for (int j = -RADIUS; j <= RADIUS; j++) {
kernel[j + RADIUS] = (float)expf(-(double)j * j / 2.0 / var);
kernelSum += kernel[j + RADIUS];
}
for (int j = -RADIUS; j <= RADIUS; j++) kernel[j + RADIUS] /= kernelSum;
return SeparableFilter<RADIUS>(inimg, outimg, temp, kernel);
}
double LowPass(CudaImage &inimg, CudaImage &outimg, CudaImage &temp, double var,
int kernsize) {
if (kernsize <= 5)
return LowPass<2>(inimg, outimg, temp, var);
else if (kernsize <= 7)
return LowPass<3>(inimg, outimg, temp, var);
else if (kernsize <= 9)
return LowPass<4>(inimg, outimg, temp, var);
else {
if (kernsize > 11)
std::cerr << "Kernels larger than 11 not implemented" << std::endl;
return LowPass<5>(inimg, outimg, temp, var);
}
}
__global__ void Scharr(float *imgd, float *lxd, float *lyd, int width,
int pitch, int height) {
#define BW (SCHARR_W + 2)
__shared__ float buffer[BW * (SCHARR_H + 2)];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * SCHARR_W + tx;
int y = blockIdx.y * SCHARR_H + ty;
int xp = (x == 0 ? 1 : (x > width ? width - 2 : x - 1));
int yp = (y == 0 ? 1 : (y > height ? height - 2 : y - 1));
buffer[ty * BW + tx] = imgd[yp * pitch + xp];
__syncthreads();
if (x < width && y < height && tx < SCHARR_W && ty < SCHARR_H) {
float *b = buffer + (ty + 1) * BW + (tx + 1);
float ul = b[-BW - 1];
float ur = b[-BW + 1];
float ll = b[+BW - 1];
float lr = b[+BW + 1];
lxd[y * pitch + x] = 3.0f * (lr - ll + ur - ul) + 10.0f * (b[+1] - b[-1]);
lyd[y * pitch + x] = 3.0f * (lr + ll - ur - ul) + 10.0f * (b[BW] - b[-BW]);
}
}
double Scharr(CudaImage &img, CudaImage &lx, CudaImage &ly) {
// TimerGPU timer0(0);
dim3 blocks(iDivUp(img.width, SCHARR_W), iDivUp(img.height, SCHARR_H));
dim3 threads(SCHARR_W + 2, SCHARR_H + 2);
Scharr << <blocks, threads>>>
(img.d_data, lx.d_data, ly.d_data, img.width, img.pitch, img.height);
// checkMsg("Scharr() execution failed\n");
// safeCall(hipDeviceSynchronize());
double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
printf("Scharr time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
__global__ void Flow(float *imgd, float *flowd, int width, int pitch,
int height, DIFFUSIVITY_TYPE type, float invk) {
#define BW (SCHARR_W + 2)
__shared__ float buffer[BW * (SCHARR_H + 2)];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * SCHARR_W + tx;
int y = blockIdx.y * SCHARR_H + ty;
int xp = (x == 0 ? 1 : (x > width ? width - 2 : x - 1));
int yp = (y == 0 ? 1 : (y > height ? height - 2 : y - 1));
buffer[ty * BW + tx] = imgd[yp * pitch + xp];
__syncthreads();
if (x < width && y < height && tx < SCHARR_W && ty < SCHARR_H) {
float *b = buffer + (ty + 1) * BW + (tx + 1);
float ul = b[-BW - 1];
float ur = b[-BW + 1];
float ll = b[+BW - 1];
float lr = b[+BW + 1];
float lx = 3.0f * (lr - ll + ur - ul) + 10.0f * (b[+1] - b[-1]);
float ly = 3.0f * (lr + ll - ur - ul) + 10.0f * (b[BW] - b[-BW]);
float dif2 = invk * (lx * lx + ly * ly);
if (type == PM_G1)
flowd[y * pitch + x] = exp(-dif2);
else if (type == PM_G2)
flowd[y * pitch + x] = 1.0f / (1.0f + dif2);
else if (type == WEICKERT)
flowd[y * pitch + x] = 1.0f - exp(-3.315 / (dif2 * dif2 * dif2 * dif2));
else
flowd[y * pitch + x] = 1.0f / sqrt(1.0f + dif2);
}
}
double Flow(CudaImage &img, CudaImage &flow, DIFFUSIVITY_TYPE type,
float kcontrast) {
// TimerGPU timer0(0);
dim3 blocks(iDivUp(img.width, SCHARR_W), iDivUp(img.height, SCHARR_H));
dim3 threads(SCHARR_W + 2, SCHARR_H + 2);
Flow << <blocks, threads>>> (img.d_data, flow.d_data, img.width, img.pitch,
img.height, type,
1.0f / (kcontrast * kcontrast));
// checkMsg("Flow() execution failed\n");
// safeCall(hipDeviceSynchronize());
double gpuTime = 0; // = timer0.read();
#ifdef VERBOSE
printf("Flow time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
struct NLDStep_t {
float *imgd;
float *flod;
float *temd;
int width;
int pitch;
int height;
float stepsize;
};
//__global__ void NLDStep(float *imgd, float *flod, float *temd, int width, int
// pitch, int height, float stepsize)
__global__ void NLDStep(NLDStep_t s) {
#undef BW
#define BW (NLDSTEP_W + 2)
__shared__ float ibuff[BW * (NLDSTEP_H + 2)];
__shared__ float fbuff[BW * (NLDSTEP_H + 2)];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * NLDSTEP_W + tx;
int y = blockIdx.y * NLDSTEP_H + ty;
int xp = (x == 0 ? 0 : (x > s.width ? s.width - 1 : x - 1));
int yp = (y == 0 ? 0 : (y > s.height ? s.height - 1 : y - 1));
ibuff[ty * BW + tx] = s.imgd[yp * s.pitch + xp];
fbuff[ty * BW + tx] = s.flod[yp * s.pitch + xp];
__syncthreads();
if (tx < NLDSTEP_W && ty < NLDSTEP_H && x < s.width && y < s.height) {
float *ib = ibuff + (ty + 1) * BW + (tx + 1);
float *fb = fbuff + (ty + 1) * BW + (tx + 1);
float ib0 = ib[0];
float fb0 = fb[0];
float xpos = (fb0 + fb[+1]) * (ib[+1] - ib0);
float xneg = (fb0 + fb[-1]) * (ib0 - ib[-1]);
float ypos = (fb0 + fb[+BW]) * (ib[+BW] - ib0);
float yneg = (fb0 + fb[-BW]) * (ib0 - ib[-BW]);
s.temd[y * s.pitch + x] = s.stepsize * (xpos - xneg + ypos - yneg);
}
}
struct NLDUpdate_t {
float *imgd;
float *temd;
int width;
int pitch;
int height;
};
//__global__ void NLDUpdate(float *imgd, float *temd, int width, int pitch, int
// height)
__global__ void NLDUpdate(NLDUpdate_t s) {
int x = blockIdx.x * 32 + threadIdx.x;
int y = blockIdx.y * 16 + threadIdx.y;
if (x < s.width && y < s.height) {
int p = y * s.pitch + x;
s.imgd[p] = s.imgd[p] + s.temd[p];
}
}
double NLDStep(CudaImage &img, CudaImage &flow, CudaImage &temp,
float stepsize) {
// TimerGPU timer0(0);
dim3 blocks0(iDivUp(img.width, NLDSTEP_W), iDivUp(img.height, NLDSTEP_H));
dim3 threads0(NLDSTEP_W + 2, NLDSTEP_H + 2);
NLDStep_t s;
s.imgd = img.d_data;
s.flod = flow.d_data;
s.temd = temp.d_data;
s.width = img.width;
s.pitch = img.pitch;
s.height = img.height;
s.stepsize = 0.5 * stepsize;
// NLDStep<<<blocks0, threads0>>>(img.d_data, flow.d_data, temp.d_data,
// img.width, img.pitch, img.height, 0.5f*stepsize);
NLDStep << <blocks0, threads0>>> (s);
// checkMsg("NLDStep() execution failed\n");
// safeCall(hipDeviceSynchronize());
dim3 blocks1(iDivUp(img.width, 32), iDivUp(img.height, 16));
dim3 threads1(32, 16);
NLDUpdate_t su;
su.imgd = img.d_data;
su.temd = temp.d_data;
su.width = img.width;
su.height = img.height;
su.pitch = img.pitch;
// NLDUpdate<<<blocks1, threads1>>>(img.d_data, temp.d_data, img.width,
// img.pitch, img.height);
NLDUpdate << <blocks1, threads1>>> (su);
// checkMsg("NLDUpdate() execution failed\n");
// safeCall(hipDeviceSynchronize());
double gpuTime = 0; // = timer0.read();
#ifdef VERBOSE
printf("NLDStep time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
__global__ void HalfSample(float *iimd, float *oimd, int iwidth, int iheight,
int ipitch, int owidth, int oheight, int opitch) {
__shared__ float buffer[16 * 33];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * 16 + tx;
int y = blockIdx.y * 16 + ty;
if (x >= owidth || y >= oheight) return;
float *ptri = iimd + (2 * y) * ipitch + (2 * x);
if (2 * owidth == iwidth) {
buffer[ty * 32 + tx] = owidth * (ptri[0] + ptri[1]);
ptri += ipitch;
buffer[ty * 32 + tx + 16] = owidth * (ptri[0] + ptri[1]);
if (ty == 15) {
ptri += ipitch;
buffer[tx + 32 * 16] = owidth * (ptri[0] + ptri[1]);
} else if (y * 2 + 3 == iheight) {
ptri += ipitch;
buffer[tx + 32 * (ty + 1)] = owidth * (ptri[0] + ptri[1]);
}
} else {
float f0 = owidth - x;
float f2 = 1 + x;
buffer[ty * 32 + tx] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
ptri += ipitch;
buffer[ty * 32 + tx + 16] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
if (ty == 15 && 2 * oheight != iheight) {
ptri += ipitch;
buffer[tx + 32 * 16] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[1];
} else if (y * 2 + 3 == iheight && 2 * oheight != iheight) {
ptri += ipitch;
buffer[tx + 32 * (ty + 1)] =
f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
}
}
__syncthreads();
float *buff = buffer + 32 * ty + tx;
if (2 * oheight == iheight)
oimd[y * opitch + x] = oheight * (buff[0] + buff[16]) / (iwidth * iheight);
else {
float f0 = oheight - y;
float f2 = 1 + y;
oimd[y * opitch + x] = (f0 * buff[0] + oheight * buff[16] + f2 * buff[32]) /
(iwidth * iheight);
}
}
__global__ void HalfSample2(float *iimd, float *oimd, int ipitch, int owidth,
int oheight, int opitch) {
int x = blockIdx.x * 32 + threadIdx.x;
int y = blockIdx.y * 16 + threadIdx.y;
if (x >= owidth || y >= oheight) return;
float *ptr = iimd + (2 * y) * ipitch + (2 * x);
oimd[y * opitch + x] =
0.25f * (ptr[0] + ptr[1] + ptr[ipitch + 0] + ptr[ipitch + 1]);
}
double HalfSample(CudaImage &inimg, CudaImage &outimg) {
// TimerGPU timer0(0);
if (inimg.width == 2 * outimg.width && inimg.height == 2 * outimg.height) {
dim3 blocks(iDivUp(outimg.width, 32), iDivUp(outimg.height, 16));
dim3 threads(32, 16);
HalfSample2 << <blocks, threads>>> (inimg.d_data, outimg.d_data,
inimg.pitch, outimg.width,
outimg.height, outimg.pitch);
} else {
dim3 blocks(iDivUp(outimg.width, 16), iDivUp(outimg.height, 16));
dim3 threads(16, 16);
HalfSample << <blocks, threads>>> (inimg.d_data, outimg.d_data, inimg.width,
inimg.height, inimg.pitch, outimg.width,
outimg.height, outimg.pitch);
}
// checkMsg("HalfSample() execution failed\n");
// safeCall(hipDeviceSynchronize());
double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
printf("HalfSample time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
double Copy(CudaImage &inimg, CudaImage &outimg) {
// TimerGPU timer0(0);
double gpuTime = 0; // timer0.read();
safeCall(hipMemcpy2DAsync(outimg.d_data, sizeof(float) * outimg.pitch,
inimg.d_data, sizeof(float) * outimg.pitch,
sizeof(float) * inimg.width, inimg.height,
hipMemcpyDeviceToDevice));
#ifdef VERBOSE
printf("Copy time = %.2f ms\n", gpuTime);
#endif
return gpuTime;
}
float *AllocBuffers(int width, int height, int num, int omax, int &maxpts,
std::vector<CudaImage> &buffers, cv::KeyPoint *&pts,
cv::KeyPoint *&ptsbuffer, int *&ptindices, unsigned char *&desc, float *&descbuffer, CudaImage *&ims) {
maxpts = 4 * ((maxpts+3)/4);
buffers.resize(omax * num);
int w = width;
int h = height;
int p = iAlignUp(w, 128);
int size = 0;
for (int i = 0; i < omax; i++) {
for (int j = 0; j < num; j++) {
CudaImage &buf = buffers[i * num + j];
buf.width = w;
buf.height = h;
buf.pitch = p;
buf.d_data = (float *)((long)size);
size += h * p;
}
w /= 2;
h /= 2;
p = iAlignUp(w, 128);
}
int ptsstart = size;
size += sizeof(cv::KeyPoint) * maxpts / sizeof(float);
int ptsbufferstart = size;
size += sizeof(cv::KeyPoint) * maxpts / sizeof(float);
int descstart = size;
size += sizeof(unsigned char)*maxpts*61/sizeof(float);
int descbufferstart = size;
size += sizeof(float)*3*29*maxpts / sizeof(float);
int indicesstart = size;
size += 21*21*sizeof(int)*maxpts/sizeof(float);
int imgstart = size;
size += sizeof(CudaImage) * (num * omax + sizeof(float) - 1) / sizeof(float);
float *memory = NULL;
size_t pitch;
std::cout << "allocating " << size/1024./1024. << " Mbytes of gpu memory\n";
safeCall(hipMallocPitch((void **)&memory, &pitch, (size_t)4096,
(size + 4095) / 4096 * sizeof(float)));
for (int i = 0; i < omax * num; i++) {
CudaImage &buf = buffers[i];
buf.d_data = memory + (long)buf.d_data;
}
pts = (cv::KeyPoint *)(memory + ptsstart);
ptsbuffer = (cv::KeyPoint *)(memory + ptsbufferstart);
desc = (unsigned char *)(memory + descstart);
descbuffer = (float*)(memory + descbufferstart);
ptindices = (int*)(memory + indicesstart);
ims = (CudaImage *)(memory + imgstart);
InitCompareIndices();
hipStreamCreate(©Stream);
return memory;
}
// Release the single GPU allocation returned by AllocBuffers().
void FreeBuffers(float *buffers) {
  safeCall(hipFree(buffers));
}
// Device-global scratch: running maximum gradient magnitude (float bits
// stored as unsigned int so integer atomicMax preserves float order for
// non-negative values) and the contrast histogram.
__device__ unsigned int d_Maxval[1];
__device__ int d_Histogram[512];
// Tile dimensions for the MaxContrast and HistContrast kernels below.
#define CONTRAST_W 64
#define CONTRAST_H 7
#define HISTCONT_W 64
#define HISTCONT_H 8
#define HISTCONT_R 4
// Compute per-pixel gradient magnitude of 'imgd' (3/10-weighted difference
// filter) into 'cond' (shifted by +1 in x and y), and track the global
// maximum magnitude in d_Maxval.  Launched with (CONTRAST_W+2) x
// (CONTRAST_H+2) threads per block so the one-pixel halo is loaded too.
// Gradient bits are compared as unsigned ints, which preserves ordering
// for non-negative floats, so integer atomicMax works on the raw bits.
__global__ void MaxContrast(float *imgd, float *cond, int width, int pitch,
                            int height) {
#define WID (CONTRAST_W + 2)
  __shared__ float buffer[WID * (CONTRAST_H + 2)];
  __shared__ unsigned int maxval[32];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  if (tx < 32 && !ty) maxval[tx] = 0;
  __syncthreads();
  int x = blockIdx.x * CONTRAST_W + tx;
  int y = blockIdx.y * CONTRAST_H + ty;
  // Do NOT return early for out-of-range threads: every thread in the block
  // must reach the __syncthreads() barriers below (returning from inside a
  // divergent branch before a barrier is undefined behavior).  Out-of-range
  // threads simply skip the loads/stores instead.
  bool inside = (x < width && y < height);
  float *b = buffer + ty * WID + tx;
  if (inside) b[0] = imgd[y * pitch + x];
  __syncthreads();
  if (inside && tx < CONTRAST_W && ty < CONTRAST_H && x < width - 2 &&
      y < height - 2) {
    float dx = 3.0f * (b[0] - b[2] + b[2 * WID] - b[2 * WID + 2]) +
               10.0f * (b[WID] - b[WID + 2]);
    float dy = 3.0f * (b[0] + b[2] - b[2 * WID] - b[2 * WID + 2]) +
               10.0f * (b[1] - b[2 * WID + 1]);
    float grad = sqrtf(dx * dx + dy * dy);
    cond[(y + 1) * pitch + (x + 1)] = grad;
    // Reinterpret the non-negative float as unsigned bits for atomicMax.
    unsigned int *gradi = (unsigned int *)&grad;
    atomicMax(maxval + (tx & 31), *gradi);
  }
  __syncthreads();
  // Fold the per-lane partial maxima into the global device maximum.
  if (tx < 32 && !ty) atomicMax(d_Maxval, maxval[tx]);
}
// Build a histogram of normalized gradient magnitudes: each block
// accumulates into a shared-memory histogram and merges it into the global
// d_Histogram at the end.  Each thread covers HISTCONT_R rows spaced
// HISTCONT_H apart; image borders (first/last row and column) are skipped.
__global__ void HistContrast(float *cond, int width, int pitch, int height,
                             float imaxval, int nbins) {
  __shared__ int hist[512];
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;
  // Flat thread index doubles as the bin index this thread owns.
  const int bin = ty * HISTCONT_W + tx;
  if (bin < nbins) hist[bin] = 0;
  __syncthreads();
  const int x = blockIdx.x * HISTCONT_W + tx;
  int y = blockIdx.y * HISTCONT_H * HISTCONT_R + ty;
  if (x > 0 && x < width - 1) {
    for (int r = 0; r < HISTCONT_R; ++r, y += HISTCONT_H) {
      if (y > 0 && y < height - 1) {
        // Normalize by the max gradient and clamp into the last bin.
        int idx = min((int)(nbins * cond[y * pitch + x] * imaxval), nbins - 1);
        atomicAdd(&hist[idx], 1);
      }
    }
  }
  __syncthreads();
  // Merge only non-empty bins to cut down on global atomics.
  if (bin < nbins && hist[bin] > 0) atomicAdd(&d_Histogram[bin], hist[bin]);
}
// Estimate a contrast threshold as (roughly) the 'perc' percentile of the
// gradient magnitudes of a smoothed copy of 'img', using an 'nbins'-bin
// GPU histogram.  Writes the result to 'contrast'; falls back to 0.03f if
// the histogram never reaches the requested percentile.
double ContrastPercentile(CudaImage &img, CudaImage &temp, CudaImage &blur,
                          float perc, int nbins, float &contrast) {
  // TimerGPU timer0(0);
  LowPass(img, blur, temp, 1.0f, 5);
  // d_Maxval holds the raw float bits of the max gradient as unsigned int
  // (bit order == value order for non-negative floats), so copying them
  // back into a float directly yields the maximum gradient.
  float h_Maxval = 0.0f;
  safeCall(hipMemcpyToSymbolAsync(d_Maxval, &h_Maxval, sizeof(float)));
  dim3 blocks1(iDivUp(img.width, CONTRAST_W), iDivUp(img.height, CONTRAST_H));
  dim3 threads1(CONTRAST_W + 2, CONTRAST_H + 2);
  MaxContrast << <blocks1, threads1>>>
      (blur.d_data, temp.d_data, blur.width, blur.pitch, blur.height);
  // checkMsg("MaxContrast() execution failed\n");
  // safeCall(hipDeviceSynchronize());
  // NOTE(review): async copy into a stack variable read just below; this
  // relies on the null-stream copy completing before use — verify.
  safeCall(hipMemcpyFromSymbolAsync(&h_Maxval, d_Maxval, sizeof(float)));
  if (nbins > 512) {
    printf(
        "Warning: Largest number of possible bins in ContrastPercentile() is "
        "512\n");
    nbins = 512;
  }
  int h_Histogram[512];
  memset(h_Histogram, 0, nbins * sizeof(int));
  safeCall(
      hipMemcpyToSymbolAsync(d_Histogram, h_Histogram, nbins * sizeof(int)));
  dim3 blocks2(iDivUp(temp.width, HISTCONT_W),
               iDivUp(temp.height, HISTCONT_H * HISTCONT_R));
  dim3 threads2(HISTCONT_W, HISTCONT_H);
  // NOTE(review): 1.0f / h_Maxval divides by zero on an all-flat image.
  HistContrast << <blocks2, threads2>>> (temp.d_data, temp.width, temp.pitch,
                                         temp.height, 1.0f / h_Maxval, nbins);
  safeCall(
      hipMemcpyFromSymbolAsync(h_Histogram, d_Histogram, nbins * sizeof(int)));
  // Walk the histogram until 'perc' of the interior pixels are covered.
  int npoints = (temp.width - 2) * (temp.height - 2);
  int nthreshold = (int)(npoints * perc);
  int k = 0, nelements = 0;
  for (k = 0; nelements < nthreshold && k < nbins; k++)
    nelements += h_Histogram[k];
  contrast = (nelements < nthreshold ? 0.03f : h_Maxval * ((float)k / nbins));
  double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
  printf("ContrastPercentile time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Weighted central differences at distance 'step' with mirrored borders:
// writes the x-derivative of 'imd' to 'lxd' and the y-derivative to 'lyd'.
// fac1 weighs the diagonal neighbors, fac2 the axis-aligned ones.
// One thread per pixel; launched on 32x16 tiles (see the host wrapper).
__global__ void Derivate(float *imd, float *lxd, float *lyd, int width,
                         int pitch, int height, int step, float fac1,
                         float fac2) {
  const int x = blockIdx.x * 32 + threadIdx.x;
  const int y = blockIdx.y * 16 + threadIdx.y;
  if (x >= width || y >= height) return;
  // Mirror coordinates that would fall outside the image.
  const int xm = (x < step ? step - x : x - step);
  const int xp = (x >= width - step ? 2 * width - x - step - 2 : x + step);
  const int ym = (y < step ? step - y : y - step);
  const int yp = (y >= height - step ? 2 * height - y - step - 2 : y + step);
  const float tl = imd[ym * pitch + xm];  // top-left
  const float tr = imd[ym * pitch + xp];  // top-right
  const float bl = imd[yp * pitch + xm];  // bottom-left
  const float br = imd[yp * pitch + xp];  // bottom-right
  const float ml = imd[y * pitch + xm];   // middle-left
  const float mr = imd[y * pitch + xp];   // middle-right
  lxd[y * pitch + x] = fac1 * (tr + br - tl - bl) + fac2 * (mr - ml);
  const float tc = imd[ym * pitch + x];   // top-center
  const float bc = imd[yp * pitch + x];   // bottom-center
  lyd[y * pitch + x] = fac1 * (br + bl - tr - tl) + fac2 * (bc - tc);
}
// Second-derivative pass: reads the first derivatives Lx ('lxd') and Ly
// ('lyd') produced by Derivate() and writes the Hessian determinant
// Lxx*Lyy - Lxy^2 to 'detd'.  Uses the same mirrored-border indexing and
// fac1/fac2 difference weights as Derivate().
__global__ void HessianDeterminant(float *lxd, float *lyd, float *detd,
                                   int width, int pitch, int height, int step,
                                   float fac1, float fac2) {
  int x = blockIdx.x * 32 + threadIdx.x;
  int y = blockIdx.y * 16 + threadIdx.y;
  if (x >= width || y >= height) return;
  // Mirrored neighbor coordinates at distance 'step'.
  int xl = (x < step ? step - x : x - step);
  int xh = (x >= width - step ? 2 * width - x - step - 2 : x + step);
  int yl = (y < step ? step - y : y - step);
  int yh = (y >= height - step ? 2 * height - y - step - 2 : y + step);
  // Differences of Lx: lxx (x direction) and lyx (= Lxy, y direction).
  float ul = lxd[yl * pitch + xl];
  float ur = lxd[yl * pitch + xh];
  float ll = lxd[yh * pitch + xl];
  float lr = lxd[yh * pitch + xh];
  float cl = lxd[y * pitch + xl];
  float cr = lxd[y * pitch + xh];
  float lxx = fac1 * (ur + lr - ul - ll) + fac2 * (cr - cl);
  float uc = lxd[yl * pitch + x];
  float lc = lxd[yh * pitch + x];
  float lyx = fac1 * (lr + ll - ur - ul) + fac2 * (lc - uc);
  // Reuse the same registers for the Ly samples to compute lyy.
  ul = lyd[yl * pitch + xl];
  ur = lyd[yl * pitch + xh];
  ll = lyd[yh * pitch + xl];
  lr = lyd[yh * pitch + xh];
  uc = lyd[yl * pitch + x];
  lc = lyd[yh * pitch + x];
  float lyy = fac1 * (lr + ll - ur - ul) + fac2 * (lc - uc);
  detd[y * pitch + x] = lxx * lyy - lyx * lyx;
}
// Compute the per-pixel Hessian determinant of 'img' in two passes:
// Derivate() writes the first derivatives into 'lx'/'ly', then the
// HessianDeterminant kernel overwrites 'img' with Lxx*Lyy - Lxy^2.
// Returns the (currently disabled) timing value.
double HessianDeterminant(CudaImage &img, CudaImage &lx, CudaImage &ly,
                          int step) {
  // Derivative filter weights shared by both kernel passes.
  const float w = 10.0 / 3.0;
  const float fac1 = 1.0 / (2.0 * (w + 2.0));
  const float fac2 = w * fac1;
  dim3 threads(32, 16);
  dim3 blocks(iDivUp(img.width, 32), iDivUp(img.height, 16));
  Derivate << <blocks, threads>>> (img.d_data, lx.d_data, ly.d_data, img.width,
                                   img.pitch, img.height, step, fac1, fac2);
  HessianDeterminant << <blocks, threads>>> (lx.d_data, ly.d_data, img.d_data,
                                             img.width, img.pitch, img.height,
                                             step, fac1, fac2);
  double gpuTime = 0;
#ifdef VERBOSE
  printf("HessianDeterminant time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Detect local maxima of the determinant response 'imd' above 'dthreshold'
// (8-connected neighborhood), refine the position with a 2D quadratic fit,
// and append cv::KeyPoints through the global d_PointCounter.
// NOTE(review): 'imp'/'imn' (neighboring scales) are accepted but never
// read, so no cross-scale comparison happens here — verify this is intended.
__global__ void FindExtrema(float *imd, float *imp, float *imn, int maxx,
                            int pitch, int maxy, float border, float dthreshold,
                            int scale, int octave, float size,
                            cv::KeyPoint *pts, int maxpts) {
  int x = blockIdx.x * 32 + threadIdx.x;
  int y = blockIdx.y * 16 + threadIdx.y;
  // Reject points whose border-sized neighborhood leaves the image.
  int left_x = (int)(x - border + 0.5f) - 1;
  int right_x = (int)(x + border + 0.5f) + 1;
  int up_y = (int)(y - border + 0.5f) - 1;
  int down_y = (int)(y + border + 0.5f) + 1;
  if (left_x < 0 || right_x >= maxx || up_y < 0 || down_y >= maxy) return;
  int p = y * pitch + x;
  float v = imd[p];
  // Strict maximum over the 8 neighbors and above the detector threshold.
  if (v > dthreshold && v > imd[p - pitch - 1] && v > imd[p + pitch + 1] &&
      v > imd[p + pitch - 1] && v > imd[p - pitch + 1] && v > imd[p - 1] &&
      v > imd[p + 1] && v > imd[p + pitch] && v > imd[p - pitch]) {
    // Quadratic subpixel refinement: solve the 2x2 Hessian system for the
    // offset (dst0, dst1) of the true extremum from the grid position.
    float dx = 0.5f * (imd[p + 1] - imd[p - 1]);
    float dy = 0.5f * (imd[p + pitch] - imd[p - pitch]);
    float dxx = imd[p + 1] + imd[p - 1] - 2.0f * v;
    float dyy = imd[p + pitch] + imd[p - pitch] - 2.0f * v;
    float dxy = 0.25f * (imd[p + pitch + 1] + imd[p - pitch - 1] -
                         imd[p + pitch - 1] - imd[p - pitch + 1]);
    float det = dxx * dyy - dxy * dxy;
    float idet = (det != 0.0f ? 1.0f / det : 0.0f);
    float dst0 = idet * (dxy * dy - dyy * dx);
    float dst1 = idet * (dxy * dx - dxx * dy);
    // 'weak' marks detections whose refined offset left the pixel cell.
    bool weak = true;
    if (dst0 >= -1.0f && dst0 <= 1.0f && dst1 >= -1.0f && dst1 <= 1.0f) {
      weak = 0;
    }
    unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
    if (idx < maxpts) {
      cv::KeyPoint &point = pts[idx];
      point.response = v;
      // Negative size flags weak points for later filtering.
      point.size = (weak ? -1 : 1) * 2.0 * size;
      // Pack octave plus |subpixel x offset| (sign in the float's sign bit)
      // into the bits of point.octave; unpacked by sortFiltered_kernel().
      float octsub = (dst0 < 0 ? -1 : 1) * (octave + fabs(dst0));
      *(float *)(&point.octave) = (weak ? octave : octsub);
      point.class_id = scale;
      // Coordinates are stored on the full-resolution grid (pre-subpixel);
      // the subpixel y offset rides along in point.angle.
      int ratio = (1 << octave);
      point.pt.x = ratio * (x);
      point.pt.y = ratio * (y);
      point.angle = dst1;
    } else {
      // Over capacity: undo the increment (best-effort under contention).
      atomicAdd(d_PointCounter,-1);
    }
  }
}
// Record the current global keypoint count as the (exclusive) end index
// for 'scale' in d_ExtremaIdx.  Launched <<<1, 1>>> after each
// FindExtrema pass (see the host wrapper below).
__global__ void CopyIdxArray(int scale) {
  d_ExtremaIdx[scale] = d_PointCounter[0];
}
// Host wrapper: run extrema detection on 'img' for one (octave, scale)
// level and record the resulting running keypoint count for that scale.
// Returns the (currently disabled) timing value.
double FindExtrema(CudaImage &img, CudaImage &imgp, CudaImage &imgn,
                   float border, float dthreshold, int scale, int octave,
                   float size, cv::KeyPoint *pts, int maxpts) {
  // One thread per pixel in 32x16 tiles.
  dim3 threads(32, 16);
  dim3 blocks(iDivUp(img.width, 32), iDivUp(img.height, 16));
  FindExtrema << <blocks, threads>>>
      (img.d_data, imgp.d_data, imgn.d_data, img.width, img.pitch, img.height,
       border, dthreshold, scale, octave, size, pts, maxpts);
  // Snapshot the keypoint counter as the end index of this scale.
  CopyIdxArray << <1, 1>>> (scale);
  CHK
  double gpuTime = 0;
#ifdef VERBOSE
  printf("FindExtrema time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Reset the device-side keypoint counter to zero.
void ClearPoints() {
  const int zero = 0;
  safeCall(hipMemcpyToSymbolAsync(d_PointCounter, &zero, sizeof(int)));
}
// Compare-and-swap step of a bitonic network on plain ints: order the pair
// so the larger value lands in the slot selected by 'sortdir'.
__forceinline__ __device__ void atomicSort(int *pts, int shmidx, int offset,
                                           int sortdir) {
  int &a = pts[shmidx + sortdir];
  int &b = pts[shmidx + (offset - sortdir)];
  if (a < b) {
    const int tmp = a;
    a = b;
    b = tmp;
  }
}
// Ordering predicate for keypoints: a zero x-coordinate acts as a padding
// sentinel, and 'i' sorts before 'j' exactly when 'j' is such a sentinel.
// Otherwise order lexicographically by (y, x).
__forceinline__ __device__ bool atomicCompare(const cv::KeyPoint &i,
                                              const cv::KeyPoint &j) {
  if (i.pt.x * j.pt.x == 0) return j.pt.x == 0;
  if (i.pt.y != j.pt.y) return i.pt.y < j.pt.y;
  return i.pt.x < j.pt.x;
}
// Compact record used while sorting keypoints: 'idx' is the index of the
// original keypoint (-1 marks a padding slot) and (x, y) its position
// truncated to shorts (see bitonicSort, which fills these fields).
template <typename T>
struct sortstruct_t {
  T idx;
  short x;
  short y;
};
// Ordering predicate for sortstruct_t records: x == 0 marks a padding
// slot, and 'i' sorts before 'j' exactly when 'j' is padding.  Otherwise
// order lexicographically by (y, x).
template <typename T>
__forceinline__ __device__ bool atomicCompare(const sortstruct_t<T> &i,
                                              const sortstruct_t<T> &j) {
  if (i.x * j.x == 0) return j.x == 0;
  if (i.y != j.y) return i.y < j.y;
  return i.x < j.x;
}
// Conditional-swap step of the bitonic network over sortstruct_t records,
// using atomicCompare() as the ordering predicate.
template <typename T>
__forceinline__ __device__ void atomicSort(sortstruct_t<T> *pts, int shmidx,
                                           int offset, int sortdir) {
  sortstruct_t<T> &a = pts[(shmidx + sortdir)];
  sortstruct_t<T> &b = pts[(shmidx + (offset - sortdir))];
  if (atomicCompare(a, b)) {
    const sortstruct_t<T> tmp = a;
    a = b;
    b = tmp;
  }
}
#define BitonicSortThreads 1024
// Sort the keypoints of one scale (block b owns the index range
// [d_ExtremaIdx[b-1], d_ExtremaIdx[b])) by (y, x) with a bitonic network
// over an 8192-entry shared-memory index array, then gather the keypoints
// into 'newpts' in sorted order.  Assumes at most 8192 points per scale
// (enforced by the maxnump check in FilterExtrema()).
template <class T>
__global__ void bitonicSort(const T *pts, T *newpts) {
  int scale = blockIdx.x;
  __shared__ struct sortstruct_t<short> shm[8192];
  int first = scale == 0 ? 0 : d_ExtremaIdx[scale - 1];
  int last = d_ExtremaIdx[scale];
  int nkpts = last - first;
  const cv::KeyPoint *tmpg = &pts[first];
  // Stage (idx, x, y) for each point; unused slots become padding
  // (idx = -1, x = 0) which atomicCompare() orders consistently.
  for (int i = threadIdx.x; i < 8192;
       i += BitonicSortThreads) {
    if (i < nkpts) {
      shm[i].idx = i;
      shm[i].y = (short)tmpg[i].pt.y;
      shm[i].x = (short)tmpg[i].pt.x;
    } else {
      shm[i].idx = -1;
      shm[i].y = 0;
      shm[i].x = 0;
    }
  }
  __syncthreads();
  // Bitonic sweep: i = current subsequence length, j = compare distance.
  for (int i=1; i<8192; i <<= 1) {
    for (int j=i; j>0; j >>= 1) {
      int tx = threadIdx.x;
      int mask = 0x0fffffff * j;
      // 4096 compare/swap pairs per (i, j) stage; 1024 threads -> 4 passes,
      // with 'tx' advancing by the thread count each pass.
      for (int idx=0; idx<4096; idx+=BitonicSortThreads) {
        int sortdir = (tx & i) > 0 ? 0 : 1;
        int tidx = ((tx & mask) << 1) + (tx & ~mask);
        atomicSort(shm, tidx, j, j*sortdir);
        tx += BitonicSortThreads;
        __syncthreads();
      }
    }
  }
  // Gather: copy each keypoint field-by-field in sorted order.
  cv::KeyPoint *tmpnewg = &newpts[first];
  for (int i = 0; i < 8192; i += BitonicSortThreads) {
    if (i + threadIdx.x < nkpts) {
      tmpnewg[i + threadIdx.x].angle = tmpg[shm[i + threadIdx.x].idx].angle;
      tmpnewg[i + threadIdx.x].class_id = tmpg[shm[i + threadIdx.x].idx].class_id;
      tmpnewg[i + threadIdx.x].octave = tmpg[shm[i + threadIdx.x].idx].octave;
      tmpnewg[i + threadIdx.x].pt.y = tmpg[shm[i + threadIdx.x].idx].pt.y;
      tmpnewg[i + threadIdx.x].pt.x = tmpg[shm[i + threadIdx.x].idx].pt.x;
      tmpnewg[i + threadIdx.x].response =
          tmpg[shm[i + threadIdx.x].idx].response;
      tmpnewg[i + threadIdx.x].size = tmpg[shm[i + threadIdx.x].idx].size;
    }
  }
}
// Fallback for scales with more points than shared memory allows: same
// bitonic sort as bitonicSort(), but staged in the global scratch array
// '_shm' (one _sz-entry segment per block/scale), with the network sized
// to the next power of two >= nkpts.
// NOTE(review): when nkpts_ceil < 2*BitonicSortThreads, the computed
// 'tidx' below can exceed this block's _sz-entry segment — verify the
// scratch sizing/indexing covers that case.
template <class T>
__global__ void bitonicSort_global(const T *pts, T *newpts, sortstruct_t<int>* _shm, int _sz) {
  int scale = blockIdx.x;
  //__shared__ struct sortstruct_t shm[8192];
  int first = scale == 0 ? 0 : d_ExtremaIdx[scale - 1];
  int last = d_ExtremaIdx[scale];
  int nkpts = last - first;
  const cv::KeyPoint *tmpg = &pts[first];
  // Round the network size up to a power of two.
  int nkpts_ceil = 1;
  while (nkpts_ceil < nkpts) nkpts_ceil *= 2;
  sortstruct_t<int> *shm = &(_shm[_sz*blockIdx.x]);
  // Stage (idx, x, y); unused slots become padding (idx = -1, x = 0).
  for (int i = threadIdx.x; i < nkpts_ceil;
       i += BitonicSortThreads) {
    if (i < nkpts) {
      shm[i].idx = i;
      shm[i].y = (short)tmpg[i].pt.y;
      shm[i].x = (short)tmpg[i].pt.x;
    } else {
      shm[i].idx = -1;
      shm[i].y = 0;
      shm[i].x = 0;
    }
  }
  __syncthreads();
  // Bitonic sweep: i = current subsequence length, j = compare distance.
  for (int i=1; i<nkpts_ceil; i <<= 1) {
    for (int j=i; j>0; j >>= 1) {
      int tx = threadIdx.x;
      int mask = 0x0fffffff * j;
      for (int idx=0; idx<nkpts_ceil/2; idx+=BitonicSortThreads) {
        int sortdir = (tx & i) > 0 ? 0 : 1;
        int tidx = ((tx & mask) << 1) + (tx & ~mask);
        atomicSort(shm, tidx, j, j*sortdir);
        tx += BitonicSortThreads;
        __syncthreads();
      }
    }
  }
  // Gather the keypoints into 'newpts' in sorted order.
  cv::KeyPoint *tmpnewg = &newpts[first];
  for (int i = 0; i < nkpts_ceil; i += BitonicSortThreads) {
    if (i + threadIdx.x < nkpts) {
      tmpnewg[i + threadIdx.x].angle = tmpg[shm[i + threadIdx.x].idx].angle;
      tmpnewg[i + threadIdx.x].class_id = tmpg[shm[i + threadIdx.x].idx].class_id;
      tmpnewg[i + threadIdx.x].octave = tmpg[shm[i + threadIdx.x].idx].octave;
      tmpnewg[i + threadIdx.x].pt.y = tmpg[shm[i + threadIdx.x].idx].pt.y;
      tmpnewg[i + threadIdx.x].pt.x = tmpg[shm[i + threadIdx.x].idx].pt.x;
      tmpnewg[i + threadIdx.x].response =
          tmpg[shm[i + threadIdx.x].idx].response;
      tmpnewg[i + threadIdx.x].size = tmpg[shm[i + threadIdx.x].idx].size;
    }
  }
}
#define FindNeighborsThreads 32
// For each keypoint (one block per point), collect the indices of earlier
// keypoints of the same or the previous scale that lie within half its
// 'size' (squared distance < (size/2)^2).  Row blockIdx.x of 'kptindices'
// receives the matches starting at slot 1; slot 0 gets match count + 1.
// Relies on points being sorted by (y, x) within each scale so the
// backward scan can break once the y gap exceeds size/2.
__global__ void FindNeighbors(cv::KeyPoint *pts, int *kptindices, int width) {
  __shared__ int gidx[1];
  // which scale?
  int scale = pts[blockIdx.x].class_id;
  int cmpIdx = scale < 1 ? 0 : d_ExtremaIdx[scale - 1];
  float size = pts[blockIdx.x].size;
  // All threads write the same value (1 = first free slot), so no race.
  gidx[0] = 1;
  __syncthreads();
  // One keypoint per block.
  cv::KeyPoint &kpt = pts[blockIdx.x];
  // Key point to compare. Only compare with smaller than current
  // Iterate backwards instead and break as soon as possible!
  //for (int i = cmpIdx + threadIdx.x; i < blockIdx.x; i += FindNeighborsThreads) {
  for (int i=blockIdx.x-threadIdx.x-1; i >= cmpIdx; i -= FindNeighborsThreads) {
    cv::KeyPoint &kpt_cmp = pts[i];
    if (kpt.pt.y-kpt_cmp.pt.y > size*.5f) break;
    //if (fabs(kpt.pt.y-kpt_cmp.pt.y) > size*.5f) continue;
    float dist = (kpt.pt.x - kpt_cmp.pt.x) * (kpt.pt.x - kpt_cmp.pt.x) +
                 (kpt.pt.y - kpt_cmp.pt.y) * (kpt.pt.y - kpt_cmp.pt.y);
    if (dist < size * size * 0.25) {
      int idx = atomicAdd(gidx, 1);
      kptindices[blockIdx.x * width + idx] = i;
    }
  }
  // Also scan the previous scale's range, again backwards with early exit.
  if (scale > 0) {
    int startidx = d_ExtremaIdx[scale-1];
    cmpIdx = scale < 2 ? 0 : d_ExtremaIdx[scale - 2];
    for (int i=startidx-threadIdx.x-1; i >= cmpIdx; i -= FindNeighborsThreads) {
      cv::KeyPoint &kpt_cmp = pts[i];
      if (kpt_cmp.pt.y-kpt.pt.y > size*.5f) continue;
      if (kpt.pt.y-kpt_cmp.pt.y > size*.5f) break;
      float dist = (kpt.pt.x - kpt_cmp.pt.x) * (kpt.pt.x - kpt_cmp.pt.x) +
                   (kpt.pt.y - kpt_cmp.pt.y) * (kpt.pt.y - kpt_cmp.pt.y);
      if (dist < size * size * 0.25) {
        int idx = atomicAdd(gidx, 1);
        kptindices[blockIdx.x * width + idx] = i;
      }
    }
  }
  __syncthreads();
  // Publish the final slot count (matches + 1) in slot 0.
  if (threadIdx.x == 0) {
    kptindices[blockIdx.x * width] = gidx[0];
  }
}
// TODO Intermediate storage of memberarray and minneighbor
#define FilterExtremaThreads 1024
// Single-block duplicate-suppression pass over all detected keypoints,
// using the neighbor lists built by FindNeighbors() (slot 0 of each
// 'width'-wide row of 'kptindices' holds the count).  memberarray[i]
// encodes per-point state: -1 = unprocessed, -2 = added but replaced by a
// stronger neighbor, >= 0 = index of the point that represents i.  The
// sweep repeats until one full pass processes no new point.  'newkpts' is
// not referenced here; sortFiltered_kernel() consumes memberarray next.
__global__ void FilterExtrema_kernel(cv::KeyPoint *kpts, cv::KeyPoint *newkpts,
                                     int *kptindices, int width,
                                     int *memberarray,
                                     int *minneighbor,
                                     char *shouldAdd) {
  // -1 means not processed
  // -2 means added but replaced
  // >=0 means added
  __shared__ bool shouldBreak[1];
  int nump = d_PointCounter[0];
  // Initially all points are unprocessed
  for (int i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
    memberarray[i] = -1;
  }
  if (threadIdx.x == 0) {
    shouldBreak[0] = true;
  }
  __syncthreads();
  // Loop until there are no more points to process
  // (bounded at 10000 sweeps as a safety net).
  for (int xx=0; xx<10000; ++xx) {
    //while (true) {
    // Outer loop to handle more than 8*1024 points
    // Start by restoring memberarray
    // Make sure to add appropriate offset to indices
    // for (int offset=0; offset<nump; offset += 8*1024) {
    //  memberarray[i] = storedmemberarray[i+offset];
    //for (int offset=0; offset<nump; offset += 8*1024) {
    // Mark all points for addition and no minimum neighbor
    //int maxi = nump-offset >= 8*1024 ? 8*1024 : nump-offset;
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      minneighbor[i] = nump+1;
      shouldAdd[i] = true;
    }
    __syncthreads();
    // Look through all points. If there are points that have not been processed,
    // disable breaking and check if it has no processed neighbors (add), has all processed
    // neighbors (compare with neighbors) or has some unprocessed neighbor (wait)
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      int neighborsSize = kptindices[i * width] - 1;
      int *neighbors = &(kptindices[i * width + 1]);
      // Only do if we didn't process the point before
      if (memberarray[i] == -1) {
        // If we process at least one point we shouldn't break
        // No need to sync. Only want to know if at least one thread wants to
        // continue
        shouldBreak[0] = false;
        // Sort neighbors according to the order of currently added points
        // (often very few)
        // If the neighbor has been replaced, stick it to the back
        // If any neighbor has not been processed, break;
        bool shouldProcess = true;
        for (int k = 0; k < neighborsSize; ++k) {
          // If the point has one or more unprocessed neighbors, skip
          if (memberarray[neighbors[k]] == -1) {
            shouldProcess = false;
            shouldAdd[i] = false;
            break;
          }
          // If it has a neighbor that is in the list, we don't add, but process
          if (memberarray[neighbors[k]] >= 0) {
            shouldAdd[i] = false;
          }
        }
        // We should process and potentially replace the neighbor
        if (shouldProcess && !shouldAdd[i]) {
          // Find the smallest neighbor. Often only one or two, so no ned for fancy algorithm
          for (int k = 0; k < neighborsSize; ++k) {
            for (int j = k + 1; j < neighborsSize; ++j) {
              if (memberarray[neighbors[k]] == -2 ||
                  (memberarray[neighbors[j]] != -2 &&
                   memberarray[neighbors[j]] < memberarray[neighbors[k]])) {
                int t = neighbors[k];
                neighbors[k] = neighbors[j];
                neighbors[j] = t;
              }
            }
          }
          // Pick the first neighbor
          // We need to make sure, in case more than one point has this
          // neighbor,
          // That the point with lowest memberarrayindex processes it first
          // Here minneighbor[i] is the target and i the neighbor
          int nidx = neighbors[0];
          minneighbor[nidx] = min(minneighbor[nidx], (int)i);
        }
      }
    }
    __syncthreads();
    // Check which points we can add
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      if (memberarray[i] == -1) {
        if (shouldAdd[i]) {
          memberarray[i] = i;
        }
      }
    }
    __syncthreads();
    // Look at the neighbors. If the response is higher, replace
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      if (minneighbor[i] != nump+1) {
        if (memberarray[minneighbor[i]] == -1) {
          if (!shouldAdd[minneighbor[i]]) {
            const cv::KeyPoint &p0 = kpts[minneighbor[i]];
            const cv::KeyPoint &p1 = kpts[i];
            if (p0.response > p1.response) {
              memberarray[minneighbor[i]] = i;
              memberarray[i] = -2;
            } else {
              memberarray[minneighbor[i]] = -2;
            }
          }
        }
      }
    }
    __syncthreads();
    // End outer loop
    //for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
    //  storedmemberarray[i+offset] = memberarray[i];
    //  }
    //  __syncthreads();
    //}
    // Are we done?
    if (shouldBreak[0]) break;
    if (threadIdx.x == 0) {
      shouldBreak[0] = true;
    }
    __syncthreads();
  }
  __syncthreads();
}
// Single-block compaction pass: gathers the surviving keypoints
// (memberarray >= 0 and non-negative size) in 2048-entry chunks,
// bitonic-sorts each chunk, restores the subpixel offsets packed into
// 'octave' and 'angle' by FindExtrema(), writes the result to 'newkpts',
// and finally publishes the compacted count through d_PointCounter.
// NOTE(review): the 'curridx[0] += ...' updates below are performed by
// many threads without atomics; this is benign only because all writers
// compute the same value in each branch — verify.
__global__ void sortFiltered_kernel(cv::KeyPoint *kpts, cv::KeyPoint *newkpts,
                                    int *memberarray) {
  __shared__ int minneighbor[2048];
  __shared__ int curridx[1];
  int nump = d_PointCounter[0];
  if (threadIdx.x == 0) {
    curridx[0] = 0;
  }
  // Sort array
  // Process in 2048-entry chunks; rejected/padding entries become nump+1
  // so the ascending sort pushes them to the back of the chunk.
  const int upper = (nump + 2047) & (0xfffff800);
  for (int i = threadIdx.x; i < upper; i += 2 * FilterExtremaThreads) {
    minneighbor[threadIdx.x] =
        i >= nump ? nump+1 : (memberarray[i] < 0 ? nump+1 : (kpts[memberarray[i]].size < 0 ? nump+1 : memberarray[i]));
    minneighbor[threadIdx.x + 1024] =
        i + 1024 >= nump ? nump+1
        : (memberarray[i + 1024] < 0 ? nump+1 : (kpts[memberarray[i+1024]].size < 0 ? nump+1 : memberarray[i+1024]));
    __syncthreads();
    // Sort and store keypoints
#pragma unroll 1
    for (int k = 1; k < 2048; k <<= 1) {
      int sortdir = (threadIdx.x & k) > 0 ? 0 : 1;
#pragma unroll 1
      for (int j = k; j > 0; j >>= 1) {
        int mask = 0x0fffffff * j;
        int tidx = ((threadIdx.x & mask) << 1) + (threadIdx.x & ~mask);
        atomicSort(minneighbor, tidx, j, j * sortdir);
        __syncthreads();
      }
    }
    __syncthreads();
#pragma unroll 1
    for (int k = threadIdx.x; k < 2048; k += 1024) {
      if (minneighbor[k] < nump) {
        // Restore subpixel component
        // FindExtrema() packed octave + |subpixel x| (sign in the sign bit)
        // into the float bits of 'octave', and subpixel y into 'angle'.
        cv::KeyPoint &okpt = kpts[minneighbor[k]];
        float octsub = fabs(*(float*)(&kpts[minneighbor[k]].octave));
        int octave = (int)octsub;
        float subp = (*(float*)(&kpts[minneighbor[k]].octave) < 0 ? -1 : 1) * (octsub - octave);
        float ratio = 1 << octave;
        cv::KeyPoint &tkpt = newkpts[k + curridx[0]];
        tkpt.pt.y = ratio * ((int)(0.5f+okpt.pt.y / ratio) + okpt.angle);
        tkpt.pt.x = ratio * ((int)(0.5f+okpt.pt.x / ratio) + subp);
        // newkpts[k + curridx[0] + threadIdx.x].angle = 0; // This will be set elsewhere
        tkpt.class_id = okpt.class_id;
        tkpt.octave = octave;
        tkpt.response = okpt.response;
        tkpt.size = okpt.size;
      }
    }
    __syncthreads();
    // How many did we add?
    // Advance the output cursor by the number of valid entries in the chunk.
    if (minneighbor[2047] < nump) {
      curridx[0] += 2048;
    } else {
      if (minneighbor[1024] < nump) {
        if (threadIdx.x < 1023 && minneighbor[1024 + threadIdx.x] < nump &&
            minneighbor[1024 + threadIdx.x + 1] == nump+1) {
          curridx[0] += 1024 + threadIdx.x + 1;
        }
      } else {
        if (minneighbor[threadIdx.x] < nump &&
            minneighbor[threadIdx.x + 1] == nump+1) {
          curridx[0] += threadIdx.x + 1;
        }
      }
      __syncthreads();
    }
  }
  __syncthreads();
  // Publish the compacted keypoint count.
  if (threadIdx.x == 0) {
    d_PointCounter[0] = curridx[0];
  }
}
// Host driver for duplicate suppression: sorts the detected keypoints per
// scale, builds neighbor lists, runs the filtering and compaction kernels,
// and returns the filtered point count through 'nump'.
// 'pts' holds the raw detections; 'newpts' is used as scratch for the
// sorted set, and the final filtered keypoints end up back in 'pts'.
void FilterExtrema(cv::KeyPoint *pts, cv::KeyPoint *newpts, int* kptindices, int& nump) {
  // Current number of detections on the device.
  safeCall(hipMemcpyFromSymbol(&nump, d_PointCounter, sizeof(int)));
  // Per-scale end indices; the largest per-scale count selects the sort path.
  unsigned int extremaidx_h[16];
  safeCall(hipMemcpyFromSymbol(extremaidx_h, d_ExtremaIdx,
                               16 * sizeof(unsigned int)));
  int maxnump = extremaidx_h[0];
  for (int i = 1; i < 16; ++i) {
    maxnump = max(maxnump, (int)(extremaidx_h[i] - extremaidx_h[i - 1]));
  }
  // Each kptindices row holds up to 21*21 neighbor slots (count in slot 0).
  const int width = 21 * 21;
  // Sort the list of points, one block per scale.
  dim3 blocks(16, 1, 1);
  dim3 threads(BitonicSortThreads, 1, 1);
  if (maxnump <= 8 * 1024) {
    // Fits in the shared-memory variant (8192-point capacity per scale).
    bitonicSort << <blocks, threads>>> (pts, newpts);
  } else {
    // Too many points per scale: sort via a global scratch buffer sized to
    // the next power of two >= nump, one segment per scale.
    int nump_ceil = 1;
    while (nump_ceil < nump) nump_ceil <<= 1;
    std::cout << "numpceil: " << nump_ceil << std::endl;
    sortstruct_t<int>* sortstruct;
    safeCall(hipMalloc((void**)&sortstruct,
                       nump_ceil * 16 * sizeof(sortstruct_t<int>)));
    bitonicSort_global << <blocks, threads>>> (pts, newpts, sortstruct,
                                               nump_ceil);
    safeCall(hipFree(sortstruct));
  }
  CHK
  // Find all neighbors: one block per (sorted) keypoint.
  hipStreamSynchronize(copyStream);
  blocks.x = nump;
  threads.x = FindNeighborsThreads;
  FindNeighbors << <blocks, threads>>> (newpts, kptindices, width);
  CHK
  // Filter extrema in a single block, then compact/sort the survivors.
  // NOTE: scratch buffers are allocated/freed every call; consider caching
  // them if this function is hot.
  blocks.x = 1;
  threads.x = FilterExtremaThreads;
  int *buffer1, *buffer2;
  safeCall(hipMalloc((void**)&buffer1, nump * sizeof(int)));
  safeCall(hipMalloc((void**)&buffer2, nump * sizeof(int)));
  char* buffer3;
  safeCall(hipMalloc((void**)&buffer3, nump));
  FilterExtrema_kernel << <blocks, threads>>> (newpts, pts, kptindices, width,
                                               buffer1, buffer2, buffer3);
  threads.x = 1024;
  sortFiltered_kernel << <blocks, threads>>> (newpts, pts, buffer1);
  CHK
  safeCall(hipFree(buffer1));
  safeCall(hipFree(buffer2));
  safeCall(hipFree(buffer3));
  // Blocking copy: 'nump' must hold the filtered count when we return.
  safeCall(hipMemcpyFromSymbol(&nump, d_PointCounter, sizeof(int)));
}
// Asynchronously download 'numPts' keypoints into 'h_pts' on copyStream.
// The copy is asynchronous: synchronize copyStream before reading h_pts.
// Returns the number of points requested.
int GetPoints(std::vector<cv::KeyPoint> &h_pts, cv::KeyPoint *d_pts, int numPts) {
  h_pts.resize(numPts);
  const size_t bytes = sizeof(cv::KeyPoint) * numPts;
  safeCall(hipMemcpyAsync(h_pts.data(), d_pts, bytes,
                          hipMemcpyDeviceToHost, copyStream));
  return numPts;
}
// Download 'numPts' 61-byte descriptors from device matrix 'd_desc' into a
// freshly allocated host matrix 'h_desc' (numPts x 61, CV_8U).
// The copy is asynchronous on copyStream: synchronize before using h_desc.
void GetDescriptors(cv::Mat &h_desc, cv::Mat &d_desc, int numPts) {
  h_desc = cv::Mat(numPts, 61, CV_8U);
  // Checked with safeCall for consistency with GetPoints() above.
  safeCall(hipMemcpyAsync(h_desc.data, d_desc.data, numPts * 61,
                          hipMemcpyDeviceToHost, copyStream));
}
// Compute the 3*29 raw descriptor sums for one keypoint per block
// (p = blockIdx.x): intensity plus rotated x/y derivatives accumulated
// over 2x2, 3x3 and 4x4 subregion grids (4 + 9 + 16 = 29 cells), sampled
// from the keypoint's pyramid level and rotated by its 'angle'.
// Results go to _vals[p*3*29 ..].
// NOTE(review): the reduction below indexes tx_d+32, which appears to
// assume EXTRACT_S == 64 (two warps) — verify against the define.
// NOTE(review): __shfl_down without a mask is the HIP form; CUDA builds on
// Volta+ would need __shfl_down_sync.
__global__ void ExtractDescriptors(cv::KeyPoint *d_pts, CudaImage *d_imgs,
                                   float *_vals, int size2, int size3,
                                   int size4) {
  // Per-thread partial sums: 30 interleaved (cell, channel) slots per
  // thread for each of the three channels (im, dx, dy).
  __shared__ float acc_vals[3 * 30 * EXTRACT_S];
  float *acc_vals_im = &acc_vals[0];
  float *acc_vals_dx = &acc_vals[30 * EXTRACT_S];
  float *acc_vals_dy = &acc_vals[2 * 30 * EXTRACT_S];
  int p = blockIdx.x;
  float *vals = &_vals[p * 3 * 29];
  // Map the keypoint back to its octave's resolution.
  float iratio = 1.0f / (1 << d_pts[p].octave);
  int scale = (int)(0.5f * d_pts[p].size * iratio + 0.5f);
  float xf = d_pts[p].pt.x * iratio;
  float yf = d_pts[p].pt.y * iratio;
  float ang = d_pts[p].angle;
  float co = cos(ang);
  float si = sin(ang);
  int tx = threadIdx.x;
  int lev = d_pts[p].class_id;
  // Pyramid level layout: 4 images per level (0 = image, 2 = dx, 3 = dy).
  float *imd = d_imgs[4 * lev + 0].d_data;
  float *dxd = d_imgs[4 * lev + 2].d_data;
  float *dyd = d_imgs[4 * lev + 3].d_data;
  int pitch = d_imgs[4 * lev + 0].pitch;
  int winsize = max(3 * size3, 4 * size4);
  for (int i = 0; i < 30; ++i) {
    acc_vals_im[i * EXTRACT_S + tx] = 0.f;
    acc_vals_dx[i * EXTRACT_S + tx] = 0.f;
    acc_vals_dy[i * EXTRACT_S + tx] = 0.f;
  }
  __syncthreads();
  // Each thread strides over the (rotated, scaled) sample window and
  // accumulates into its private 3*30 slice of shared memory.
  for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
    int y = i / winsize;
    int x = i - winsize * y;
    int m = max(x, y);
    if (m >= winsize) continue;
    int l = x - size2;
    int k = y - size2;
    // Rotate the sample offset by the keypoint angle.
    int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
    int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
    int pos = yp * pitch + xp;
    float im = imd[pos];
    float dx = dxd[pos];
    float dy = dyd[pos];
    // Rotate the gradient into the keypoint frame.
    float rx = -dx * si + dy * co;
    float ry = dx * co + dy * si;
    if (m < 2 * size2) {
      int x2 = (x < size2 ? 0 : 1);
      int y2 = (y < size2 ? 0 : 1);
      // Add 2x2
      acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx] += im;
      acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx + 1] += rx;
      acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx + 2] += ry;
    }
    if (m < 3 * size3) {
      int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
      int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
      // Add 3x3
      acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx] += im;
      acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx + 1] += rx;
      acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx + 2] += ry;
    }
    if (m < 4 * size4) {
      int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
      int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
      // Add 4x4
      acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx] += im;
      acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx + 1] += rx;
      acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx + 2] += ry;
    }
  }
  __syncthreads();
  // Reduce stuff
  // Fold the per-thread slices: shared-memory add across the two warps,
  // then shuffle reduction within each warp; results land at acc_vals[0..89].
  float acc_reg;
#pragma unroll
  for (int i = 0; i < 15; ++i) {
    // 0..31 takes care of even accs, 32..63 takes care of odd accs
    int offset = 2 * i + (tx < 32 ? 0 : 1);
    int tx_d = tx < 32 ? tx : tx - 32;
    for (int d = 0; d < 90; d += 30) {
      if (tx_d < 32) {
        acc_reg = acc_vals[3 * 30 * tx_d + offset + d] +
                  acc_vals[3 * 30 * (tx_d + 32) + offset + d];
        acc_reg += __shfl_down(acc_reg, 1);
        acc_reg += __shfl_down(acc_reg, 2);
        acc_reg += __shfl_down(acc_reg, 4);
        acc_reg += __shfl_down(acc_reg, 8);
        acc_reg += __shfl_down(acc_reg, 16);
      }
      if (tx_d == 0) {
        acc_vals[offset + d] = acc_reg;
      }
    }
  }
  __syncthreads();
  // Have 29*3 values to store.  After the reduction they sit contiguously
  // in acc_vals[0..86] (interleaved 3 channels per cell), so three strided
  // copies of 29 values each move them all out.
  if (tx < 29) {
    vals[tx] = acc_vals[tx];
    vals[29 + tx] = acc_vals[29 + tx];
    vals[2 * 29 + tx] = acc_vals[2 * 29 + tx];
  }
}
__global__ void ExtractDescriptors_serial(cv::KeyPoint *d_pts,
CudaImage *d_imgs, float *_vals,
int size2, int size3, int size4) {
__shared__ float acc_vals[30 * EXTRACT_S];
__shared__ float final_vals[3 * 30];
int p = blockIdx.x;
float *vals = &_vals[p * 3 * 29];
float iratio = 1.0f / (1 << d_pts[p].octave);
int scale = (int)(0.5f * d_pts[p].size * iratio + 0.5f);
float xf = d_pts[p].pt.x * iratio;
float yf = d_pts[p].pt.y * iratio;
float ang = d_pts[p].angle;
float co = cos(ang);
float si = sin(ang);
int tx = threadIdx.x;
int lev = d_pts[p].class_id;
float *imd = d_imgs[4 * lev + 0].d_data;
float *dxd = d_imgs[4 * lev + 2].d_data;
float *dyd = d_imgs[4 * lev + 3].d_data;
int pitch = d_imgs[4 * lev + 0].pitch;
int winsize = max(3 * size3, 4 * size4);
// IM
for (int i = 0; i < 30; ++i) {
acc_vals[i * EXTRACT_S + tx] = 0.f;
}
__syncthreads();
for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
int y = i / winsize;
int x = i - winsize * y;
int m = max(x, y);
if (m >= winsize) continue;
int l = x - size2;
int k = y - size2;
int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
int pos = yp * pitch + xp;
float im = imd[pos];
if (m < 2 * size2) {
int x2 = (x < size2 ? 0 : 1);
int y2 = (y < size2 ? 0 : 1);
// atomicAdd(norm2, (x < size2 && y < size2 ? 1 : 0));
// Add 2x2
acc_vals[(y2 * 2 + x2) + 30 * tx] += im;
}
if (m < 3 * size3) {
int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
// atomicAdd(norm3, (x < size3 && y < size3 ? 1 : 0));
// Add 3x3
acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += im;
}
if (m < 4 * size4) {
int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
// atomicAdd(norm4, (x < size4 && y < size4 ? 1 : 0));
// Add 4x4
acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += im;
}
}
__syncthreads();
// Reduce stuff
#pragma unroll
for (int i = 0; i < 15; ++i) {
// 0..31 takes care of even accs, 32..63 takes care of odd accs
int offset = 2 * i + (tx < 32 ? 0 : 1);
int tx_d = tx < 32 ? tx : tx - 32;
int acc_idx = 30 * tx_d + offset;
if (tx_d < 32) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
}
if (tx_d < 16) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
}
if (tx_d < 8) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
}
if (tx_d < 4) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
}
if (tx_d < 2) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
}
if (tx_d < 1) {
final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
}
}
// DX
for (int i = 0; i < 30; ++i) {
acc_vals[i * EXTRACT_S + tx] = 0.f;
}
__syncthreads();
for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
int y = i / winsize;
int x = i - winsize * y;
int m = max(x, y);
if (m >= winsize) continue;
int l = x - size2;
int k = y - size2;
int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
int pos = yp * pitch + xp;
float dx = dxd[pos];
float dy = dyd[pos];
float rx = -dx * si + dy * co;
if (m < 2 * size2) {
int x2 = (x < size2 ? 0 : 1);
int y2 = (y < size2 ? 0 : 1);
// atomicAdd(norm2, (x < size2 && y < size2 ? 1 : 0));
// Add 2x2
acc_vals[(y2 * 2 + x2) + 30 * tx] += rx;
}
if (m < 3 * size3) {
int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
// atomicAdd(norm3, (x < size3 && y < size3 ? 1 : 0));
// Add 3x3
acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += rx;
}
if (m < 4 * size4) {
int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
// atomicAdd(norm4, (x < size4 && y < size4 ? 1 : 0));
// Add 4x4
acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += rx;
}
}
__syncthreads();
// Reduce stuff
#pragma unroll
for (int i = 0; i < 15; ++i) {
// 0..31 takes care of even accs, 32..63 takes care of odd accs
int offset = 2 * i + (tx < 32 ? 0 : 1);
int tx_d = tx < 32 ? tx : tx - 32;
int acc_idx = 30 * tx_d + offset;
if (tx_d < 32) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
}
if (tx_d < 16) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
}
if (tx_d < 8) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
}
if (tx_d < 4) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
}
if (tx_d < 2) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
}
if (tx_d < 1) {
final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
}
}
// DY
for (int i = 0; i < 30; ++i) {
acc_vals[i * EXTRACT_S + tx] = 0.f;
}
__syncthreads();
for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
int y = i / winsize;
int x = i - winsize * y;
int m = max(x, y);
if (m >= winsize) continue;
int l = x - size2;
int k = y - size2;
int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
int pos = yp * pitch + xp;
float dx = dxd[pos];
float dy = dyd[pos];
float ry = dx * co + dy * si;
if (m < 2 * size2) {
int x2 = (x < size2 ? 0 : 1);
int y2 = (y < size2 ? 0 : 1);
// atomicAdd(norm2, (x < size2 && y < size2 ? 1 : 0));
// Add 2x2
acc_vals[(y2 * 2 + x2) + 30 * tx] += ry;
}
if (m < 3 * size3) {
int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
// atomicAdd(norm3, (x < size3 && y < size3 ? 1 : 0));
// Add 3x3
acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += ry;
}
if (m < 4 * size4) {
int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
// atomicAdd(norm4, (x < size4 && y < size4 ? 1 : 0));
// Add 4x4
acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += ry;
}
}
__syncthreads();
// Reduce stuff
#pragma unroll
for (int i = 0; i < 15; ++i) {
// 0..31 takes care of even accs, 32..63 takes care of odd accs
int offset = 2 * i + (tx < 32 ? 0 : 1);
int tx_d = tx < 32 ? tx : tx - 32;
int acc_idx = 30 * tx_d + offset;
if (tx_d < 32) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
}
if (tx_d < 16) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
}
if (tx_d < 8) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
}
if (tx_d < 4) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
}
if (tx_d < 2) {
acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
}
if (tx_d < 1) {
final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
}
}
__syncthreads();
// Have 29*3 values to store
// They are in acc_vals[0..28,64*30..64*30+28,64*60..64*60+28]
if (tx < 29) {
vals[tx] = final_vals[tx];
vals[29 + tx] = final_vals[29 + tx];
vals[2 * 29 + tx] = final_vals[2 * 29 + tx];
}
}
// One block per keypoint: each of the first 61 threads packs one descriptor
// byte by comparing pairs of accumulated response values from _valsim
// (3*29 floats per point) using the constant index tables comp_idx_1/2.
__global__ void BuildDescriptor(float *_valsim, unsigned char *_desc) {
  const int pt = blockIdx.x;
  const size_t bytePos = threadIdx.x;
  if (bytePos >= 61) return;
  const float *vals = _valsim + 3 * 29 * pt;
  unsigned char *out = _desc + 61 * pt;
  // The final byte carries only 6 comparison bits; all others carry 8.
  const int nbits = (bytePos == 60 ? 6 : 8);
  unsigned char packed = 0;
#pragma unroll
  for (int b = 0; b < nbits; ++b) {
    const int ia = comp_idx_1[bytePos * 8 + b];
    const int ib = comp_idx_2[bytePos * 8 + b];
    packed |= (unsigned char)(vals[ia] > vals[ib]) << b;
  }
  out[bytePos] = packed;
}
// Host wrapper: derives the three subregion grid cell sizes from the patch
// size, launches the ExtractDescriptors kernel (one block per keypoint) to
// fill vals_d with 3*29 accumulated responses per point, then packs them
// into 61-byte binary descriptors with BuildDescriptor.
// Returns the measured GPU time (timer currently disabled, always 0).
double ExtractDescriptors(cv::KeyPoint *d_pts, std::vector<CudaImage> &h_imgs, CudaImage *d_imgs,
                          unsigned char *desc_d, float* vals_d, int patsize, int numPts) {
  int size2 = patsize;                      // 2x2 grid cell size
  int size3 = ceil(2.0f * patsize / 3.0f);  // 3x3 grid cell size
  int size4 = ceil(0.5f * patsize);         // 4x4 grid cell size
  //int numPts;
  //hipMemcpyFromSymbol(&numPts, d_PointCounter, sizeof(int));
  // TimerGPU timer0(0);
  dim3 blocks(numPts);
  dim3 threads(EXTRACT_S);
  ExtractDescriptors << <blocks, threads>>>(d_pts, d_imgs, vals_d, size2, size3, size4);
  CHK;
  // Descriptor bytes are built by OR-ing bits, so clear them first.
  hipMemsetAsync(desc_d, 0, numPts * 61);
  // 64 threads per block; only the first 61 write a byte each.
  BuildDescriptor << <blocks, 64>>> (vals_d, desc_d);
  CHK;
  ////checkMsg("ExtractDescriptors() execution failed\n");
  // safeCall(hipDeviceSynchronize());
  double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
  printf("ExtractDescriptors time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
#define NTHREADS_MATCH 32
// One block per query descriptor: finds the best and second-best matching
// train descriptors by Hamming distance (512 = worse than any real distance,
// since descriptors are 8*64 = 512 bits). Assumes both descriptor arrays use
// the same row pitch and are zero-padded to 64 bytes so each row can be read
// as 8 64-bit words.
__global__ void MatchDescriptors(unsigned char *d1, unsigned char *d2,
                                 int pitch, int nkpts_2, cv::DMatch *matches) {
  int p = blockIdx.x;
  int x = threadIdx.x;
  __shared__ int idxBest[NTHREADS_MATCH];
  __shared__ int idxSecondBest[NTHREADS_MATCH];
  __shared__ int scoreBest[NTHREADS_MATCH];
  __shared__ int scoreSecondBest[NTHREADS_MATCH];
  idxBest[x] = 0;
  idxSecondBest[x] = 0;
  scoreBest[x] = 512;
  scoreSecondBest[x] = 512;
  __syncthreads();
  // Phase 1: each thread scans a strided subset of the train descriptors and
  // keeps its private best / second-best candidate in shared memory.
  unsigned long long *d1i = (unsigned long long *)(d1 + pitch * p);
  for (int i = 0; i < nkpts_2; i += NTHREADS_MATCH) {
    unsigned long long *d2i = (unsigned long long *)(d2 + pitch * (x + i));
    if (i + x < nkpts_2) {
      int score = 0;
#pragma unroll
      for (int j = 0; j < 8; ++j) {
        score += __popcll(d1i[j] ^ d2i[j]);
      }
      if (score < scoreBest[x]) {
        scoreSecondBest[x] = scoreBest[x];
        scoreBest[x] = score;
        idxSecondBest[x] = idxBest[x];
        idxBest[x] = i + x;
      } else if (score < scoreSecondBest[x]) {
        scoreSecondBest[x] = score;
        idxSecondBest[x] = i + x;
      }
    }
  }
  __syncthreads();
  // Phase 2: tree reduction over the per-thread candidates. Bug fix: a
  // barrier is required after every level -- the original had none, relying
  // on implicit warp-synchronous execution, which is not guaranteed on
  // architectures with independent thread scheduling and can let a thread
  // read its partner's entries before the previous level's writes land.
  for (int i = NTHREADS_MATCH / 2; i >= 1; i /= 2) {
    if (x < i) {
      if (scoreBest[x + i] < scoreBest[x]) {
        scoreSecondBest[x] = scoreBest[x];
        scoreBest[x] = scoreBest[x + i];
        idxSecondBest[x] = idxBest[x];
        idxBest[x] = idxBest[x + i];
      } else if (scoreBest[x + i] < scoreSecondBest[x]) {
        scoreSecondBest[x] = scoreBest[x + i];
        idxSecondBest[x] = idxBest[x + i];
      }
      if (scoreSecondBest[x + i] < scoreSecondBest[x]) {
        scoreSecondBest[x] = scoreSecondBest[x + i];
        idxSecondBest[x] = idxSecondBest[x + i];
      }
    }
    __syncthreads();
  }
  // Thread 0 writes the two nearest neighbours for this query point.
  if (x == 0) {
    matches[2 * p].queryIdx = p;
    matches[2 * p].trainIdx = idxBest[0];
    matches[2 * p].distance = scoreBest[0];
    matches[2 * p + 1].queryIdx = p;
    matches[2 * p + 1].trainIdx = idxSecondBest[0];
    matches[2 * p + 1].distance = scoreSecondBest[0];
  }
}
// Host-side matcher using caller-provided device/host buffers: launches the
// 2-NN Hamming matching kernel, copies the results back, and repackages them
// knnMatch-style (two cv::DMatch per query point).
void MatchDescriptors(cv::Mat &desc_query, cv::Mat &desc_train,
                      std::vector<std::vector<cv::DMatch> > &dmatches,
                      size_t pitch,
                      unsigned char* descq_d, unsigned char* desct_d, cv::DMatch* dmatches_d, cv::DMatch* dmatches_h) {
  const int nQuery = desc_query.rows;
  dim3 block(nQuery);
  MatchDescriptors << <block, NTHREADS_MATCH>>>(descq_d, desct_d, pitch, desc_train.rows, dmatches_d);
  // Blocking copy; also synchronizes with the kernel on the default stream.
  hipMemcpy(dmatches_h, dmatches_d, nQuery * 2 * sizeof(cv::DMatch),
            hipMemcpyDeviceToHost);
  for (int q = 0; q < nQuery; ++q) {
    std::vector<cv::DMatch> pair;
    pair.push_back(dmatches_h[2 * q]);
    pair.push_back(dmatches_h[2 * q + 1]);
    dmatches.push_back(pair);
  }
}
// Self-contained host matcher: uploads both descriptor sets into pitched
// device buffers (zero-padded to 64 bytes per row), runs the 2-NN Hamming
// matcher and returns knnMatch-style results (two cv::DMatch per query).
void MatchDescriptors(cv::Mat &desc_query, cv::Mat &desc_train,
                      std::vector<std::vector<cv::DMatch> > &dmatches) {
  size_t pitch1, pitch2;
  unsigned char *descq_d;
  hipMallocPitch(&descq_d, &pitch1, 64, desc_query.rows);
  hipMemset2D(descq_d, pitch1, 0, 64, desc_query.rows);
  hipMemcpy2D(descq_d, pitch1, desc_query.data, desc_query.cols,
              desc_query.cols, desc_query.rows, hipMemcpyHostToDevice);
  unsigned char *desct_d;
  hipMallocPitch(&desct_d, &pitch2, 64, desc_train.rows);
  hipMemset2D(desct_d, pitch2, 0, 64, desc_train.rows);
  hipMemcpy2D(desct_d, pitch2, desc_train.data, desc_train.cols,
              desc_train.cols, desc_train.rows, hipMemcpyHostToDevice);
  // Bug guard: the kernel indexes BOTH arrays with a single pitch. The two
  // allocations request the same width so the pitches should agree, but the
  // API does not guarantee it -- detect the mismatch instead of silently
  // reading the wrong train rows.
  if (pitch1 != pitch2) {
    printf("MatchDescriptors: descriptor pitch mismatch (%d vs %d)\n",
           (int)pitch1, (int)pitch2);
  }
  dim3 block(desc_query.rows);
  cv::DMatch *dmatches_d;
  hipMalloc(&dmatches_d, desc_query.rows * 2 * sizeof(cv::DMatch));
  MatchDescriptors << <block, NTHREADS_MATCH>>>(descq_d, desct_d, pitch1, desc_train.rows, dmatches_d);
  cv::DMatch *dmatches_h = new cv::DMatch[2 * desc_query.rows];
  // Blocking copy; also synchronizes with the kernel on the default stream.
  hipMemcpy(dmatches_h, dmatches_d, desc_query.rows * 2 * sizeof(cv::DMatch),
            hipMemcpyDeviceToHost);
  for (int i = 0; i < desc_query.rows; ++i) {
    std::vector<cv::DMatch> tdmatch;
    tdmatch.push_back(dmatches_h[2 * i]);
    tdmatch.push_back(dmatches_h[2 * i + 1]);
    dmatches.push_back(tdmatch);
  }
  hipFree(descq_d);
  hipFree(desct_d);
  hipFree(dmatches_d);
  delete[] dmatches_h;
}
// Builds the 61*8 index pairs compared when packing binary descriptors and
// uploads them to the constant tables comp_idx_1 / comp_idx_2.
// Descriptor values are laid out 3 per subregion (index 3*j+ch for subregion
// j, channel ch); subregions 0..3 form the 2x2 grid, 4..12 the 3x3 grid and
// 13..28 the 4x4 grid. For every grid and every channel, all unordered
// subregion pairs (j, i) with j < i are compared:
// 3*(6 + 36 + 120) = 486 comparisons; the last 2 of the 488 table slots are
// never read by BuildDescriptor.
void InitCompareIndices() {
  int comp_idx_1_h[61 * 8];
  int comp_idx_2_h[61 * 8];
  int cntr = 0;
  // Appends one comparison per unordered subregion pair in [first, last)
  // for the given channel offset (0, 1 or 2). Replaces nine copy-pasted
  // loop nests that differed only in range and channel.
  auto addPairs = [&](int first, int last, int channel) {
    for (int j = first; j < last; ++j) {
      for (int i = j + 1; i < last; ++i) {
        comp_idx_1_h[cntr] = 3 * j + channel;
        comp_idx_2_h[cntr] = 3 * i + channel;
        cntr++;
      }
    }
  };
  for (int ch = 0; ch < 3; ++ch) addPairs(0, 4, ch);    // 2x2 grid
  for (int ch = 0; ch < 3; ++ch) addPairs(4, 13, ch);   // 3x3 grid
  for (int ch = 0; ch < 3; ++ch) addPairs(13, 29, ch);  // 4x4 grid
  // Zero the two unused tail slots so no uninitialized stack data is
  // uploaded to constant memory.
  while (cntr < 61 * 8) {
    comp_idx_1_h[cntr] = 0;
    comp_idx_2_h[cntr] = 0;
    cntr++;
  }
  hipMemcpyToSymbol(comp_idx_1, comp_idx_1_h, 8 * 61 * sizeof(int));
  hipMemcpyToSymbol(comp_idx_2, comp_idx_2_h, 8 * 61 * sizeof(int));
}
// Dominant-orientation estimation, one block per keypoint (ORIENT_S = 13*16
// threads; tx&15 -> column offset, tx/16 -> row offset around the point).
// Gaussian-weighted derivative samples within radius^2 < 36 are binned into
// 42 angular bins; each bin is then summed with its 6 circular successors
// (a sliding window) and the strongest window gives the orientation, written
// to d_pts[p].angle in [0, 2*pi).
__global__ void FindOrientation(cv::KeyPoint *d_pts, CudaImage *d_imgs) {
  __shared__ float resx[42], resy[42];
  __shared__ float re8x[42], re8y[42];
  int p = blockIdx.x;
  int tx = threadIdx.x;
  if (tx < 42) resx[tx] = resy[tx] = 0.0f;
  __syncthreads();
  int lev = d_pts[p].class_id;
  // Images come in groups of 4 per pyramid level with the x/y derivative
  // images at offsets 2 and 3 -- assumed from usage here; TODO confirm
  // against the pyramid builder.
  float *dxd = d_imgs[4 * lev + 2].d_data;
  float *dyd = d_imgs[4 * lev + 3].d_data;
  int pitch = d_imgs[4 * lev + 0].pitch;
  int octave = d_pts[p].octave;
  int step = (int)(0.5f * d_pts[p].size + 0.5f) >> octave;
  int x = (int)(d_pts[p].pt.x + 0.5f) >> octave;
  int y = (int)(d_pts[p].pt.y + 0.5f) >> octave;
  int i = (tx & 15) - 6;
  int j = (tx / 16) - 6;
  int r2 = i * i + j * j;
  if (r2 < 36) {
    float gweight = exp(-r2 / (2.5f * 2.5f * 2.0f));
    int pos = (y + step * j) * pitch + (x + step * i);
    float dx = gweight * dxd[pos];
    float dy = gweight * dyd[pos];
    float angle = atan2(dy, dx);
    // Map angle in [-pi, pi] to a bin index clamped to [0, 41].
    int a = max(min((int)(angle * (21 / CV_PI)) + 21, 41), 0);
    atomicAdd(resx + a, dx);
    atomicAdd(resy + a, dy);
  }
  __syncthreads();
  // Circular sliding-window sum over 7 consecutive bins.
  if (tx < 42) {
    re8x[tx] = resx[tx];
    re8y[tx] = resy[tx];
    for (int k = tx + 1; k < tx + 7; k++) {
      re8x[tx] += resx[k < 42 ? k : k - 42];
      re8y[tx] += resy[k < 42 ? k : k - 42];
    }
  }
  __syncthreads();
  // Single-threaded argmax over the 42 windows.
  if (tx == 0) {
    float maxr = 0.0f;
    int maxk = 0;
    for (int k = 0; k < 42; k++) {
      float r = re8x[k] * re8x[k] + re8y[k] * re8y[k];
      if (r > maxr) {
        maxr = r;
        maxk = k;
      }
    }
    float angle = atan2(re8y[maxk], re8x[maxk]);
    d_pts[p].angle = (angle < 0.0f ? angle + 2.0f * CV_PI : angle);
    // printf("XXX %.2f %.2f %.2f\n", d_pts[p].pt.x, d_pts[p].pt.y,
    // d_pts[p].angle/CV_PI*180.0f);
  }
}
// Uploads the pyramid image descriptors to the device and launches one
// orientation block per keypoint.
// Returns the measured GPU time (timer currently disabled, always 0).
double FindOrientation(cv::KeyPoint *d_pts, std::vector<CudaImage> &h_imgs, CudaImage *d_imgs, int numPts) {
  const size_t imgBytes = sizeof(CudaImage) * h_imgs.size();
  safeCall(hipMemcpyAsync(d_imgs, h_imgs.data(), imgBytes,
                          hipMemcpyHostToDevice));
  // TimerGPU timer0(0);
  // Wait for the upload before the kernel consumes the image table.
  hipStreamSynchronize(0);
  dim3 grid(numPts);
  dim3 block(ORIENT_S);
  FindOrientation << <grid, block>>> (d_pts, d_imgs);
  CHK
  // checkMsg("FindOrientation() execution failed\n");
  // safeCall(hipDeviceSynchronize());
  double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
  printf("FindOrientation time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
| c6082050bfdb4b46745b35125e93f9738e36e7fb.cu | #include <opencv2/features2d/features2d.hpp>
#include "cuda_akaze.h"
#include "cudautils.h"
#define CONVROW_W 160
#define CONVCOL_W 32
#define CONVCOL_H 40
#define CONVCOL_S 8
#define SCHARR_W 32
#define SCHARR_H 16
#define NLDSTEP_W 32
#define NLDSTEP_H 13
#define ORIENT_S (13 * 16)
#define EXTRACT_S 64
__device__ __constant__ float d_Kernel[21];
__device__ unsigned int d_PointCounter[1];
__device__ unsigned int d_ExtremaIdx[16];
__device__ __constant__ int comp_idx_1[61 * 8];
__device__ __constant__ int comp_idx_2[61 * 8];
cudaStream_t copyStream;
//__device__ __constant__ float norm_factors[29];
#if 1
#define CHK
#else
#define CHK cudaDeviceSynchronize(); \
{ \
cudaError_t cuerr = cudaGetLastError(); \
if (cuerr) { \
std::cout << "Cuda error " << cudaGetErrorString(cuerr) << ". at " << __FILE__ << ":" << __LINE__ << std::endl; \
} \
}
#endif
// Blocks the host until all work queued on the dedicated copy stream
// (copyStream, created in AllocBuffers) has completed.
void WaitCuda() {
  cudaStreamSynchronize(copyStream);
}
// Parameter bundle for the separable-convolution kernels (passed by value
// to keep the kernel argument list short).
struct Conv_t {
  float *d_Result;  // device output buffer
  float *d_Data;    // device input buffer
  int width;        // image width in pixels
  int pitch;        // row pitch in elements (floats)
  int height;       // image height in pixels
};
// Row pass of the separable convolution. Each block filters a CONVROW_W-wide
// segment of one row: all blockDim.x = CONVROW_W + 2*RADIUS threads stage
// the segment plus a RADIUS-wide apron on each side into shared memory
// (clamping reads to the row's edge pixels), then the first CONVROW_W
// threads apply the (2*RADIUS+1)-tap kernel held in constant d_Kernel.
template <int RADIUS>
__global__ void ConvRowGPU(struct Conv_t s) {
  //__global__ void ConvRowGPU(float *d_Result, float *d_Data, int width, int
  //pitch, int height) {
  __shared__ float data[CONVROW_W + 2 * RADIUS];
  const int tx = threadIdx.x;
  const int minx = blockIdx.x * CONVROW_W;
  const int maxx = min(minx + CONVROW_W, s.width);
  const int yptr = blockIdx.y * s.pitch;
  const int loadPos = minx + tx - RADIUS;
  const int writePos = minx + tx;
  // Stage input with clamp-to-edge addressing.
  if (loadPos < 0)
    data[tx] = s.d_Data[yptr];
  else if (loadPos >= s.width)
    data[tx] = s.d_Data[yptr + s.width - 1];
  else
    data[tx] = s.d_Data[yptr + loadPos];
  __syncthreads();
  // Only the first CONVROW_W threads produce output; the rest exist solely
  // to load the apron.
  if (writePos < maxx && tx < CONVROW_W) {
    float sum = 0.0f;
    for (int i = 0; i <= (2 * RADIUS); i++) sum += data[tx + i] * d_Kernel[i];
    s.d_Result[yptr + writePos] = sum;
  }
}
///////////////////////////////////////////////////////////////////////////////
// Column convolution filter
///////////////////////////////////////////////////////////////////////////////
// Column pass of the separable convolution. Each block processes a
// CONVCOL_W-wide by CONVCOL_H-tall tile: CONVCOL_S thread rows sweep the
// tile's columns (plus a RADIUS apron above and below, clamped at the top
// and bottom image rows) into shared memory, then apply the vertical
// (2*RADIUS+1)-tap kernel from constant d_Kernel.
template <int RADIUS>
__global__ void ConvColGPU(struct Conv_t s) {
  //__global__ void ConvColGPU(float *d_Result, float *d_Data, int width, int
  //pitch, int height) {
  __shared__ float data[CONVCOL_W * (CONVCOL_H + 2 * RADIUS)];
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;
  const int miny = blockIdx.y * CONVCOL_H;
  const int maxy = min(miny + CONVCOL_H, s.height) - 1;
  const int totStart = miny - RADIUS;
  const int totEnd = maxy + RADIUS;
  const int colStart = blockIdx.x * CONVCOL_W + tx;
  const int colEnd = colStart + (s.height - 1) * s.pitch;  // last row, same column
  const int smemStep = CONVCOL_W * CONVCOL_S;
  const int gmemStep = s.pitch * CONVCOL_S;
  // Load the column strip (with clamp-to-edge at top/bottom).
  if (colStart < s.width) {
    int smemPos = ty * CONVCOL_W + tx;
    int gmemPos = colStart + (totStart + ty) * s.pitch;
    for (int y = totStart + ty; y <= totEnd; y += blockDim.y) {
      if (y < 0)
        data[smemPos] = s.d_Data[colStart];
      else if (y >= s.height)
        data[smemPos] = s.d_Data[colEnd];
      else
        data[smemPos] = s.d_Data[gmemPos];
      smemPos += smemStep;
      gmemPos += gmemStep;
    }
  }
  __syncthreads();
  // Convolve down the column and write the output rows.
  if (colStart < s.width) {
    int smemPos = ty * CONVCOL_W + tx;
    int gmemPos = colStart + (miny + ty) * s.pitch;
    for (int y = miny + ty; y <= maxy; y += blockDim.y) {
      float sum = 0.0f;
      for (int i = 0; i <= 2 * RADIUS; i++)
        sum += data[smemPos + i * CONVCOL_W] * d_Kernel[i];
      s.d_Result[gmemPos] = sum;
      smemPos += smemStep;
      gmemPos += gmemStep;
    }
  }
}
// Applies a full separable convolution: uploads the (2*RADIUS+1)-tap kernel
// to constant memory, runs the row pass into `temp`, then the column pass
// into `outimg`. Returns 0.0 (and logs) if any buffer is missing.
// Returns the measured GPU time (timer currently disabled, always 0).
template <int RADIUS>
double SeparableFilter(CudaImage &inimg, CudaImage &outimg, CudaImage &temp,
                       float *h_Kernel) {
  int width = inimg.width;
  int pitch = inimg.pitch;
  int height = inimg.height;
  float *d_DataA = inimg.d_data;
  float *d_DataB = outimg.d_data;
  float *d_Temp = temp.d_data;
  if (d_DataA == NULL || d_DataB == NULL || d_Temp == NULL) {
    printf("SeparableFilter: missing data\n");
    return 0.0;
  }
  // TimerGPU timer0(0);
  const unsigned int kernelSize = (2 * RADIUS + 1) * sizeof(float);
  safeCall(cudaMemcpyToSymbolAsync(d_Kernel, h_Kernel, kernelSize));
  // Row pass: one block per CONVROW_W segment per row.
  dim3 blockGridRows(iDivUp(width, CONVROW_W), height);
  dim3 threadBlockRows(CONVROW_W + 2 * RADIUS);
  struct Conv_t s;
  s.d_Result = d_Temp;
  s.d_Data = d_DataA;
  s.width = width;
  s.pitch = pitch;
  s.height = height;
  ConvRowGPU<RADIUS> << <blockGridRows, threadBlockRows>>> (s);
  // checkMsg("ConvRowGPU() execution failed\n");
  // safeCall(cudaThreadSynchronize());
  // Column pass: reads the row-filtered temp, writes the final result.
  dim3 blockGridColumns(iDivUp(width, CONVCOL_W), iDivUp(height, CONVCOL_H));
  dim3 threadBlockColumns(CONVCOL_W, CONVCOL_S);
  s.d_Result = d_DataB;
  s.d_Data = d_Temp;
  ConvColGPU<RADIUS> << <blockGridColumns, threadBlockColumns>>> (s);
  // checkMsg("ConvColGPU() execution failed\n");
  // safeCall(cudaThreadSynchronize());
  double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
  printf("SeparableFilter time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Gaussian low-pass filter with variance `var`: builds a normalized
// (2*RADIUS+1)-tap kernel on the host and applies it separably.
template <int RADIUS>
double LowPass(CudaImage &inimg, CudaImage &outimg, CudaImage &temp,
               double var) {
  const int taps = 2 * RADIUS + 1;
  float kernel[taps];
  float total = 0.0f;
  for (int t = 0; t < taps; t++) {
    int j = t - RADIUS;  // tap offset from the center
    kernel[t] = (float)expf(-(double)j * j / 2.0 / var);
    total += kernel[t];
  }
  // Normalize so the taps sum to 1.
  for (int t = 0; t < taps; t++) kernel[t] /= total;
  return SeparableFilter<RADIUS>(inimg, outimg, temp, kernel);
}
// Dispatches to the templated LowPass with the smallest supported radius
// covering the requested kernel size (radii 2..5, i.e. sizes up to 11;
// larger sizes fall back to radius 5 with a warning).
double LowPass(CudaImage &inimg, CudaImage &outimg, CudaImage &temp, double var,
               int kernsize) {
  if (kernsize <= 5) return LowPass<2>(inimg, outimg, temp, var);
  if (kernsize <= 7) return LowPass<3>(inimg, outimg, temp, var);
  if (kernsize <= 9) return LowPass<4>(inimg, outimg, temp, var);
  if (kernsize > 11)
    std::cerr << "Kernels larger than 11 not implemented" << std::endl;
  return LowPass<5>(inimg, outimg, temp, var);
}
// Scharr 3x3 derivative filter: writes the x-derivative into lxd and the
// y-derivative into lyd. The block stages a (SCHARR_W+2)x(SCHARR_H+2) tile
// (one-pixel apron, mirrored at the image borders) in shared memory; only
// the inner SCHARR_W x SCHARR_H threads produce output.
__global__ void Scharr(float *imgd, float *lxd, float *lyd, int width,
                       int pitch, int height) {
#define BW (SCHARR_W + 2)
  __shared__ float buffer[BW * (SCHARR_H + 2)];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int x = blockIdx.x * SCHARR_W + tx;
  int y = blockIdx.y * SCHARR_H + ty;
  // Mirror reflection for the apron sample at position (x-1, y-1).
  int xp = (x == 0 ? 1 : (x > width ? width - 2 : x - 1));
  int yp = (y == 0 ? 1 : (y > height ? height - 2 : y - 1));
  buffer[ty * BW + tx] = imgd[yp * pitch + xp];
  __syncthreads();
  if (x < width && y < height && tx < SCHARR_W && ty < SCHARR_H) {
    float *b = buffer + (ty + 1) * BW + (tx + 1);
    float ul = b[-BW - 1];
    float ur = b[-BW + 1];
    float ll = b[+BW - 1];
    float lr = b[+BW + 1];
    // Scharr weights: 3 on diagonals, 10 on the axis neighbours.
    lxd[y * pitch + x] = 3.0f * (lr - ll + ur - ul) + 10.0f * (b[+1] - b[-1]);
    lyd[y * pitch + x] = 3.0f * (lr + ll - ur - ul) + 10.0f * (b[BW] - b[-BW]);
  }
}
// Launches the Scharr kernel to compute the x/y derivative images of `img`.
// Returns the measured GPU time (timer currently disabled, always 0).
double Scharr(CudaImage &img, CudaImage &lx, CudaImage &ly) {
  // TimerGPU timer0(0);
  dim3 grid(iDivUp(img.width, SCHARR_W), iDivUp(img.height, SCHARR_H));
  dim3 block(SCHARR_W + 2, SCHARR_H + 2);  // +2 for the apron loaders
  Scharr << <grid, block>>> (img.d_data, lx.d_data, ly.d_data, img.width,
                             img.pitch, img.height);
  // checkMsg("Scharr() execution failed\n");
  // safeCall(cudaThreadSynchronize());
  double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
  printf("Scharr time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Computes the conductivity (diffusivity) image from imgd: evaluates a
// Scharr gradient in shared memory and maps |grad|^2 * invk (invk = 1/k^2,
// k = contrast parameter) through the selected conductivity function.
__global__ void Flow(float *imgd, float *flowd, int width, int pitch,
                     int height, DIFFUSIVITY_TYPE type, float invk) {
#define BW (SCHARR_W + 2)
  __shared__ float buffer[BW * (SCHARR_H + 2)];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int x = blockIdx.x * SCHARR_W + tx;
  int y = blockIdx.y * SCHARR_H + ty;
  // Mirror reflection for the one-pixel apron (same scheme as Scharr).
  int xp = (x == 0 ? 1 : (x > width ? width - 2 : x - 1));
  int yp = (y == 0 ? 1 : (y > height ? height - 2 : y - 1));
  buffer[ty * BW + tx] = imgd[yp * pitch + xp];
  __syncthreads();
  if (x < width && y < height && tx < SCHARR_W && ty < SCHARR_H) {
    float *b = buffer + (ty + 1) * BW + (tx + 1);
    float ul = b[-BW - 1];
    float ur = b[-BW + 1];
    float ll = b[+BW - 1];
    float lr = b[+BW + 1];
    float lx = 3.0f * (lr - ll + ur - ul) + 10.0f * (b[+1] - b[-1]);
    float ly = 3.0f * (lr + ll - ur - ul) + 10.0f * (b[BW] - b[-BW]);
    float dif2 = invk * (lx * lx + ly * ly);
    // Perona-Malik g1/g2, Weickert, or (default) Charbonnier conductivity.
    if (type == PM_G1)
      flowd[y * pitch + x] = exp(-dif2);
    else if (type == PM_G2)
      flowd[y * pitch + x] = 1.0f / (1.0f + dif2);
    else if (type == WEICKERT)
      flowd[y * pitch + x] = 1.0f - exp(-3.315 / (dif2 * dif2 * dif2 * dif2));
    else
      flowd[y * pitch + x] = 1.0f / sqrt(1.0f + dif2);
  }
}
// Launches the Flow kernel to build the conductivity image for `img` with
// the given diffusivity type and contrast parameter kcontrast.
// Returns the measured GPU time (timer currently disabled, always 0).
double Flow(CudaImage &img, CudaImage &flow, DIFFUSIVITY_TYPE type,
            float kcontrast) {
  // TimerGPU timer0(0);
  dim3 grid(iDivUp(img.width, SCHARR_W), iDivUp(img.height, SCHARR_H));
  dim3 block(SCHARR_W + 2, SCHARR_H + 2);  // +2 for the apron loaders
  const float invk2 = 1.0f / (kcontrast * kcontrast);
  Flow << <grid, block>>> (img.d_data, flow.d_data, img.width, img.pitch,
                           img.height, type, invk2);
  // checkMsg("Flow() execution failed\n");
  // safeCall(cudaThreadSynchronize());
  double gpuTime = 0; // = timer0.read();
#ifdef VERBOSE
  printf("Flow time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Parameter bundle for the NLDStep kernel.
struct NLDStep_t {
  float *imgd;     // evolving image
  float *flod;     // conductivity image
  float *temd;     // output: diffusion increment
  int width;       // image width in pixels
  int pitch;       // row pitch in elements (floats)
  int height;      // image height in pixels
  float stepsize;  // explicit-scheme step factor (host passes 0.5*stepsize)
};
//__global__ void NLDStep(float *imgd, float *flod, float *temd, int width, int
// pitch, int height, float stepsize)
// One explicit nonlinear-diffusion step: stages the image and conductivity
// (one-pixel apron, clamped at the borders) in shared memory and writes the
// increment stepsize * div(g * grad(I)) into temd; NLDUpdate applies it.
__global__ void NLDStep(NLDStep_t s) {
#undef BW
#define BW (NLDSTEP_W + 2)
  __shared__ float ibuff[BW * (NLDSTEP_H + 2)];
  __shared__ float fbuff[BW * (NLDSTEP_H + 2)];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int x = blockIdx.x * NLDSTEP_W + tx;
  int y = blockIdx.y * NLDSTEP_H + ty;
  // Clamp-to-edge addressing for the apron sample at (x-1, y-1).
  int xp = (x == 0 ? 0 : (x > s.width ? s.width - 1 : x - 1));
  int yp = (y == 0 ? 0 : (y > s.height ? s.height - 1 : y - 1));
  ibuff[ty * BW + tx] = s.imgd[yp * s.pitch + xp];
  fbuff[ty * BW + tx] = s.flod[yp * s.pitch + xp];
  __syncthreads();
  if (tx < NLDSTEP_W && ty < NLDSTEP_H && x < s.width && y < s.height) {
    float *ib = ibuff + (ty + 1) * BW + (tx + 1);
    float *fb = fbuff + (ty + 1) * BW + (tx + 1);
    float ib0 = ib[0];
    float fb0 = fb[0];
    // Conductivity-weighted flux differences in the four axis directions.
    float xpos = (fb0 + fb[+1]) * (ib[+1] - ib0);
    float xneg = (fb0 + fb[-1]) * (ib0 - ib[-1]);
    float ypos = (fb0 + fb[+BW]) * (ib[+BW] - ib0);
    float yneg = (fb0 + fb[-BW]) * (ib0 - ib[-BW]);
    s.temd[y * s.pitch + x] = s.stepsize * (xpos - xneg + ypos - yneg);
  }
}
// Parameter bundle for the NLDUpdate kernel.
struct NLDUpdate_t {
  float *imgd;  // image updated in place
  float *temd;  // diffusion increment produced by NLDStep
  int width;    // image width in pixels
  int pitch;    // row pitch in elements (floats)
  int height;   // image height in pixels
};
//__global__ void NLDUpdate(float *imgd, float *temd, int width, int pitch, int
// height)
// Adds the diffusion increment buffer into the image in place
// (one thread per pixel, launched with 32x16 blocks).
__global__ void NLDUpdate(NLDUpdate_t s) {
  const int col = blockIdx.x * 32 + threadIdx.x;
  const int row = blockIdx.y * 16 + threadIdx.y;
  if (col >= s.width || row >= s.height) return;
  const int idx = row * s.pitch + col;
  s.imgd[idx] += s.temd[idx];
}
// Performs one full nonlinear diffusion iteration on `img`: computes the
// increment into `temp` with the NLDStep kernel (the explicit scheme uses
// half the given stepsize), then applies it in place with NLDUpdate.
// Returns the measured GPU time (timer currently disabled, always 0).
double NLDStep(CudaImage &img, CudaImage &flow, CudaImage &temp,
               float stepsize) {
  // TimerGPU timer0(0);
  dim3 blocks0(iDivUp(img.width, NLDSTEP_W), iDivUp(img.height, NLDSTEP_H));
  dim3 threads0(NLDSTEP_W + 2, NLDSTEP_H + 2);
  NLDStep_t s;
  s.imgd = img.d_data;
  s.flod = flow.d_data;
  s.temd = temp.d_data;
  s.width = img.width;
  s.pitch = img.pitch;
  s.height = img.height;
  s.stepsize = 0.5 * stepsize;  // half step for the explicit scheme
  // NLDStep<<<blocks0, threads0>>>(img.d_data, flow.d_data, temp.d_data,
  // img.width, img.pitch, img.height, 0.5f*stepsize);
  NLDStep << <blocks0, threads0>>> (s);
  // checkMsg("NLDStep() execution failed\n");
  // safeCall(cudaThreadSynchronize());
  dim3 blocks1(iDivUp(img.width, 32), iDivUp(img.height, 16));
  dim3 threads1(32, 16);
  NLDUpdate_t su;
  su.imgd = img.d_data;
  su.temd = temp.d_data;
  su.width = img.width;
  su.height = img.height;
  su.pitch = img.pitch;
  // NLDUpdate<<<blocks1, threads1>>>(img.d_data, temp.d_data, img.width,
  // img.pitch, img.height);
  NLDUpdate << <blocks1, threads1>>> (su);
  // checkMsg("NLDUpdate() execution failed\n");
  // safeCall(cudaThreadSynchronize());
  double gpuTime = 0; // = timer0.read();
#ifdef VERBOSE
  printf("NLDStep time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Downsamples iimd (iwidth x iheight) into oimd (owidth x oheight).
// Handles both the "even" case (dimension exactly halved, 2-pixel average)
// and the "odd" case, where three input pixels are blended with
// position-dependent weights; all weights are normalized by iwidth*iheight
// at the end. Each 16x16 block stages horizontally filtered rows in shared
// memory (two rows per thread plus an extra boundary row), then blends
// vertically.
__global__ void HalfSample(float *iimd, float *oimd, int iwidth, int iheight,
                           int ipitch, int owidth, int oheight, int opitch) {
  __shared__ float buffer[16 * 33];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int x = blockIdx.x * 16 + tx;
  int y = blockIdx.y * 16 + ty;
  // Bug fix: threads outside the output used to `return` before the
  // __syncthreads() below -- a divergent barrier (undefined behavior) for
  // edge blocks. Keep all threads alive and predicate the work instead.
  bool active = (x < owidth && y < oheight);
  if (active) {
    float *ptri = iimd + (2 * y) * ipitch + (2 * x);
    if (2 * owidth == iwidth) {
      // Exact 2:1 horizontal ratio: pair sums, weight folded into owidth.
      buffer[ty * 32 + tx] = owidth * (ptri[0] + ptri[1]);
      ptri += ipitch;
      buffer[ty * 32 + tx + 16] = owidth * (ptri[0] + ptri[1]);
      if (ty == 15) {
        ptri += ipitch;
        buffer[tx + 32 * 16] = owidth * (ptri[0] + ptri[1]);
      } else if (y * 2 + 3 == iheight) {
        ptri += ipitch;
        buffer[tx + 32 * (ty + 1)] = owidth * (ptri[0] + ptri[1]);
      }
    } else {
      // Odd width: blend three input pixels with position-dependent weights.
      float f0 = owidth - x;
      float f2 = 1 + x;
      buffer[ty * 32 + tx] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
      ptri += ipitch;
      buffer[ty * 32 + tx + 16] =
          f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
      if (ty == 15 && 2 * oheight != iheight) {
        ptri += ipitch;
        // Bug fix: this line read f2 * ptri[1] (middle pixel weighted twice)
        // instead of ptri[2], unlike the identical branch below.
        buffer[tx + 32 * 16] = f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
      } else if (y * 2 + 3 == iheight && 2 * oheight != iheight) {
        ptri += ipitch;
        buffer[tx + 32 * (ty + 1)] =
            f0 * ptri[0] + owidth * ptri[1] + f2 * ptri[2];
      }
    }
  }
  __syncthreads();
  if (!active) return;  // safe: no barriers remain past this point
  float *buff = buffer + 32 * ty + tx;
  if (2 * oheight == iheight)
    oimd[y * opitch + x] = oheight * (buff[0] + buff[16]) / (iwidth * iheight);
  else {
    float f0 = oheight - y;
    float f2 = 1 + y;
    oimd[y * opitch + x] = (f0 * buff[0] + oheight * buff[16] + f2 * buff[32]) /
                           (iwidth * iheight);
  }
}
// Plain 2x2 box-filter downsample for the exact half-size case
// (one thread per output pixel, 32x16 blocks).
__global__ void HalfSample2(float *iimd, float *oimd, int ipitch, int owidth,
                            int oheight, int opitch) {
  const int col = blockIdx.x * 32 + threadIdx.x;
  const int row = blockIdx.y * 16 + threadIdx.y;
  if (col >= owidth || row >= oheight) return;
  const float *src = iimd + (2 * row) * ipitch + (2 * col);
  oimd[row * opitch + col] =
      0.25f * (src[0] + src[1] + src[ipitch + 0] + src[ipitch + 1]);
}
// Downsamples inimg into outimg, using the fast 2x2 box kernel when the
// ratio is exactly 2:1 in both dimensions and the general weighted kernel
// otherwise. Returns the measured GPU time (timer currently disabled, 0).
double HalfSample(CudaImage &inimg, CudaImage &outimg) {
  // TimerGPU timer0(0);
  const bool exactHalf =
      (inimg.width == 2 * outimg.width && inimg.height == 2 * outimg.height);
  if (exactHalf) {
    dim3 grid(iDivUp(outimg.width, 32), iDivUp(outimg.height, 16));
    dim3 block(32, 16);
    HalfSample2 << <grid, block>>> (inimg.d_data, outimg.d_data,
                                    inimg.pitch, outimg.width,
                                    outimg.height, outimg.pitch);
  } else {
    dim3 grid(iDivUp(outimg.width, 16), iDivUp(outimg.height, 16));
    dim3 block(16, 16);
    HalfSample << <grid, block>>> (inimg.d_data, outimg.d_data, inimg.width,
                                   inimg.height, inimg.pitch, outimg.width,
                                   outimg.height, outimg.pitch);
  }
  // checkMsg("HalfSample() execution failed\n");
  // safeCall(cudaThreadSynchronize());
  double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
  printf("HalfSample time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Device-to-device copy of inimg's pixel rectangle into outimg.
// Returns the measured GPU time (timer currently disabled, always 0).
double Copy(CudaImage &inimg, CudaImage &outimg) {
  // TimerGPU timer0(0);
  double gpuTime = 0; // timer0.read();
  // Bug fix: the source pitch was passed as outimg.pitch; when the two
  // images have different pitches that reads the wrong source rows.
  safeCall(cudaMemcpy2DAsync(outimg.d_data, sizeof(float) * outimg.pitch,
                             inimg.d_data, sizeof(float) * inimg.pitch,
                             sizeof(float) * inimg.width, inimg.height,
                             cudaMemcpyDeviceToDevice));
#ifdef VERBOSE
  printf("Copy time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Allocates one big pitched device slab and carves it into: omax*num pyramid
// images (pitch aligned to 128 floats), keypoint array + sorting buffer,
// 61-byte binary descriptors, 3*29 raw float descriptor values per point,
// 21*21 int indices per point, and the device-side CudaImage table.
// Section offsets are counted in floats; buf.d_data temporarily stores the
// offset and is re-based to a real pointer once the allocation succeeds.
// Also initializes the descriptor comparison tables and the copy stream.
// Bug fix: the stream-creation call contained HTML-entity mojibake
// ("cudaStreamCreate(©Stream)") -- "&copy" had been rendered as the (c)
// sign, which does not compile; restored to &copyStream.
float *AllocBuffers(int width, int height, int num, int omax, int &maxpts,
                    std::vector<CudaImage> &buffers, cv::KeyPoint *&pts,
                    cv::KeyPoint *&ptsbuffer, int *&ptindices, unsigned char *&desc, float *&descbuffer, CudaImage *&ims) {
  maxpts = 4 * ((maxpts+3)/4);  // round up to a multiple of 4
  buffers.resize(omax * num);
  int w = width;
  int h = height;
  int p = iAlignUp(w, 128);
  int size = 0;  // running total, in floats
  for (int i = 0; i < omax; i++) {
    for (int j = 0; j < num; j++) {
      CudaImage &buf = buffers[i * num + j];
      buf.width = w;
      buf.height = h;
      buf.pitch = p;
      // Stash the float offset in the pointer field; fixed up below.
      buf.d_data = (float *)((long)size);
      size += h * p;
    }
    w /= 2;
    h /= 2;
    p = iAlignUp(w, 128);
  }
  int ptsstart = size;
  size += sizeof(cv::KeyPoint) * maxpts / sizeof(float);
  int ptsbufferstart = size;
  size += sizeof(cv::KeyPoint) * maxpts / sizeof(float);
  int descstart = size;
  size += sizeof(unsigned char)*maxpts*61/sizeof(float);
  int descbufferstart = size;
  size += sizeof(float)*3*29*maxpts / sizeof(float);
  int indicesstart = size;
  size += 21*21*sizeof(int)*maxpts/sizeof(float);
  int imgstart = size;
  // NOTE(review): this reserves sizeof(CudaImage)*(num*omax+3)/4 floats,
  // which over-allocates compared to rounding the product up -- harmless,
  // but worth confirming it was intentional.
  size += sizeof(CudaImage) * (num * omax + sizeof(float) - 1) / sizeof(float);
  float *memory = NULL;
  size_t pitch;
  // NOTE(review): `size` counts floats, so this actually prints Mfloats
  // (a quarter of the byte count).
  std::cout << "allocating " << size/1024./1024. << " Mbytes of gpu memory\n";
  safeCall(cudaMallocPitch((void **)&memory, &pitch, (size_t)4096,
                           (size + 4095) / 4096 * sizeof(float)));
  for (int i = 0; i < omax * num; i++) {
    CudaImage &buf = buffers[i];
    buf.d_data = memory + (long)buf.d_data;  // offset -> real device pointer
  }
  pts = (cv::KeyPoint *)(memory + ptsstart);
  ptsbuffer = (cv::KeyPoint *)(memory + ptsbufferstart);
  desc = (unsigned char *)(memory + descstart);
  descbuffer = (float*)(memory + descbufferstart);
  ptindices = (int*)(memory + indicesstart);
  ims = (CudaImage *)(memory + imgstart);
  InitCompareIndices();
  cudaStreamCreate(&copyStream);
  return memory;
}
// Releases the single slab allocated by AllocBuffers (pyramid images, point
// arrays and descriptors all live inside it).
// NOTE(review): the copyStream created in AllocBuffers is not destroyed
// here -- confirm whether that is intentional.
void FreeBuffers(float *buffers) { safeCall(cudaFree(buffers)); }
__device__ unsigned int d_Maxval[1];
__device__ int d_Histogram[512];
#define CONTRAST_W 64
#define CONTRAST_H 7
#define HISTCONT_W 64
#define HISTCONT_H 8
#define HISTCONT_R 4
// Computes the Scharr gradient magnitude of imgd into cond (interior pixels
// only) and atomically tracks the global maximum magnitude in d_Maxval.
// Magnitudes are compared through their bit patterns with integer atomicMax,
// which preserves ordering for non-negative floats.
__global__ void MaxContrast(float *imgd, float *cond, int width, int pitch,
                            int height) {
#define WID (CONTRAST_W + 2)
  __shared__ float buffer[WID * (CONTRAST_H + 2)];
  __shared__ unsigned int maxval[32];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  if (tx < 32 && !ty) maxval[tx] = 0.0f;
  __syncthreads();
  int x = blockIdx.x * CONTRAST_W + tx;
  int y = blockIdx.y * CONTRAST_H + ty;
  // Bug fix: out-of-range threads used to `return` here, which (a) skipped
  // the two __syncthreads() below -- a divergent barrier, undefined
  // behavior -- and (b) skipped the final d_Maxval flush, silently dropping
  // max contributions in edge blocks whose ty==0 lane was out of range.
  // Keep all threads alive and predicate the work instead.
  bool inside = (x < width && y < height);
  float *b = buffer + ty * WID + tx;
  if (inside) b[0] = imgd[y * pitch + x];
  __syncthreads();
  if (inside && tx < CONTRAST_W && ty < CONTRAST_H && x < width - 2 &&
      y < height - 2) {
    float dx = 3.0f * (b[0] - b[2] + b[2 * WID] - b[2 * WID + 2]) +
               10.0f * (b[WID] - b[WID + 2]);
    float dy = 3.0f * (b[0] + b[2] - b[2 * WID] - b[2 * WID + 2]) +
               10.0f * (b[1] - b[2 * WID + 1]);
    float grad = sqrt(dx * dx + dy * dy);
    cond[(y + 1) * pitch + (x + 1)] = grad;
    unsigned int *gradi = (unsigned int *)&grad;
    atomicMax(maxval + (tx & 31), *gradi);
  }
  __syncthreads();
  // Flush the 32 per-lane partial maxima to the global maximum.
  if (tx < 32 && !ty) atomicMax(d_Maxval, maxval[tx]);
}
// Histograms gradient magnitudes (cond, scaled by imaxval = 1/max) into the
// global d_Histogram via a shared-memory histogram per block. Each thread
// covers HISTCONT_R rows spaced HISTCONT_H apart; one-pixel borders are
// excluded (they hold no gradient values).
__global__ void HistContrast(float *cond, int width, int pitch, int height,
                             float imaxval, int nbins) {
  __shared__ int hist[512];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int i = ty * HISTCONT_W + tx;  // flat thread index, also the bin it flushes
  if (i < nbins) hist[i] = 0;
  __syncthreads();
  int x = blockIdx.x * HISTCONT_W + tx;
  int y = blockIdx.y * HISTCONT_H * HISTCONT_R + ty;
  if (x > 0 && x < width - 1) {
    for (int i = 0; i < HISTCONT_R; i++) {
      if (y > 0 && y < height - 1) {
        // Clamp the top bin so magnitude == max lands in bin nbins-1.
        int idx = min((int)(nbins * cond[y * pitch + x] * imaxval), nbins - 1);
        atomicAdd(hist + idx, 1);
      }
      y += HISTCONT_H;
    }
  }
  __syncthreads();
  // Flush the block-local histogram to the global one.
  if (i < nbins && hist[i] > 0) atomicAdd(d_Histogram + i, hist[i]);
}
// Estimates the AKAZE contrast parameter: blurs the image, finds the global
// maximum Scharr gradient magnitude, histograms all magnitudes into `nbins`
// bins and returns (via `contrast`) the magnitude at the perc-th percentile,
// falling back to 0.03 if the percentile is never reached.
// NOTE(review): the *Async symbol copies target pageable host variables and
// the results are consumed immediately; this relies on such copies behaving
// synchronously on the default stream -- confirm before changing streams.
double ContrastPercentile(CudaImage &img, CudaImage &temp, CudaImage &blur,
                          float perc, int nbins, float &contrast) {
  // TimerGPU timer0(0);
  LowPass(img, blur, temp, 1.0f, 5);
  // Pass 1: gradient magnitudes into temp, maximum into d_Maxval.
  float h_Maxval = 0.0f;
  safeCall(cudaMemcpyToSymbolAsync(d_Maxval, &h_Maxval, sizeof(float)));
  dim3 blocks1(iDivUp(img.width, CONTRAST_W), iDivUp(img.height, CONTRAST_H));
  dim3 threads1(CONTRAST_W + 2, CONTRAST_H + 2);
  MaxContrast << <blocks1, threads1>>>
      (blur.d_data, temp.d_data, blur.width, blur.pitch, blur.height);
  // checkMsg("MaxContrast() execution failed\n");
  // safeCall(cudaThreadSynchronize());
  safeCall(cudaMemcpyFromSymbolAsync(&h_Maxval, d_Maxval, sizeof(float)));
  if (nbins > 512) {
    printf(
        "Warning: Largest number of possible bins in ContrastPercentile() is "
        "512\n");
    nbins = 512;
  }
  // Pass 2: histogram of normalized magnitudes.
  int h_Histogram[512];
  memset(h_Histogram, 0, nbins * sizeof(int));
  safeCall(
      cudaMemcpyToSymbolAsync(d_Histogram, h_Histogram, nbins * sizeof(int)));
  dim3 blocks2(iDivUp(temp.width, HISTCONT_W),
               iDivUp(temp.height, HISTCONT_H * HISTCONT_R));
  dim3 threads2(HISTCONT_W, HISTCONT_H);
  HistContrast << <blocks2, threads2>>> (temp.d_data, temp.width, temp.pitch,
                                         temp.height, 1.0f / h_Maxval, nbins);
  safeCall(
      cudaMemcpyFromSymbolAsync(h_Histogram, d_Histogram, nbins * sizeof(int)));
  // Walk the histogram until perc of the interior pixels are covered.
  int npoints = (temp.width - 2) * (temp.height - 2);
  int nthreshold = (int)(npoints * perc);
  int k = 0, nelements = 0;
  for (k = 0; nelements < nthreshold && k < nbins; k++)
    nelements += h_Histogram[k];
  contrast = (nelements < nthreshold ? 0.03f : h_Maxval * ((float)k / nbins));
  double gpuTime = 0; // timer0.read();
#ifdef VERBOSE
  printf("ContrastPercentile time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Computes first-order x/y derivatives of imd into lxd/lyd using a
// Scharr-like 3x3 stencil sampled `step` pixels apart, with coordinates
// mirrored at the image borders. One thread per output pixel
// (32x16 blocks).
__global__ void Derivate(float *imd, float *lxd, float *lyd, int width,
                         int pitch, int height, int step, float fac1,
                         float fac2) {
  const int x = blockIdx.x * 32 + threadIdx.x;
  const int y = blockIdx.y * 16 + threadIdx.y;
  if (x >= width || y >= height) return;
  // Mirror the sample coordinates at the borders.
  const int xm = (x < step ? step - x : x - step);
  const int xp = (x >= width - step ? 2 * width - x - step - 2 : x + step);
  const int ym = (y < step ? step - y : y - step);
  const int yp = (y >= height - step ? 2 * height - y - step - 2 : y + step);
  const float tl = imd[ym * pitch + xm];  // top-left
  const float tr = imd[ym * pitch + xp];  // top-right
  const float bl = imd[yp * pitch + xm];  // bottom-left
  const float br = imd[yp * pitch + xp];  // bottom-right
  const float ml = imd[y * pitch + xm];   // middle-left
  const float mr = imd[y * pitch + xp];   // middle-right
  lxd[y * pitch + x] = fac1 * (tr + br - tl - bl) + fac2 * (mr - ml);
  const float tc = imd[ym * pitch + x];   // top-center
  const float bc = imd[yp * pitch + x];   // bottom-center
  lyd[y * pitch + x] = fac1 * (br + bl - tr - tl) + fac2 * (bc - tc);
}
// Computes the Hessian determinant response det = Lxx*Lyy - Lxy^2 from the
// first-derivative images lxd/lyd, using the same mirrored Scharr-like
// stencil as Derivate. One thread per output pixel (32x16 blocks).
__global__ void HessianDeterminant(float *lxd, float *lyd, float *detd,
                                   int width, int pitch, int height, int step,
                                   float fac1, float fac2) {
  const int x = blockIdx.x * 32 + threadIdx.x;
  const int y = blockIdx.y * 16 + threadIdx.y;
  if (x >= width || y >= height) return;
  // Mirror the sample coordinates at the borders.
  const int xm = (x < step ? step - x : x - step);
  const int xp = (x >= width - step ? 2 * width - x - step - 2 : x + step);
  const int ym = (y < step ? step - y : y - step);
  const int yp = (y >= height - step ? 2 * height - y - step - 2 : y + step);
  // Second derivatives of Lx: d/dx gives Lxx, d/dy gives Lxy.
  float tl = lxd[ym * pitch + xm];
  float tr = lxd[ym * pitch + xp];
  float bl = lxd[yp * pitch + xm];
  float br = lxd[yp * pitch + xp];
  const float ml = lxd[y * pitch + xm];
  const float mr = lxd[y * pitch + xp];
  const float lxx = fac1 * (tr + br - tl - bl) + fac2 * (mr - ml);
  float tc = lxd[ym * pitch + x];
  float bc = lxd[yp * pitch + x];
  const float lxy = fac1 * (br + bl - tr - tl) + fac2 * (bc - tc);
  // d/dy of Ly gives Lyy.
  tl = lyd[ym * pitch + xm];
  tr = lyd[ym * pitch + xp];
  bl = lyd[yp * pitch + xm];
  br = lyd[yp * pitch + xp];
  tc = lyd[ym * pitch + x];
  bc = lyd[yp * pitch + x];
  const float lyy = fac1 * (br + bl - tr - tl) + fac2 * (bc - tc);
  detd[y * pitch + x] = lxx * lyy - lxy * lxy;
}
// Host wrapper: computes first derivatives of img into lx/ly, then
// overwrites img with the Hessian determinant response.
// Returns elapsed GPU time (timing currently disabled, always 0).
double HessianDeterminant(CudaImage &img, CudaImage &lx, CudaImage &ly,
                          int step) {
  // TimerGPU timer0(0);
  // Scharr-style weighting: w = 10/3 yields the usual 3:10:3 kernel ratio.
  const float w = 10.0 / 3.0;
  const float fac1 = 1.0 / (2.0 * (w + 2.0));
  const float fac2 = w * fac1;
  dim3 threads(32, 16);
  dim3 blocks(iDivUp(img.width, 32), iDivUp(img.height, 16));
  Derivate << <blocks, threads>>> (img.d_data, lx.d_data, ly.d_data, img.width,
                                   img.pitch, img.height, step, fac1, fac2);
  HessianDeterminant << <blocks, threads>>> (lx.d_data, ly.d_data, img.d_data,
                                             img.width, img.pitch, img.height,
                                             step, fac1, fac2);
  double gpuTime = 0;  // timer0.read();
#ifdef VERBOSE
  printf("HessianDeterminant time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Detects local maxima of the detector response imd above dthreshold and
// appends them to pts via the global d_PointCounter. Detection is purely
// in-plane (8 neighbors); imp/imn (adjacent scale planes) are passed in
// but not referenced in this version. Subpixel offsets come from a 2x2
// Newton step on the local quadratic fit.
// Encoding conventions (consumed by sortFiltered_kernel later):
//  - size is negated to flag "weak" points whose subpixel offset fell
//    outside [-1, 1];
//  - octave plus the signed subpixel x-offset is bit-packed as a float
//    into point.octave;
//  - the subpixel y-offset is stashed in point.angle.
__global__ void FindExtrema(float *imd, float *imp, float *imn, int maxx,
                            int pitch, int maxy, float border, float dthreshold,
                            int scale, int octave, float size,
                            cv::KeyPoint *pts, int maxpts) {
  int x = blockIdx.x * 32 + threadIdx.x;
  int y = blockIdx.y * 16 + threadIdx.y;
  // Reject pixels whose comparison window would leave the image.
  int left_x = (int)(x - border + 0.5f) - 1;
  int right_x = (int)(x + border + 0.5f) + 1;
  int up_y = (int)(y - border + 0.5f) - 1;
  int down_y = (int)(y + border + 0.5f) + 1;
  if (left_x < 0 || right_x >= maxx || up_y < 0 || down_y >= maxy) return;
  int p = y * pitch + x;
  float v = imd[p];
  // Strict maximum over the 8-neighborhood and above threshold.
  if (v > dthreshold && v > imd[p - pitch - 1] && v > imd[p + pitch + 1] &&
      v > imd[p + pitch - 1] && v > imd[p - pitch + 1] && v > imd[p - 1] &&
      v > imd[p + 1] && v > imd[p + pitch] && v > imd[p - pitch]) {
    // 2D quadratic refinement: solve H * dst = -grad for the offset.
    float dx = 0.5f * (imd[p + 1] - imd[p - 1]);
    float dy = 0.5f * (imd[p + pitch] - imd[p - pitch]);
    float dxx = imd[p + 1] + imd[p - 1] - 2.0f * v;
    float dyy = imd[p + pitch] + imd[p - pitch] - 2.0f * v;
    float dxy = 0.25f * (imd[p + pitch + 1] + imd[p - pitch - 1] -
                         imd[p + pitch - 1] - imd[p - pitch + 1]);
    float det = dxx * dyy - dxy * dxy;
    float idet = (det != 0.0f ? 1.0f / det : 0.0f);
    float dst0 = idet * (dxy * dy - dyy * dx);
    float dst1 = idet * (dxy * dx - dxx * dy);
    // "weak" = refinement left the pixel cell; such points keep integer
    // coordinates and are marked via a negative size.
    bool weak = true;
    if (dst0 >= -1.0f && dst0 <= 1.0f && dst1 >= -1.0f && dst1 <= 1.0f) {
      weak = 0;
    }
    unsigned int idx = atomicInc(d_PointCounter, 0x7fffffff);
    if (idx < maxpts) {
      cv::KeyPoint &point = pts[idx];
      point.response = v;
      point.size = (weak ? -1 : 1) * 2.0 * size;
      // Pack octave and |subpixel x| into one float; sign carries dst0's sign.
      float octsub = (dst0 < 0 ? -1 : 1) * (octave + fabs(dst0));
      *(float *)(&point.octave) = (weak ? octave : octsub);
      point.class_id = scale;
      int ratio = (1 << octave);
      point.pt.x = ratio * (x);
      point.pt.y = ratio * (y);
      point.angle = dst1;
    } else {
      // Roll back the counter when the output array is full.
      // NOTE(review): concurrent increments/decrements can transiently
      // overshoot maxpts — confirm the host clamps the final count.
      atomicAdd(d_PointCounter,-1);
    }
  }
}
// Single-thread kernel: records the running keypoint count as the
// exclusive end index for this scale in d_ExtremaIdx.
__global__ void CopyIdxArray(int scale) {
  d_ExtremaIdx[scale] = *d_PointCounter;
}
// Host wrapper: launches extrema detection for one scale and records the
// resulting cumulative point count for that scale.
// Returns elapsed GPU time (timing currently disabled, always 0).
double FindExtrema(CudaImage &img, CudaImage &imgp, CudaImage &imgn,
                   float border, float dthreshold, int scale, int octave,
                   float size, cv::KeyPoint *pts, int maxpts) {
  // TimerGPU timer0(0);
  dim3 threads(32, 16);
  dim3 blocks(iDivUp(img.width, 32), iDivUp(img.height, 16));
  FindExtrema << <blocks, threads>>>
      (img.d_data, imgp.d_data, imgn.d_data, img.width, img.pitch, img.height,
       border, dthreshold, scale, octave, size, pts, maxpts);
  CopyIdxArray << <1, 1>>> (scale);
  CHK
  double gpuTime = 0;  // timer0.read();
#ifdef VERBOSE
  printf("FindExtrema time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// Resets the device-side keypoint counter to zero.
void ClearPoints() {
  int zero = 0;
  safeCall(cudaMemcpyToSymbolAsync(d_PointCounter, &zero, sizeof(int)));
}
// One compare-exchange step of a bitonic network on an int array in shared
// memory: ensures the larger of the two values ends up at
// pts[shmidx + sortdir]. Despite the name, no atomics are used; each pair
// is touched by exactly one thread.
__forceinline__ __device__ void atomicSort(int *pts, int shmidx, int offset,
                                           int sortdir) {
  const int a = shmidx + sortdir;
  const int b = shmidx + (offset - sortdir);
  if (pts[a] < pts[b]) {
    const int tmp = pts[a];
    pts[a] = pts[b];
    pts[b] = tmp;
  }
}
// Ordering predicate for the keypoint bitonic sort: row-major (y, then x)
// order. Entries with pt.x == 0 act as padding; whenever either operand has
// x == 0, the result depends only on whether j is the padding entry, which
// pushes padding toward one end of the sequence.
__forceinline__ __device__ bool atomicCompare(const cv::KeyPoint &i,
                                              const cv::KeyPoint &j) {
  if (i.pt.x * j.pt.x == 0) {
    return j.pt.x == 0;
  }
  return (i.pt.y < j.pt.y) || (i.pt.y == j.pt.y && i.pt.x < j.pt.x);
}
// Compact sort record used by the bitonic sorts below: the original
// keypoint index plus quantized (x, y) coordinates that form the sort key.
// T is the index type (short for the shared-memory variant, int for the
// global-memory fallback).
template <typename T>
struct sortstruct_t {
  T idx;    // index into the source keypoint array; -1 marks a padding slot
  short x;  // truncated pt.x; 0 also marks padding (see atomicCompare)
  short y;  // truncated pt.y
};
// Ordering predicate for sortstruct_t records: row-major (y, then x) order.
// Records with x == 0 act as padding; when either operand has x == 0 the
// result depends only on whether j is the padding record, pushing padding
// toward one end of the sequence.
template <typename T>
__forceinline__ __device__ bool atomicCompare(const sortstruct_t<T> &i,
                                              const sortstruct_t<T> &j) {
  if (i.x * j.x == 0) {
    return j.x == 0;
  }
  return (i.y < j.y) || (i.y == j.y && i.x < j.x);
}
// One compare-exchange step of a bitonic network on sortstruct_t records:
// swaps the pair when atomicCompare says they are out of order. Despite
// the name, no atomics are used; each pair is touched by one thread.
template <typename T>
__forceinline__ __device__ void atomicSort(sortstruct_t<T> *pts, int shmidx,
                                           int offset, int sortdir) {
  sortstruct_t<T> &a = pts[(shmidx + sortdir)];
  sortstruct_t<T> &b = pts[(shmidx + (offset - sortdir))];
  if (atomicCompare(a, b)) {
    const sortstruct_t<T> tmp = a;
    a = b;
    b = tmp;
  }
}
#define BitonicSortThreads 1024
// Sorts the keypoints of one scale (block index = scale) by (y, x) using a
// bitonic sort over a fixed 8192-slot shared-memory index table, then
// gathers the keypoints into newpts in sorted order. Assumes at most 8192
// points per scale; excess slots are padded with idx = -1, x = y = 0 so
// atomicCompare pushes them to the end. T is expected to be cv::KeyPoint.
template <class T>
__global__ void bitonicSort(const T *pts, T *newpts) {
  int scale = blockIdx.x;
  __shared__ struct sortstruct_t<short> shm[8192];
  // This scale's slice of the keypoint array: [first, last).
  int first = scale == 0 ? 0 : d_ExtremaIdx[scale - 1];
  int last = d_ExtremaIdx[scale];
  int nkpts = last - first;
  const cv::KeyPoint *tmpg = &pts[first];
  // Load (index, quantized x/y) records; pad the remainder of the table.
  for (int i = threadIdx.x; i < 8192;
       i += BitonicSortThreads) {
    if (i < nkpts) {
      shm[i].idx = i;
      shm[i].y = (short)tmpg[i].pt.y;
      shm[i].x = (short)tmpg[i].pt.x;
    } else {
      shm[i].idx = -1;
      shm[i].y = 0;
      shm[i].x = 0;
    }
  }
  __syncthreads();
  // Standard bitonic network: i = subsequence size, j = compare distance.
  // 1024 threads cover the 4096 compare slots in 4 strided passes.
  for (int i=1; i<8192; i <<= 1) {
    for (int j=i; j>0; j >>= 1) {
      int tx = threadIdx.x;
      int mask = 0x0fffffff * j;
      for (int idx=0; idx<4096; idx+=BitonicSortThreads) {
        int sortdir = (tx & i) > 0 ? 0 : 1;
        int tidx = ((tx & mask) << 1) + (tx & ~mask);
        atomicSort(shm, tidx, j, j*sortdir);
        tx += BitonicSortThreads;
        __syncthreads();
      }
    }
  }
  // Gather full keypoints into the output in sorted order.
  cv::KeyPoint *tmpnewg = &newpts[first];
  for (int i = 0; i < 8192; i += BitonicSortThreads) {
    if (i + threadIdx.x < nkpts) {
      tmpnewg[i + threadIdx.x].angle = tmpg[shm[i + threadIdx.x].idx].angle;
      tmpnewg[i + threadIdx.x].class_id = tmpg[shm[i + threadIdx.x].idx].class_id;
      tmpnewg[i + threadIdx.x].octave = tmpg[shm[i + threadIdx.x].idx].octave;
      tmpnewg[i + threadIdx.x].pt.y = tmpg[shm[i + threadIdx.x].idx].pt.y;
      tmpnewg[i + threadIdx.x].pt.x = tmpg[shm[i + threadIdx.x].idx].pt.x;
      tmpnewg[i + threadIdx.x].response =
          tmpg[shm[i + threadIdx.x].idx].response;
      tmpnewg[i + threadIdx.x].size = tmpg[shm[i + threadIdx.x].idx].size;
    }
  }
}
// Fallback variant of bitonicSort for scales with more points than the
// 8192-entry shared-memory table can hold: the sort records live in a
// caller-provided global buffer _shm (one slice of _sz records per block).
// _sz must be >= the next power of two above the largest per-scale count.
// Correctness still relies on a single block per scale, so __syncthreads
// suffices to order accesses to this block's slice.
template <class T>
__global__ void bitonicSort_global(const T *pts, T *newpts, sortstruct_t<int>* _shm, int _sz) {
  int scale = blockIdx.x;
  //__shared__ struct sortstruct_t shm[8192];
  // This scale's slice of the keypoint array: [first, last).
  int first = scale == 0 ? 0 : d_ExtremaIdx[scale - 1];
  int last = d_ExtremaIdx[scale];
  int nkpts = last - first;
  const cv::KeyPoint *tmpg = &pts[first];
  // Round the element count up to a power of two for the bitonic network.
  int nkpts_ceil = 1;
  while (nkpts_ceil < nkpts) nkpts_ceil *= 2;
  sortstruct_t<int> *shm = &(_shm[_sz*blockIdx.x]);
  // Load (index, quantized x/y) records; pad the remainder of the table.
  for (int i = threadIdx.x; i < nkpts_ceil;
       i += BitonicSortThreads) {
    if (i < nkpts) {
      shm[i].idx = i;
      shm[i].y = (short)tmpg[i].pt.y;
      shm[i].x = (short)tmpg[i].pt.x;
    } else {
      shm[i].idx = -1;
      shm[i].y = 0;
      shm[i].x = 0;
    }
  }
  __syncthreads();
  // Standard bitonic network: i = subsequence size, j = compare distance.
  for (int i=1; i<nkpts_ceil; i <<= 1) {
    for (int j=i; j>0; j >>= 1) {
      int tx = threadIdx.x;
      int mask = 0x0fffffff * j;
      for (int idx=0; idx<nkpts_ceil/2; idx+=BitonicSortThreads) {
        int sortdir = (tx & i) > 0 ? 0 : 1;
        int tidx = ((tx & mask) << 1) + (tx & ~mask);
        atomicSort(shm, tidx, j, j*sortdir);
        tx += BitonicSortThreads;
        __syncthreads();
      }
    }
  }
  // Gather full keypoints into the output in sorted order.
  cv::KeyPoint *tmpnewg = &newpts[first];
  for (int i = 0; i < nkpts_ceil; i += BitonicSortThreads) {
    if (i + threadIdx.x < nkpts) {
      tmpnewg[i + threadIdx.x].angle = tmpg[shm[i + threadIdx.x].idx].angle;
      tmpnewg[i + threadIdx.x].class_id = tmpg[shm[i + threadIdx.x].idx].class_id;
      tmpnewg[i + threadIdx.x].octave = tmpg[shm[i + threadIdx.x].idx].octave;
      tmpnewg[i + threadIdx.x].pt.y = tmpg[shm[i + threadIdx.x].idx].pt.y;
      tmpnewg[i + threadIdx.x].pt.x = tmpg[shm[i + threadIdx.x].idx].pt.x;
      tmpnewg[i + threadIdx.x].response =
          tmpg[shm[i + threadIdx.x].idx].response;
      tmpnewg[i + threadIdx.x].size = tmpg[shm[i + threadIdx.x].idx].size;
    }
  }
}
#define FindNeighborsThreads 32
// For each keypoint (one block per keypoint, FindNeighborsThreads threads)
// finds earlier keypoints within half the keypoint's size, searching the
// same scale and the immediately preceding scale. Results go into row
// blockIdx.x of kptindices: slot 0 holds 1 + neighbor count, slots 1..
// hold neighbor indices (unordered, filled via atomics on a shared cursor).
// Relies on pts being sorted by (y, x) within each scale so the backward
// scan can break early once the y distance exceeds size/2.
// NOTE(review): no bounds check against `width` when writing neighbor
// slots — confirm width (here 441) safely exceeds the worst-case count.
__global__ void FindNeighbors(cv::KeyPoint *pts, int *kptindices, int width) {
  __shared__ int gidx[1];
  // which scale?
  int scale = pts[blockIdx.x].class_id;
  int cmpIdx = scale < 1 ? 0 : d_ExtremaIdx[scale - 1];
  float size = pts[blockIdx.x].size;
  // All threads write the same value; slot 0 is reserved for the count.
  gidx[0] = 1;
  __syncthreads();
  // One keypoint per block.
  cv::KeyPoint &kpt = pts[blockIdx.x];
  // Key point to compare. Only compare with smaller than current
  // Iterate backwards instead and break as soon as possible!
  //for (int i = cmpIdx + threadIdx.x; i < blockIdx.x; i += FindNeighborsThreads) {
  for (int i=blockIdx.x-threadIdx.x-1; i >= cmpIdx; i -= FindNeighborsThreads) {
    cv::KeyPoint &kpt_cmp = pts[i];
    // Sorted order: once a candidate is too far above, all earlier ones are.
    if (kpt.pt.y-kpt_cmp.pt.y > size*.5f) break;
    //if (fabs(kpt.pt.y-kpt_cmp.pt.y) > size*.5f) continue;
    float dist = (kpt.pt.x - kpt_cmp.pt.x) * (kpt.pt.x - kpt_cmp.pt.x) +
                 (kpt.pt.y - kpt_cmp.pt.y) * (kpt.pt.y - kpt_cmp.pt.y);
    if (dist < size * size * 0.25) {
      int idx = atomicAdd(gidx, 1);
      kptindices[blockIdx.x * width + idx] = i;
    }
  }
  // Also scan the whole previous scale (if any) with the same distance test.
  if (scale > 0) {
    int startidx = d_ExtremaIdx[scale-1];
    cmpIdx = scale < 2 ? 0 : d_ExtremaIdx[scale - 2];
    for (int i=startidx-threadIdx.x-1; i >= cmpIdx; i -= FindNeighborsThreads) {
      cv::KeyPoint &kpt_cmp = pts[i];
      if (kpt_cmp.pt.y-kpt.pt.y > size*.5f) continue;
      if (kpt.pt.y-kpt_cmp.pt.y > size*.5f) break;
      float dist = (kpt.pt.x - kpt_cmp.pt.x) * (kpt.pt.x - kpt_cmp.pt.x) +
                   (kpt.pt.y - kpt_cmp.pt.y) * (kpt.pt.y - kpt_cmp.pt.y);
      if (dist < size * size * 0.25) {
        int idx = atomicAdd(gidx, 1);
        kptindices[blockIdx.x * width + idx] = i;
      }
    }
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    kptindices[blockIdx.x * width] = gidx[0];
  }
}
// TODO Intermediate storage of memberarray and minneighbor
#define FilterExtremaThreads 1024
// Iteratively resolves which keypoints survive non-maximum suppression
// within their neighborhoods (single block, FilterExtremaThreads threads).
// memberarray[i] holds the membership state of point i:
//   -1 = not yet processed, -2 = processed but replaced by a stronger
//   neighbor, >= 0 = index of the surviving point it maps to.
// Each outer iteration processes every point whose neighbors are all
// resolved, either adding it directly or comparing responses against its
// lowest-index resolved neighbor; the loop ends when a full pass makes no
// progress. kptindices rows come from FindNeighbors (slot 0 = count + 1).
// minneighbor and shouldAdd are caller-provided scratch of length nump.
// NOTE(review): the unsynchronized writes to shouldBreak[0]/minneighbor
// are same-value or min-reductions resolved by the following barrier —
// intentional, but verify with racecheck if this kernel is modified.
__global__ void FilterExtrema_kernel(cv::KeyPoint *kpts, cv::KeyPoint *newkpts,
                                     int *kptindices, int width,
                                     int *memberarray,
                                     int *minneighbor,
                                     char *shouldAdd) {
  // -1 means not processed
  // -2 means added but replaced
  // >=0 means added
  __shared__ bool shouldBreak[1];
  int nump = d_PointCounter[0];
  // Initially all points are unprocessed
  for (int i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
    memberarray[i] = -1;
  }
  if (threadIdx.x == 0) {
    shouldBreak[0] = true;
  }
  __syncthreads();
  // Loop until there are no more points to process
  // (bounded at 10000 iterations as a safety net against livelock).
  for (int xx=0; xx<10000; ++xx) {
  //while (true) {
    // Outer loop to handle more than 8*1024 points
    // Start by restoring memberarray
    // Make sure to add appropriate offset to indices
    //	for (int offset=0; offset<nump; offset += 8*1024) {
    //	memberarray[i] = storedmemberarray[i+offset];
    //for (int offset=0; offset<nump; offset += 8*1024) {
    // Mark all points for addition and no minimum neighbor
    //int maxi = nump-offset >= 8*1024 ? 8*1024 : nump-offset;
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      minneighbor[i] = nump+1;
      shouldAdd[i] = true;
    }
    __syncthreads();
    // Look through all points. If there are points that have not been processed,
    // disable breaking and check if it has no processed neighbors (add), has all processed
    // neighbors (compare with neighbors) or has some unprocessed neighbor (wait)
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      int neighborsSize = kptindices[i * width] - 1;
      int *neighbors = &(kptindices[i * width + 1]);
      // Only do if we didn't process the point before
      if (memberarray[i] == -1) {
        // If we process at least one point we shouldn't break
        // No need to sync. Only want to know if at least one thread wants to
        // continue
        shouldBreak[0] = false;
        // Sort neighbors according to the order of currently added points
        // (often very few)
        // If the neighbor has been replaced, stick it to the back
        // If any neighbor has not been processed, break;
        bool shouldProcess = true;
        for (int k = 0; k < neighborsSize; ++k) {
          // If the point has one or more unprocessed neighbors, skip
          if (memberarray[neighbors[k]] == -1) {
            shouldProcess = false;
            shouldAdd[i] = false;
            break;
          }
          // If it has a neighbor that is in the list, we don't add, but process
          if (memberarray[neighbors[k]] >= 0) {
            shouldAdd[i] = false;
          }
        }
        // We should process and potentially replace the neighbor
        if (shouldProcess && !shouldAdd[i]) {
          // Find the smallest neighbor. Often only one or two, so no ned for fancy algorithm
          for (int k = 0; k < neighborsSize; ++k) {
            for (int j = k + 1; j < neighborsSize; ++j) {
              if (memberarray[neighbors[k]] == -2 ||
                  (memberarray[neighbors[j]] != -2 &&
                   memberarray[neighbors[j]] < memberarray[neighbors[k]])) {
                int t = neighbors[k];
                neighbors[k] = neighbors[j];
                neighbors[j] = t;
              }
            }
          }
          // Pick the first neighbor
          // We need to make sure, in case more than one point has this
          // neighbor,
          // That the point with lowest memberarrayindex processes it first
          // Here minneighbor[i] is the target and i the neighbor
          int nidx = neighbors[0];
          minneighbor[nidx] = min(minneighbor[nidx], (int)i);
        }
      }
    }
    __syncthreads();
    // Check which points we can add
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      if (memberarray[i] == -1) {
        if (shouldAdd[i]) {
          memberarray[i] = i;
        }
      }
    }
    __syncthreads();
    // Look at the neighbors. If the response is higher, replace
    for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
      if (minneighbor[i] != nump+1) {
        if (memberarray[minneighbor[i]] == -1) {
          if (!shouldAdd[minneighbor[i]]) {
            const cv::KeyPoint &p0 = kpts[minneighbor[i]];
            const cv::KeyPoint &p1 = kpts[i];
            if (p0.response > p1.response) {
              memberarray[minneighbor[i]] = i;
              memberarray[i] = -2;
            } else {
              memberarray[minneighbor[i]] = -2;
            }
          }
        }
      }
    }
    __syncthreads();
    // End outer loop
    //for (size_t i = threadIdx.x; i < nump; i += FilterExtremaThreads) {
    //	storedmemberarray[i+offset] = memberarray[i];
    //	}
    //	__syncthreads();
    //}
    // Are we done?
    if (shouldBreak[0]) break;
    if (threadIdx.x == 0) {
      shouldBreak[0] = true;
    }
    __syncthreads();
  }
  __syncthreads();
}
// Compacts and sorts the surviving keypoints after FilterExtrema_kernel
// (single block, 1024 threads). Processes memberarray in 2048-entry tiles:
// non-survivors (negative membership or negative size) are mapped to the
// sentinel nump+1, each tile is bitonic-sorted so valid indices pack to
// the front, and the corresponding keypoints are written to newkpts with
// their subpixel offsets (packed into octave/angle by FindExtrema)
// restored into pt.x/pt.y. Finally d_PointCounter is set to the number of
// points written.
// NOTE(review): the curridx[0] updates inside the tail branches are
// written by at most one thread per tile (the thread at the valid/sentinel
// boundary) — verify with racecheck if tile size or predicates change.
__global__ void sortFiltered_kernel(cv::KeyPoint *kpts, cv::KeyPoint *newkpts,
                                    int *memberarray) {
  __shared__ int minneighbor[2048];
  __shared__ int curridx[1];
  int nump = d_PointCounter[0];
  if (threadIdx.x == 0) {
    curridx[0] = 0;
  }
  // Sort array
  const int upper = (nump + 2047) & (0xfffff800);
  for (int i = threadIdx.x; i < upper; i += 2 * FilterExtremaThreads) {
    // Map rejected entries (negative membership, negative size) to nump+1
    // so the sort pushes them behind all valid indices.
    minneighbor[threadIdx.x] =
        i >= nump ? nump+1 : (memberarray[i] < 0 ? nump+1 : (kpts[memberarray[i]].size < 0 ? nump+1 : memberarray[i]));
    minneighbor[threadIdx.x + 1024] =
        i + 1024 >= nump ? nump+1
        : (memberarray[i + 1024] < 0 ? nump+1 : (kpts[memberarray[i+1024]].size < 0 ? nump+1 : memberarray[i+1024]));
    __syncthreads();
    // Sort and store keypoints
#pragma unroll 1
    for (int k = 1; k < 2048; k <<= 1) {
      int sortdir = (threadIdx.x & k) > 0 ? 0 : 1;
#pragma unroll 1
      for (int j = k; j > 0; j >>= 1) {
        int mask = 0x0fffffff * j;
        int tidx = ((threadIdx.x & mask) << 1) + (threadIdx.x & ~mask);
        atomicSort(minneighbor, tidx, j, j * sortdir);
        __syncthreads();
      }
    }
    __syncthreads();
#pragma unroll 1
    for (int k = threadIdx.x; k < 2048; k += 1024) {
      if (minneighbor[k] < nump) {
        // Restore subpixel component
        cv::KeyPoint &okpt = kpts[minneighbor[k]];
        // octave carries (octave + |subpixel x|) bit-packed as a float,
        // with the sign carrying the offset direction (see FindExtrema).
        float octsub = fabs(*(float*)(&kpts[minneighbor[k]].octave));
        int octave = (int)octsub;
        float subp = (*(float*)(&kpts[minneighbor[k]].octave) < 0 ? -1 : 1) * (octsub - octave);
        float ratio = 1 << octave;
        cv::KeyPoint &tkpt = newkpts[k + curridx[0]];
        // angle temporarily holds the subpixel y offset.
        tkpt.pt.y = ratio * ((int)(0.5f+okpt.pt.y / ratio) + okpt.angle);
        tkpt.pt.x = ratio * ((int)(0.5f+okpt.pt.x / ratio) + subp);
        // newkpts[k + curridx[0] + threadIdx.x].angle = 0;  // This will be set elsewhere
        tkpt.class_id = okpt.class_id;
        tkpt.octave = octave;
        tkpt.response = okpt.response;
        tkpt.size = okpt.size;
      }
    }
    __syncthreads();
    // How many did we add?
    if (minneighbor[2047] < nump) {
      curridx[0] += 2048;
    } else {
      if (minneighbor[1024] < nump) {
        if (threadIdx.x < 1023 && minneighbor[1024 + threadIdx.x] < nump &&
            minneighbor[1024 + threadIdx.x + 1] == nump+1) {
          curridx[0] += 1024 + threadIdx.x + 1;
        }
      } else {
        if (minneighbor[threadIdx.x] < nump &&
            minneighbor[threadIdx.x + 1] == nump+1) {
          curridx[0] += threadIdx.x + 1;
        }
      }
      __syncthreads();
    }
  }
  __syncthreads();
  if (threadIdx.x == 0) {
    d_PointCounter[0] = curridx[0];
  }
}
// Deduplicates detected extrema: sorts keypoints per scale, finds spatial
// neighbors within each point's filter size, keeps the strongest response
// per neighborhood, and compacts the survivors back into `pts` (using
// `newpts` as scratch). On return `nump` holds the filtered count.
// kptindices must hold at least nump * 441 ints.
// Fix vs. original: all CUDA API calls are now checked via safeCall,
// matching the error-handling convention used throughout this file.
void FilterExtrema(cv::KeyPoint *pts, cv::KeyPoint *newpts, int* kptindices, int& nump) {
  safeCall(cudaMemcpyFromSymbol(&nump, d_PointCounter, sizeof(int)));
  unsigned int extremaidx_h[16];
  safeCall(cudaMemcpyFromSymbol(extremaidx_h, d_ExtremaIdx, 16 * sizeof(unsigned int)));
  // Largest per-scale count decides whether the shared-memory sort fits.
  int maxnump = extremaidx_h[0];
  for (int i = 1; i < 16; ++i) {
    maxnump = max(maxnump, extremaidx_h[i] - extremaidx_h[i - 1]);
  }
  // Capacity of one neighbor row in kptindices (count slot + indices).
  int width = ceil(21) * ceil(21);
  // Sort the list of points, one block per scale.
  dim3 blocks(16, 1, 1);
  dim3 threads(BitonicSortThreads, 1, 1);
  if (maxnump <= 8 * 1024) {
    bitonicSort << <blocks, threads>>> (pts, newpts);
  } else {
    // Shared memory is too small: fall back to a global-memory sort buffer
    // sized to the next power of two (per scale).
    int nump_ceil = 1;
    while (nump_ceil < nump) nump_ceil <<= 1;
    std::cout << "numpceil: " << nump_ceil << std::endl;  // debug trace
    sortstruct_t<int>* sortstruct;
    safeCall(cudaMalloc((void**)&sortstruct, nump_ceil * 16 * sizeof(sortstruct_t<int>)));
    bitonicSort_global << <blocks, threads>>> (pts, newpts, sortstruct, nump_ceil);
    safeCall(cudaFree(sortstruct));
  }
  CHK
  // Find all neighbors, one block per keypoint.
  safeCall(cudaStreamSynchronize(copyStream));
  blocks.x = nump;
  threads.x = FindNeighborsThreads;
  FindNeighbors << <blocks, threads>>> (newpts, kptindices, width);
  CHK
  // Filter extrema (single-block kernels; scratch buffers sized by nump).
  blocks.x = 1;
  threads.x = FilterExtremaThreads;
  int *buffer1, *buffer2;
  safeCall(cudaMalloc((void**)&buffer1, nump * sizeof(int)));
  safeCall(cudaMalloc((void**)&buffer2, nump * sizeof(int)));
  char* buffer3;
  safeCall(cudaMalloc((void**)&buffer3, nump));
  FilterExtrema_kernel << <blocks, threads>>> (newpts, pts, kptindices, width,
                                               buffer1, buffer2, buffer3);
  threads.x = 1024;
  sortFiltered_kernel << <blocks, threads>>> (newpts, pts, buffer1);
  CHK
  safeCall(cudaFree(buffer1));
  safeCall(cudaFree(buffer2));
  safeCall(cudaFree(buffer3));
  // NOTE(review): async copy into a pageable host int is effectively
  // synchronous, but callers should not depend on `nump` before a sync.
  safeCall(cudaMemcpyFromSymbolAsync(&nump, d_PointCounter, sizeof(int)));
}
// Copies numPts keypoints from the device array into the host vector.
// The copy is asynchronous on copyStream: the caller must synchronize the
// stream before reading h_pts. Returns numPts.
int GetPoints(std::vector<cv::KeyPoint> &h_pts, cv::KeyPoint *d_pts, int numPts) {
  h_pts.resize(numPts);
  void *dst = (float *)&h_pts[0];
  size_t nbytes = sizeof(cv::KeyPoint) * numPts;
  safeCall(cudaMemcpyAsync(dst, d_pts, nbytes, cudaMemcpyDeviceToHost,
                           copyStream));
  return numPts;
}
// Copies numPts 61-byte descriptors from the device matrix into a freshly
// allocated host matrix. The copy is asynchronous on copyStream: the
// caller must synchronize the stream before reading h_desc.
// Fix vs. original: the cudaMemcpyAsync result is now checked via
// safeCall, consistent with GetPoints above.
void GetDescriptors(cv::Mat &h_desc, cv::Mat &d_desc, int numPts) {
  h_desc = cv::Mat(numPts, 61, CV_8U);
  safeCall(cudaMemcpyAsync(h_desc.data, d_desc.data, numPts * 61,
                           cudaMemcpyDeviceToHost, copyStream));
}
// Computes the 87 (3 x 29) M-LDB aggregation values for one keypoint per
// block (EXTRACT_S threads): mean intensity plus rotated dx/dy sums over
// 2x2, 3x3 and 4x4 grid subdivisions of a rotated, scaled patch around
// the keypoint. Results for point p land in _vals[p * 3 * 29 ...] and are
// later binarized by BuildDescriptor.
// d_imgs is laid out as 4 CudaImages per pyramid level: [im, ?, dx, dy].
// NOTE(review): uses legacy mask-less __shfl_down, which relies on
// implicit warp synchrony (removed on Volta+ CUDA); migrate to
// __shfl_down_sync with an explicit mask when targeting SM70+.
__global__ void ExtractDescriptors(cv::KeyPoint *d_pts, CudaImage *d_imgs,
                                   float *_vals, int size2, int size3,
                                   int size4) {
  __shared__ float acc_vals[3 * 30 * EXTRACT_S];
  float *acc_vals_im = &acc_vals[0];
  float *acc_vals_dx = &acc_vals[30 * EXTRACT_S];
  float *acc_vals_dy = &acc_vals[2 * 30 * EXTRACT_S];
  int p = blockIdx.x;
  float *vals = &_vals[p * 3 * 29];
  // Map the keypoint back into its pyramid level's resolution.
  float iratio = 1.0f / (1 << d_pts[p].octave);
  int scale = (int)(0.5f * d_pts[p].size * iratio + 0.5f);
  float xf = d_pts[p].pt.x * iratio;
  float yf = d_pts[p].pt.y * iratio;
  float ang = d_pts[p].angle;
  float co = cos(ang);
  float si = sin(ang);
  int tx = threadIdx.x;
  int lev = d_pts[p].class_id;
  float *imd = d_imgs[4 * lev + 0].d_data;
  float *dxd = d_imgs[4 * lev + 2].d_data;
  float *dyd = d_imgs[4 * lev + 3].d_data;
  int pitch = d_imgs[4 * lev + 0].pitch;
  int winsize = max(3 * size3, 4 * size4);
  // Zero the per-thread accumulators (30 bins each for im/dx/dy).
  for (int i = 0; i < 30; ++i) {
    acc_vals_im[i * EXTRACT_S + tx] = 0.f;
    acc_vals_dx[i * EXTRACT_S + tx] = 0.f;
    acc_vals_dy[i * EXTRACT_S + tx] = 0.f;
  }
  __syncthreads();
  // Accumulate samples over the rotated winsize x winsize patch.
  for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
    int y = i / winsize;
    int x = i - winsize * y;
    int m = max(x, y);
    if (m >= winsize) continue;
    int l = x - size2;
    int k = y - size2;
    // Rotate and scale the sample offset around the keypoint.
    int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
    int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
    int pos = yp * pitch + xp;
    float im = imd[pos];
    float dx = dxd[pos];
    float dy = dyd[pos];
    // Rotate the gradient into the keypoint's frame.
    float rx = -dx * si + dy * co;
    float ry = dx * co + dy * si;
    if (m < 2 * size2) {
      int x2 = (x < size2 ? 0 : 1);
      int y2 = (y < size2 ? 0 : 1);
      // Add 2x2
      acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx] += im;
      acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx + 1] += rx;
      acc_vals[3 * (y2 * 2 + x2) + 3 * 30 * tx + 2] += ry;
    }
    if (m < 3 * size3) {
      int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
      int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
      // Add 3x3
      acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx] += im;
      acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx + 1] += rx;
      acc_vals[3 * (4 + y3 * 3 + x3) + 3 * 30 * tx + 2] += ry;
    }
    if (m < 4 * size4) {
      int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
      int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
      // Add 4x4
      acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx] += im;
      acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx + 1] += rx;
      acc_vals[3 * (4 + 9 + y4 * 4 + x4) + 3 * 30 * tx + 2] += ry;
    }
  }
  __syncthreads();
  // Reduce stuff: two half-warps each handle alternating bin offsets,
  // folding EXTRACT_S per-thread partials into acc_vals[0..89].
  float acc_reg;
#pragma unroll
  for (int i = 0; i < 15; ++i) {
    // 0..31 takes care of even accs, 32..63 takes care of odd accs
    int offset = 2 * i + (tx < 32 ? 0 : 1);
    int tx_d = tx < 32 ? tx : tx - 32;
    for (int d = 0; d < 90; d += 30) {
      if (tx_d < 32) {
        acc_reg = acc_vals[3 * 30 * tx_d + offset + d] +
                  acc_vals[3 * 30 * (tx_d + 32) + offset + d];
        acc_reg += __shfl_down(acc_reg, 1);
        acc_reg += __shfl_down(acc_reg, 2);
        acc_reg += __shfl_down(acc_reg, 4);
        acc_reg += __shfl_down(acc_reg, 8);
        acc_reg += __shfl_down(acc_reg, 16);
      }
      if (tx_d == 0) {
        acc_vals[offset + d] = acc_reg;
      }
    }
  }
  __syncthreads();
  // Have 29*3 values to store
  // They are in acc_vals[0..28,64*30..64*30+28,64*60..64*60+28]
  if (tx < 29) {
    vals[tx] = acc_vals[tx];
    vals[29 + tx] = acc_vals[29 + tx];
    vals[2 * 29 + tx] = acc_vals[2 * 29 + tx];
  }
}
// Serial variant of ExtractDescriptors: computes the same M-LDB
// aggregation values but in three passes (intensity, rotated dx, rotated
// dy), reusing one 30-bin accumulator per thread and reducing into
// final_vals between passes.
// NOTE(review): all three reduction passes write final_vals[3 * offset] —
// the dx and dy passes appear to overwrite the intensity results instead
// of filling final_vals[3 * offset + 1] / [3 * offset + 2], and the final
// store reads final_vals[0..86]. Compare against the non-serial
// ExtractDescriptors before using this kernel.
// NOTE(review): there is no __syncthreads() between each reduction and
// the re-initialization of acc_vals for the next pass, and the tree
// reduction relies on implicit warp synchrony — audit with racecheck.
__global__ void ExtractDescriptors_serial(cv::KeyPoint *d_pts,
                                          CudaImage *d_imgs, float *_vals,
                                          int size2, int size3, int size4) {
  __shared__ float acc_vals[30 * EXTRACT_S];
  __shared__ float final_vals[3 * 30];
  int p = blockIdx.x;
  float *vals = &_vals[p * 3 * 29];
  // Map the keypoint back into its pyramid level's resolution.
  float iratio = 1.0f / (1 << d_pts[p].octave);
  int scale = (int)(0.5f * d_pts[p].size * iratio + 0.5f);
  float xf = d_pts[p].pt.x * iratio;
  float yf = d_pts[p].pt.y * iratio;
  float ang = d_pts[p].angle;
  float co = cos(ang);
  float si = sin(ang);
  int tx = threadIdx.x;
  int lev = d_pts[p].class_id;
  float *imd = d_imgs[4 * lev + 0].d_data;
  float *dxd = d_imgs[4 * lev + 2].d_data;
  float *dyd = d_imgs[4 * lev + 3].d_data;
  int pitch = d_imgs[4 * lev + 0].pitch;
  int winsize = max(3 * size3, 4 * size4);
  // IM
  for (int i = 0; i < 30; ++i) {
    acc_vals[i * EXTRACT_S + tx] = 0.f;
  }
  __syncthreads();
  // Pass 1: accumulate raw intensities over the rotated patch.
  for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
    int y = i / winsize;
    int x = i - winsize * y;
    int m = max(x, y);
    if (m >= winsize) continue;
    int l = x - size2;
    int k = y - size2;
    int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
    int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
    int pos = yp * pitch + xp;
    float im = imd[pos];
    if (m < 2 * size2) {
      int x2 = (x < size2 ? 0 : 1);
      int y2 = (y < size2 ? 0 : 1);
      // atomicAdd(norm2, (x < size2 && y < size2 ? 1 : 0));
      // Add 2x2
      acc_vals[(y2 * 2 + x2) + 30 * tx] += im;
    }
    if (m < 3 * size3) {
      int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
      int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
      // atomicAdd(norm3, (x < size3 && y < size3 ? 1 : 0));
      // Add 3x3
      acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += im;
    }
    if (m < 4 * size4) {
      int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
      int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
      // atomicAdd(norm4, (x < size4 && y < size4 ? 1 : 0));
      // Add 4x4
      acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += im;
    }
  }
  __syncthreads();
  // Reduce stuff
#pragma unroll
  for (int i = 0; i < 15; ++i) {
    // 0..31 takes care of even accs, 32..63 takes care of odd accs
    int offset = 2 * i + (tx < 32 ? 0 : 1);
    int tx_d = tx < 32 ? tx : tx - 32;
    int acc_idx = 30 * tx_d + offset;
    if (tx_d < 32) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
    }
    if (tx_d < 16) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
    }
    if (tx_d < 8) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
    }
    if (tx_d < 4) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
    }
    if (tx_d < 2) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
    }
    if (tx_d < 1) {
      // NOTE(review): see header — index 3 * offset is shared by all passes.
      final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
    }
  }
  // DX
  for (int i = 0; i < 30; ++i) {
    acc_vals[i * EXTRACT_S + tx] = 0.f;
  }
  __syncthreads();
  // Pass 2: accumulate the rotated x-gradient.
  for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
    int y = i / winsize;
    int x = i - winsize * y;
    int m = max(x, y);
    if (m >= winsize) continue;
    int l = x - size2;
    int k = y - size2;
    int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
    int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
    int pos = yp * pitch + xp;
    float dx = dxd[pos];
    float dy = dyd[pos];
    float rx = -dx * si + dy * co;
    if (m < 2 * size2) {
      int x2 = (x < size2 ? 0 : 1);
      int y2 = (y < size2 ? 0 : 1);
      // atomicAdd(norm2, (x < size2 && y < size2 ? 1 : 0));
      // Add 2x2
      acc_vals[(y2 * 2 + x2) + 30 * tx] += rx;
    }
    if (m < 3 * size3) {
      int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
      int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
      // atomicAdd(norm3, (x < size3 && y < size3 ? 1 : 0));
      // Add 3x3
      acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += rx;
    }
    if (m < 4 * size4) {
      int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
      int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
      // atomicAdd(norm4, (x < size4 && y < size4 ? 1 : 0));
      // Add 4x4
      acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += rx;
    }
  }
  __syncthreads();
  // Reduce stuff
#pragma unroll
  for (int i = 0; i < 15; ++i) {
    // 0..31 takes care of even accs, 32..63 takes care of odd accs
    int offset = 2 * i + (tx < 32 ? 0 : 1);
    int tx_d = tx < 32 ? tx : tx - 32;
    int acc_idx = 30 * tx_d + offset;
    if (tx_d < 32) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
    }
    if (tx_d < 16) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
    }
    if (tx_d < 8) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
    }
    if (tx_d < 4) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
    }
    if (tx_d < 2) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
    }
    if (tx_d < 1) {
      // NOTE(review): see header — likely intended 3 * offset + 1 here.
      final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
    }
  }
  // DY
  for (int i = 0; i < 30; ++i) {
    acc_vals[i * EXTRACT_S + tx] = 0.f;
  }
  __syncthreads();
  // Pass 3: accumulate the rotated y-gradient.
  for (int i = tx; i < winsize * winsize; i += EXTRACT_S) {
    int y = i / winsize;
    int x = i - winsize * y;
    int m = max(x, y);
    if (m >= winsize) continue;
    int l = x - size2;
    int k = y - size2;
    int xp = (int)(xf + scale * (k * co - l * si) + 0.5f);
    int yp = (int)(yf + scale * (k * si + l * co) + 0.5f);
    int pos = yp * pitch + xp;
    float dx = dxd[pos];
    float dy = dyd[pos];
    float ry = dx * co + dy * si;
    if (m < 2 * size2) {
      int x2 = (x < size2 ? 0 : 1);
      int y2 = (y < size2 ? 0 : 1);
      // atomicAdd(norm2, (x < size2 && y < size2 ? 1 : 0));
      // Add 2x2
      acc_vals[(y2 * 2 + x2) + 30 * tx] += ry;
    }
    if (m < 3 * size3) {
      int x3 = (x < size3 ? 0 : (x < 2 * size3 ? 1 : 2));
      int y3 = (y < size3 ? 0 : (y < 2 * size3 ? 1 : 2));
      // atomicAdd(norm3, (x < size3 && y < size3 ? 1 : 0));
      // Add 3x3
      acc_vals[(4 + y3 * 3 + x3) + 30 * tx] += ry;
    }
    if (m < 4 * size4) {
      int x4 = (x < 2 * size4 ? (x < size4 ? 0 : 1) : (x < 3 * size4 ? 2 : 3));
      int y4 = (y < 2 * size4 ? (y < size4 ? 0 : 1) : (y < 3 * size4 ? 2 : 3));
      // atomicAdd(norm4, (x < size4 && y < size4 ? 1 : 0));
      // Add 4x4
      acc_vals[(4 + 9 + y4 * 4 + x4) + 30 * tx] += ry;
    }
  }
  __syncthreads();
  // Reduce stuff
#pragma unroll
  for (int i = 0; i < 15; ++i) {
    // 0..31 takes care of even accs, 32..63 takes care of odd accs
    int offset = 2 * i + (tx < 32 ? 0 : 1);
    int tx_d = tx < 32 ? tx : tx - 32;
    int acc_idx = 30 * tx_d + offset;
    if (tx_d < 32) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 32];
    }
    if (tx_d < 16) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 16];
    }
    if (tx_d < 8) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 8];
    }
    if (tx_d < 4) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 4];
    }
    if (tx_d < 2) {
      acc_vals[acc_idx] += acc_vals[acc_idx + 30 * 2];
    }
    if (tx_d < 1) {
      // NOTE(review): see header — likely intended 3 * offset + 2 here.
      final_vals[3 * offset] = acc_vals[acc_idx] + acc_vals[offset + 30];
    }
  }
  __syncthreads();
  // Have 29*3 values to store
  // They are in acc_vals[0..28,64*30..64*30+28,64*60..64*60+28]
  if (tx < 29) {
    vals[tx] = final_vals[tx];
    vals[29 + tx] = final_vals[29 + tx];
    vals[2 * 29 + tx] = final_vals[2 * 29 + tx];
  }
}
// Binarizes the 87 aggregated M-LDB values of keypoint blockIdx.x into a
// 61-byte descriptor: bit b of byte `byteIdx` is set when the value at
// comp_idx_1[byteIdx*8+b] exceeds the value at comp_idx_2[byteIdx*8+b].
// The last byte carries only 6 comparison bits (61*8 - 2 = 486 bits).
__global__ void BuildDescriptor(float *_valsim, unsigned char *_desc) {
  const int p = blockIdx.x;
  const size_t byteIdx = threadIdx.x;
  if (byteIdx >= 61) return;
  const float *vals = &_valsim[3 * 29 * p];
  const int nbits = (byteIdx == 60 ? 6 : 8);
  unsigned char bits = 0;
#pragma unroll
  for (int b = 0; b < nbits; ++b) {
    const int i1 = comp_idx_1[byteIdx * 8 + b];
    const int i2 = comp_idx_2[byteIdx * 8 + b];
    if (vals[i1] > vals[i2]) bits |= (unsigned char)(1 << b);
  }
  _desc[61 * p + byteIdx] = bits;
}
// Host wrapper: computes the M-LDB aggregation values for numPts keypoints
// (one block per keypoint) and packs them into 61-byte binary descriptors
// in desc_d. patsize is the sampling pattern size; vals_d must hold
// numPts * 3 * 29 floats. Returns elapsed GPU time (currently always 0).
// Fix vs. original: the cudaMemsetAsync result is now checked via
// safeCall, consistent with the rest of the file.
double ExtractDescriptors(cv::KeyPoint *d_pts, std::vector<CudaImage> &h_imgs, CudaImage *d_imgs,
                          unsigned char *desc_d, float* vals_d, int patsize, int numPts) {
  // Grid subdivision sizes for the 2x2 / 3x3 / 4x4 comparison grids.
  int size2 = patsize;
  int size3 = ceil(2.0f * patsize / 3.0f);
  int size4 = ceil(0.5f * patsize);
  // TimerGPU timer0(0);
  dim3 blocks(numPts);
  dim3 threads(EXTRACT_S);
  ExtractDescriptors << <blocks, threads>>>(d_pts, d_imgs, vals_d, size2, size3, size4);
  CHK;
  // Clear the descriptor buffer before setting individual bits.
  safeCall(cudaMemsetAsync(desc_d, 0, numPts * 61));
  BuildDescriptor << <blocks, 64>>> (vals_d, desc_d);
  CHK;
  double gpuTime = 0;  // timer0.read();
#ifdef VERBOSE
  printf("ExtractDescriptors time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
#define NTHREADS_MATCH 32
// Brute-force Hamming matcher: one block per query descriptor.
// Each of the NTHREADS_MATCH threads scans a strided slice of the train set
// keeping a local best/second-best, then a shared-memory tree reduction
// merges them. Emits the two nearest neighbours per query for the ratio
// test: matches[2p] = best, matches[2p+1] = second best.
// Descriptors are 61 bytes zero-padded to 64 and compared as 8 x u64.
__global__ void MatchDescriptors(unsigned char *d1, unsigned char *d2,
                                 int pitch, int nkpts_2, cv::DMatch *matches) {
  int p = blockIdx.x;
  int x = threadIdx.x;
  __shared__ int idxBest[NTHREADS_MATCH];
  __shared__ int idxSecondBest[NTHREADS_MATCH];
  __shared__ int scoreBest[NTHREADS_MATCH];
  __shared__ int scoreSecondBest[NTHREADS_MATCH];
  // 512 exceeds the maximum possible distance (8 * 64 bits).
  idxBest[x] = 0;
  idxSecondBest[x] = 0;
  scoreBest[x] = 512;
  scoreSecondBest[x] = 512;
  __syncthreads();
  unsigned long long *d1i = (unsigned long long *)(d1 + pitch * p);
  for (int i = 0; i < nkpts_2; i += NTHREADS_MATCH) {
    if (i + x < nkpts_2) {
      // Compute the train pointer only when in bounds.
      unsigned long long *d2i = (unsigned long long *)(d2 + pitch * (x + i));
      // Hamming distance over the 512 descriptor bits.
      int score = 0;
#pragma unroll
      for (int j = 0; j < 8; ++j) {
        score += __popcll(d1i[j] ^ d2i[j]);
      }
      if (score < scoreBest[x]) {
        scoreSecondBest[x] = scoreBest[x];
        scoreBest[x] = score;
        idxSecondBest[x] = idxBest[x];
        idxBest[x] = i + x;
      } else if (score < scoreSecondBest[x]) {
        scoreSecondBest[x] = score;
        idxSecondBest[x] = i + x;
      }
    }
  }
  __syncthreads();
  // Tree reduction of the per-thread best/second-best candidates.
  for (int i = NTHREADS_MATCH / 2; i >= 1; i /= 2) {
    if (x < i) {
      if (scoreBest[x + i] < scoreBest[x]) {
        scoreSecondBest[x] = scoreBest[x];
        scoreBest[x] = scoreBest[x + i];
        idxSecondBest[x] = idxBest[x];
        idxBest[x] = idxBest[x + i];
      } else if (scoreBest[x + i] < scoreSecondBest[x]) {
        scoreSecondBest[x] = scoreBest[x + i];
        idxSecondBest[x] = idxBest[x + i];
      }
      if (scoreSecondBest[x + i] < scoreSecondBest[x]) {
        scoreSecondBest[x] = scoreSecondBest[x + i];
        idxSecondBest[x] = idxSecondBest[x + i];
      }
    }
    // Barrier OUTSIDE the divergent branch: the next iteration reads slots
    // written by other threads in this one. Implicit warp lockstep cannot
    // be relied on with independent thread scheduling (Volta+), so the
    // original barrier-free loop was racy.
    __syncthreads();
  }
  if (x == 0) {
    matches[2 * p].queryIdx = p;
    matches[2 * p].trainIdx = idxBest[0];
    matches[2 * p].distance = scoreBest[0];
    matches[2 * p + 1].queryIdx = p;
    matches[2 * p + 1].trainIdx = idxSecondBest[0];
    matches[2 * p + 1].distance = scoreSecondBest[0];
  }
}
// Host wrapper using caller-provided device/host buffers: matches every
// query descriptor against the train set and appends a 2-NN pair per
// query row to dmatches (for the Lowe ratio test).
//   pitch      - row pitch (bytes) of both padded device descriptor arrays
//   descq_d/desct_d - device descriptors, 61 bytes zero-padded per row
//   dmatches_d/dmatches_h - device/host buffers for 2*rows cv::DMatch
void MatchDescriptors(cv::Mat &desc_query, cv::Mat &desc_train,
                      std::vector<std::vector<cv::DMatch> > &dmatches,
                      size_t pitch,
                      unsigned char* descq_d, unsigned char* desct_d, cv::DMatch* dmatches_d, cv::DMatch* dmatches_h) {
  dim3 block(desc_query.rows);
  MatchDescriptors << <block, NTHREADS_MATCH>>>(descq_d, desct_d, pitch, desc_train.rows, dmatches_d);
  CHK;
  // Blocking copy; also synchronizes with the kernel on the default stream.
  safeCall(cudaMemcpy(dmatches_h, dmatches_d, desc_query.rows * 2 * sizeof(cv::DMatch),
                      cudaMemcpyDeviceToHost));
  dmatches.reserve(dmatches.size() + desc_query.rows);
  for (int i = 0; i < desc_query.rows; ++i) {
    std::vector<cv::DMatch> tdmatch;
    tdmatch.push_back(dmatches_h[2 * i]);       // best match
    tdmatch.push_back(dmatches_h[2 * i + 1]);   // second-best match
    dmatches.push_back(tdmatch);
  }
}
// Self-contained host wrapper: allocates pitched device storage, uploads
// both descriptor sets (zero-padding 61-byte rows to the 64-byte width the
// kernel reads), matches, and appends a 2-NN pair per query to dmatches.
// All CUDA calls are now error-checked (they were previously silent).
void MatchDescriptors(cv::Mat &desc_query, cv::Mat &desc_train,
                      std::vector<std::vector<cv::DMatch> > &dmatches) {
  size_t pitch1, pitch2;
  unsigned char *descq_d;
  safeCall(cudaMallocPitch(&descq_d, &pitch1, 64, desc_query.rows));
  safeCall(cudaMemset2D(descq_d, pitch1, 0, 64, desc_query.rows));  // zero pad bytes 61..63
  safeCall(cudaMemcpy2D(descq_d, pitch1, desc_query.data, desc_query.cols,
                        desc_query.cols, desc_query.rows, cudaMemcpyHostToDevice));
  unsigned char *desct_d;
  safeCall(cudaMallocPitch(&desct_d, &pitch2, 64, desc_train.rows));
  safeCall(cudaMemset2D(desct_d, pitch2, 0, 64, desc_train.rows));
  safeCall(cudaMemcpy2D(desct_d, pitch2, desc_train.data, desc_train.cols,
                        desc_train.cols, desc_train.rows, cudaMemcpyHostToDevice));
  dim3 block(desc_query.rows);
  cv::DMatch *dmatches_d;
  safeCall(cudaMalloc(&dmatches_d, desc_query.rows * 2 * sizeof(cv::DMatch)));
  // NOTE(review): the kernel applies pitch1 to BOTH arrays; since both were
  // allocated with identical 64-byte row width, pitch1 == pitch2 in
  // practice — confirm if the widths ever diverge.
  MatchDescriptors << <block, NTHREADS_MATCH>>>(descq_d, desct_d, pitch1, desc_train.rows, dmatches_d);
  CHK;
  // std::vector replaces raw new[]/delete[]; blocking copy syncs the kernel.
  std::vector<cv::DMatch> dmatches_h(2 * desc_query.rows);
  safeCall(cudaMemcpy(&dmatches_h[0], dmatches_d, desc_query.rows * 2 * sizeof(cv::DMatch),
                      cudaMemcpyDeviceToHost));
  dmatches.reserve(dmatches.size() + desc_query.rows);
  for (int i = 0; i < desc_query.rows; ++i) {
    std::vector<cv::DMatch> tdmatch;
    tdmatch.push_back(dmatches_h[2 * i]);       // best match
    tdmatch.push_back(dmatches_h[2 * i + 1]);   // second-best match
    dmatches.push_back(tdmatch);
  }
  safeCall(cudaFree(descq_d));
  safeCall(cudaFree(desct_d));
  safeCall(cudaFree(dmatches_d));
}
// Appends all ordered cell pairs (j, i) with first <= j < i <= last for one
// value channel (0..2) to the comparison tables. Cell c, channel ch lives
// at flat index 3*c + ch in the per-keypoint value array produced by
// ExtractDescriptors. Returns the updated write position.
static int AddCellComparisons(int *idx1, int *idx2, int cntr,
                              int first, int last, int channel) {
  for (int j = first; j <= last; ++j) {
    for (int i = j + 1; i <= last; ++i) {
      idx1[cntr] = 3 * j + channel;
      idx2[cntr] = 3 * i + channel;
      ++cntr;
    }
  }
  return cntr;
}
// Fills the constant-memory comparison tables consumed by BuildDescriptor:
// 486 pairwise comparisons = 3 channels x (C(4,2) + C(9,2) + C(16,2))
// over the 2x2 (cells 0..3), 3x3 (cells 4..12) and 4x4 (cells 13..28)
// sub-grids, in that order. 61*8 = 488 table slots > 486 used, so the two
// tail slots are now explicitly zeroed (previously copied uninitialized).
void InitCompareIndices() {
  int comp_idx_1_h[61 * 8] = {0};
  int comp_idx_2_h[61 * 8] = {0};
  const int first[3] = {0, 4, 13};  // first cell of each sub-grid
  const int last[3]  = {3, 12, 28}; // last cell of each sub-grid
  int cntr = 0;
  for (int g = 0; g < 3; ++g) {          // sub-grid: 2x2, 3x3, 4x4
    for (int ch = 0; ch < 3; ++ch) {     // value channel within each cell
      cntr = AddCellComparisons(comp_idx_1_h, comp_idx_2_h, cntr,
                                first[g], last[g], ch);
    }
  }
  safeCall(cudaMemcpyToSymbol(comp_idx_1, comp_idx_1_h, 8 * 61 * sizeof(int)));
  safeCall(cudaMemcpyToSymbol(comp_idx_2, comp_idx_2_h, 8 * 61 * sizeof(int)));
}
// Dominant-orientation estimation, one block per keypoint: accumulates
// Gaussian-weighted gradient samples from a radius-6 disc (step scaled by
// keypoint size/octave) into 42 angular bins, sums a sliding window of 7
// consecutive bins (circular), and writes the strongest window's direction
// to d_pts[p].angle mapped into [0, 2*pi).
// Fix: use float math (expf/atan2f, float constants) — the previous
// exp/atan2 and bare CV_PI promoted every sample to double.
__global__ void FindOrientation(cv::KeyPoint *d_pts, CudaImage *d_imgs) {
  __shared__ float resx[42], resy[42];  // per-bin gradient sums
  __shared__ float re8x[42], re8y[42];  // 7-bin sliding-window sums
  int p = blockIdx.x;
  int tx = threadIdx.x;
  if (tx < 42) resx[tx] = resy[tx] = 0.0f;
  __syncthreads();
  int lev = d_pts[p].class_id;
  float *dxd = d_imgs[4 * lev + 2].d_data;  // x-derivative image for this level
  float *dyd = d_imgs[4 * lev + 3].d_data;  // y-derivative image
  int pitch = d_imgs[4 * lev + 0].pitch;
  int octave = d_pts[p].octave;
  int step = (int)(0.5f * d_pts[p].size + 0.5f) >> octave;
  int x = (int)(d_pts[p].pt.x + 0.5f) >> octave;
  int y = (int)(d_pts[p].pt.y + 0.5f) >> octave;
  // Each thread samples one grid point in [-6, 9] x [-6, ...] around the
  // keypoint; only points inside radius 6 contribute.
  int i = (tx & 15) - 6;
  int j = (tx / 16) - 6;
  int r2 = i * i + j * j;
  if (r2 < 36) {
    // Gaussian weight, sigma = 2.5.
    float gweight = expf(-r2 / (2.5f * 2.5f * 2.0f));
    int pos = (y + step * j) * pitch + (x + step * i);
    float dx = gweight * dxd[pos];
    float dy = gweight * dyd[pos];
    float angle = atan2f(dy, dx);
    // Map angle in (-pi, pi] to a bin in 0..41.
    int a = max(min((int)(angle * (float)(21 / CV_PI)) + 21, 41), 0);
    atomicAdd(resx + a, dx);
    atomicAdd(resy + a, dy);
  }
  __syncthreads();
  if (tx < 42) {
    // Circular sum over 7 consecutive bins starting at tx.
    re8x[tx] = resx[tx];
    re8y[tx] = resy[tx];
    for (int k = tx + 1; k < tx + 7; k++) {
      re8x[tx] += resx[k < 42 ? k : k - 42];
      re8y[tx] += resy[k < 42 ? k : k - 42];
    }
  }
  __syncthreads();
  if (tx == 0) {
    // Pick the window with the strongest squared response.
    float maxr = 0.0f;
    int maxk = 0;
    for (int k = 0; k < 42; k++) {
      float r = re8x[k] * re8x[k] + re8y[k] * re8y[k];
      if (r > maxr) {
        maxr = r;
        maxk = k;
      }
    }
    float angle = atan2f(re8y[maxk], re8x[maxk]);
    d_pts[p].angle = (angle < 0.0f ? angle + 2.0f * (float)CV_PI : angle);
  }
}
// Host wrapper: refreshes the device-side CudaImage headers, then computes
// a dominant orientation for each of the numPts keypoints in d_pts.
// Returns elapsed GPU time (currently always 0; timer disabled).
double FindOrientation(cv::KeyPoint *d_pts, std::vector<CudaImage> &h_imgs, CudaImage *d_imgs, int numPts) {
  // Upload the image headers (dropped the bogus (float*) cast of a
  // CudaImage pointer; the copy takes a const void* source anyway).
  safeCall(cudaMemcpyAsync(d_imgs, &h_imgs[0],
                           sizeof(CudaImage) * h_imgs.size(),
                           cudaMemcpyHostToDevice));
  // Make sure the upload finished before the kernel reads d_imgs
  // (now error-checked; was an unchecked bare call).
  safeCall(cudaStreamSynchronize(0));
  dim3 blocks(numPts);   // one block per keypoint
  dim3 threads(ORIENT_S);
  FindOrientation << <blocks, threads>>> (d_pts, d_imgs);
  CHK;
  double gpuTime = 0; // timing disabled; re-enable TimerGPU if needed
#ifdef VERBOSE
  printf("FindOrientation time = %.2f ms\n", gpuTime);
#endif
  return gpuTime;
}
// (removed non-source residue: dataset-viewer footer text accidentally
//  appended during extraction — it is not part of this translation unit)