hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
e836721c655fb6157fdf1813efd6cfade6e1dd39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <typeinfo>
#include "kernels.cu.h"
#include "reduce_kernels.cu"
#include "reduce_kernels_specialized.cu"
#include "general_kernels.cu"
#include "general_kernels_specialized.cu"
template <typename T, int max_threads>
T* transpose_matrix(T* matrix_device, dim3 block, dim3 grid, int rows, int cols) {
T* t_matrix_device = NULL;
int shared_array_size_bytes = block.x * (block.y + 1) * sizeof(T);
CudaUtils::malloc_on_gpu<T, false>((void**) &t_matrix_device, cols, rows);
hipLaunchKernelGGL(( transpose<T>) , dim3(grid), dim3(block), shared_array_size_bytes, 0, t_matrix_device, matrix_device, cols, rows);
CUDA_CHECK_RETURN(hipDeviceSynchronize());
return t_matrix_device;
}
template <typename T, typename Operation, typename SharedMatrixSetter, bool specialized>
T* apply_reduce_operation_line(dim3& block, dim3& grid, T* matrix_device,
int rows, int cols, Operation op, SharedMatrixSetter setter) {
return apply_reduce_operation_line<T, Operation, SharedMatrixSetter, SharedMatrixSetter, specialized>
(
block, grid, matrix_device, rows, cols, op, setter, setter
);
}
/**
* Aplica uma determinada operao em cada linha da matriz em paralelo
*/
template <typename T, typename Operation, typename SharedMatrixSetter1, typename SharedMatrixSetter2, bool specialized>
T* apply_reduce_operation_line(dim3& block, dim3& grid, T* matrix_device,
int rows, int cols, Operation op, SharedMatrixSetter1 setter1, SharedMatrixSetter2 setter2) {
T* block_matrix_device = NULL;
int shared_matrix_size = block.x * block.y;
int shared_matrix_size_bytes = shared_matrix_size * sizeof(T);
int block_matrix_cols = grid.x;
CudaUtils::malloc_on_gpu<T, false>((void**) &block_matrix_device, rows, block_matrix_cols);
//printf("\n\n\nmatrix_device on reduce %s %s %s %s\n\n\n", typeid(T).name(),
// typeid(Operation).name(), typeid(SharedMatrixSetter1).name(), typeid(SharedMatrixSetter2).name()
//);
//CudaUtils::load_and_print<T, true>(matrix_device, rows, cols);
if(specialized) {
if(typeid(Operation) == typeid(MaxReduceOperation<T>) && typeid(SharedMatrixSetter1) == typeid(DefaultSharedPositionSetter<T>)) {
hipLaunchKernelGGL(( multiple_reduce_max_default_setter<T>) , dim3(grid), dim3(block), shared_matrix_size_bytes, 0,
matrix_device, block_matrix_device, rows, cols);
} else if(typeid(Operation) == typeid(SumReduceOperation<T>)) {
hipLaunchKernelGGL(( multiple_reduce_sum_dfi_setter<T>) , dim3(grid), dim3(block), shared_matrix_size_bytes, 0,
matrix_device, block_matrix_device, rows, cols);
}
} else {
hipLaunchKernelGGL(( multiple_reduce<T>) , dim3(grid), dim3(block), shared_matrix_size_bytes, 0,
matrix_device, block_matrix_device, rows, cols, op, setter1);
}
CUDA_CHECK_RETURN(hipDeviceSynchronize());
//printf("\n\n\nblock_matrix_device on reduce %s %d %d %d %d\n\n\n", typeid(T).name(), rows,
// block_matrix_cols, grid.x, grid.y
//);
//CudaUtils::load_and_print<T, true>(block_matrix_device, rows, block_matrix_cols);
block.y = 1;
block.x = block_matrix_cols;
grid.y = 1;
grid.x = rows;
shared_matrix_size = block.x;
shared_matrix_size_bytes = block.x * sizeof(T);
if(specialized) {
if(typeid(Operation) == typeid(MaxReduceOperation<T>) && typeid(SharedMatrixSetter2) == typeid(DefaultSharedPositionSetter<T>)) {
hipLaunchKernelGGL(( multiple_reduce_2_max_default_setter<T>) , dim3(grid), dim3(block), shared_matrix_size_bytes, 0,
block_matrix_device, rows, block_matrix_cols
);
} else if(typeid(Operation) == typeid(SumReduceOperation<T>)) {
hipLaunchKernelGGL(( multiple_reduce_2_sum_default_setter<T>) , dim3(grid), dim3(block), shared_matrix_size_bytes, 0,
block_matrix_device, rows, block_matrix_cols
);
}
} else {
hipLaunchKernelGGL(( multiple_reduce_2<T>) , dim3(grid), dim3(block), shared_matrix_size_bytes, 0,
block_matrix_device, rows, block_matrix_cols, op, setter2
);
}
CUDA_CHECK_RETURN(hipDeviceSynchronize());
//printf("\n\n\nblock_matrix_device on reduce2 %s %d %d\n\n\n", typeid(T).name(), rows, block_matrix_cols);
//CudaUtils::load_and_print<T, true>(block_matrix_device, rows, block_matrix_cols);
return block_matrix_device;
}
/**
* Normaliza a matriz linha a linha
*/
template <typename T, typename N, int max_threads, bool specialized>
N* normalize_matrix_kernel_wrapper(dim3 matrix_block, dim3 matrix_grid,
T* matrix_device, int rows, int cols) {
dim3 max_matrix_block(matrix_block), max_matrix_grid(matrix_grid);
//printf("\n\nmatrix_device\n\n");
//CudaUtils::load_and_print<T, false>(matrix_device, rows, cols, rows - 10, cols - 10);
T* max_matrix_device = apply_reduce_operation_line<T, MaxReduceOperation<T>, DefaultSharedPositionSetter<T>, specialized>
(
max_matrix_block, max_matrix_grid, matrix_device, rows, cols,
MaxReduceOperation<T>(), DefaultSharedPositionSetter<T>()
);
//printf("\nNORMALIZE %d %d %s\n", rows, cols, typeid(T).name());
//printf("\n\nmax_matrix_device\n\n");
//CudaUtils::load_and_print<T, false>(max_matrix_device, rows, max_matrix_block.x);
dim3 t_max_matrix_block, t_max_matrix_grid;
CudaUtils::compute_num_threads_blocks(t_max_matrix_grid, t_max_matrix_block, max_threads, rows, max_matrix_block.x, true, true);
T* t_max_matrix_device = transpose_matrix<T, max_threads>(max_matrix_device, t_max_matrix_block, t_max_matrix_grid, rows, max_matrix_block.x);
T* t_max_matrix_device_last_line = & t_max_matrix_device[(max_matrix_block.x - 1) * (rows)];
//printf("\n\nt_max_matrix_device\n\n");
//CudaUtils::load_and_print<T, true>(t_max_matrix_device, max_matrix_block.x, rows);
//printf("\n\n_t_max_matrix_device_last_line\n\n");
//CudaUtils::load_and_print<T, true>(t_max_matrix_device_last_line, 1, rows);
CudaUtils::compute_num_threads_blocks(t_max_matrix_grid, t_max_matrix_block, max_threads, 1, rows, false, true);
T* t_max_matrix_reduced = apply_reduce_operation_line<T, MaxReduceOperation<T>, DefaultSharedPositionSetter<T>, specialized>
(
t_max_matrix_block, t_max_matrix_grid, t_max_matrix_device_last_line, 1, rows,
MaxReduceOperation<T>(), DefaultSharedPositionSetter<T>()
);
//printf("\n\n_t_max_matrix_device_last_line_reduced\n\n");
//CudaUtils::load_and_print<T, true>(t_max_matrix_reduced, 1, t_max_matrix_block.x, 0, 0);
N* matrix_result_device = NULL;
CudaUtils::malloc_on_gpu<T, false>((void**) &matrix_result_device, rows, cols);
//printf("t_m_m_b.x %d rows %d cols %d\n", t_max_matrix_block.x, rows, cols);
//printf("b_x %d b_y %d g_x %d g_y %d\n", matrix_block.x, matrix_block.y, matrix_grid.x, matrix_grid.y);
//printf("VAI CHAMAR O NORMALIZE\n");
if(typeid(T) == typeid(N)) {
if(specialized) {
hipLaunchKernelGGL(( multiple_normalize_non_cast<T, N>) , dim3(matrix_grid), dim3(matrix_block), 0, 0,
matrix_result_device, matrix_device, t_max_matrix_reduced,
rows, cols, t_max_matrix_block.x
);
} else {
hipLaunchKernelGGL(( multiple_normalize<T, N>) , dim3(matrix_grid), dim3(matrix_block), 0, 0,
matrix_result_device, matrix_device, t_max_matrix_reduced,
rows, cols, t_max_matrix_block.x, NonCastNormalizeMatrixOperation<T, N>()
);
}
} else {
if(specialized) {
hipLaunchKernelGGL(( multiple_normalize_cast<T, N>) , dim3(matrix_grid), dim3(matrix_block), 0, 0,
matrix_result_device, matrix_device, t_max_matrix_reduced,
rows, cols, t_max_matrix_block.x
);
} else {
hipLaunchKernelGGL(( multiple_normalize<T, N>) , dim3(matrix_grid), dim3(matrix_block), 0, 0,
matrix_result_device, matrix_device, t_max_matrix_reduced,
rows, cols, t_max_matrix_block.x, CastNormalizeMatrixOperation<T, N>()
);
}
}
CUDA_CHECK_RETURN(hipDeviceSynchronize());
CUDA_CHECK_RETURN(hipFree(max_matrix_device));
CUDA_CHECK_RETURN(hipFree(matrix_device));
CUDA_CHECK_RETURN(hipFree(t_max_matrix_device));
CUDA_CHECK_RETURN(hipFree(t_max_matrix_reduced));
//printf("\n\nresult_matrix_device\n\n");
//CudaUtils::load_and_print<T, false>(matrix_result_device, rows, cols);
//CudaUtils::load_and_print<T, false>(matrix_result_device, rows, cols, rows - 10, cols - 10);
return matrix_result_device;
}
template <typename T, typename N, int max_threads, bool specialized>
N* apply_tf_idf_metrics(dim3 matrix_block, dim3 matrix_grid, T* matrix_device, int rows, int cols) {
dim3 dfi_matrix_block(matrix_block), dfi_matrix_grid(matrix_grid);
dim3 max_fj_matrix_block(matrix_block), max_fj_matrix_grid(matrix_grid);
dim3 matrix_transpose_block(matrix_block), matrix_transpose_grid(matrix_grid);
CudaUtils::compute_num_threads_blocks(matrix_transpose_grid, matrix_transpose_block, max_threads, rows, cols, true, false);
CudaUtils::compute_num_threads_blocks(max_fj_matrix_grid, max_fj_matrix_block, max_threads, cols, rows, false, false);
N* tf_idf_matrix_device = NULL;
T* transposed_matrix_device = NULL;
T* max_fj_matrix_device = NULL;
//printf("TF_IDF (DFI_MAX)\n\n");
// Para cada linha da matriz, o valor do dfi est na ltima coluna.
T* dfi_matrix_device = apply_reduce_operation_line<T, SumReduceOperation<T>, DFISharedPositionSetter<T>, DefaultSharedPositionSetter<T>, specialized>
(
dfi_matrix_block, dfi_matrix_grid, matrix_device, rows, cols,
SumReduceOperation<T>(), DFISharedPositionSetter<T>(), DefaultSharedPositionSetter<T>()
);
//printf("\n\n(dfi_matrix_device)\n\n");
//CudaUtils::load_and_print<T, true>(dfi_matrix_device, rows, dfi_matrix_block.x);
transposed_matrix_device = transpose_matrix<T, max_threads>(matrix_device, matrix_transpose_block, matrix_transpose_grid, rows, cols);
//CudaUtils::load_and_print<int, true>(transposed_matrix_device, cols, rows, "%d ");
// Para cada linha da matriz, o valor do max(fj) est na ltima coluna.
// O nmero da coluna na matrix TxD indexa uma linha na matriz do max(fj)
//printf("TF_IDF (MAX_FJ)\n\n");
max_fj_matrix_device = apply_reduce_operation_line<T, MaxReduceOperation<T>, DefaultSharedPositionSetter<T>, specialized>
(
max_fj_matrix_block, max_fj_matrix_grid, transposed_matrix_device,
cols, rows, MaxReduceOperation<T>(), DefaultSharedPositionSetter<T>()
);
CUDA_CHECK_RETURN(hipFree(transposed_matrix_device));
//printf("\n\n(max_fj_matrix)\n\n");
//CudaUtils::load_and_print<T, true>(max_fj_matrix_device, cols, max_fj_matrix_block.x);
CudaUtils::malloc_on_gpu<N, false>((void**) &tf_idf_matrix_device, rows, cols);
if(typeid(T) == typeid(N)) {
if(specialized) {
hipLaunchKernelGGL(( transform_tf_idf_non_cast<T, N>) , dim3(matrix_grid), dim3(matrix_block), 0, 0,
tf_idf_matrix_device, matrix_device, rows, cols,
dfi_matrix_device, max_fj_matrix_device,
dfi_matrix_block.x, max_fj_matrix_block.x
);
} else {
hipLaunchKernelGGL(( transform_tf_idf<T, N>) , dim3(matrix_grid), dim3(matrix_block), 0, 0,
tf_idf_matrix_device, matrix_device, rows, cols,
dfi_matrix_device, max_fj_matrix_device,
dfi_matrix_block.x, max_fj_matrix_block.x,
NonCastTFIDFOperation<T, N>()
);
}
} else {
if(specialized) {
hipLaunchKernelGGL(( transform_tf_idf_cast<T, N>) , dim3(matrix_grid), dim3(matrix_block), 0, 0,
tf_idf_matrix_device, matrix_device, rows, cols,
dfi_matrix_device, max_fj_matrix_device,
dfi_matrix_block.x, max_fj_matrix_block.x
);
} else {
hipLaunchKernelGGL(( transform_tf_idf<T, N>) , dim3(matrix_grid), dim3(matrix_block), 0, 0,
tf_idf_matrix_device, matrix_device, rows, cols,
dfi_matrix_device, max_fj_matrix_device,
dfi_matrix_block.x, max_fj_matrix_block.x,
CastTFIDFOperation<T, N>()
);
}
}
CUDA_CHECK_RETURN(hipDeviceSynchronize());
//CudaUtils::load_and_print<N, true>(tf_idf_matrix_device, rows, cols, rows - 10, cols - 10);
CUDA_CHECK_RETURN(hipFree(matrix_device));
CUDA_CHECK_RETURN(hipFree(dfi_matrix_device));
CUDA_CHECK_RETURN(hipFree(max_fj_matrix_device));
return tf_idf_matrix_device;
} | e836721c655fb6157fdf1813efd6cfade6e1dd39.cu | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <typeinfo>
#include "kernels.cu.h"
#include "reduce_kernels.cu"
#include "reduce_kernels_specialized.cu"
#include "general_kernels.cu"
#include "general_kernels_specialized.cu"
template <typename T, int max_threads>
T* transpose_matrix(T* matrix_device, dim3 block, dim3 grid, int rows, int cols) {
T* t_matrix_device = NULL;
int shared_array_size_bytes = block.x * (block.y + 1) * sizeof(T);
CudaUtils::malloc_on_gpu<T, false>((void**) &t_matrix_device, cols, rows);
transpose<T> <<<grid, block, shared_array_size_bytes>>>(t_matrix_device, matrix_device, cols, rows);
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
return t_matrix_device;
}
template <typename T, typename Operation, typename SharedMatrixSetter, bool specialized>
T* apply_reduce_operation_line(dim3& block, dim3& grid, T* matrix_device,
int rows, int cols, Operation op, SharedMatrixSetter setter) {
return apply_reduce_operation_line<T, Operation, SharedMatrixSetter, SharedMatrixSetter, specialized>
(
block, grid, matrix_device, rows, cols, op, setter, setter
);
}
/**
* Aplica uma determinada operação em cada linha da matriz em paralelo
*/
template <typename T, typename Operation, typename SharedMatrixSetter1, typename SharedMatrixSetter2, bool specialized>
T* apply_reduce_operation_line(dim3& block, dim3& grid, T* matrix_device,
int rows, int cols, Operation op, SharedMatrixSetter1 setter1, SharedMatrixSetter2 setter2) {
T* block_matrix_device = NULL;
int shared_matrix_size = block.x * block.y;
int shared_matrix_size_bytes = shared_matrix_size * sizeof(T);
int block_matrix_cols = grid.x;
CudaUtils::malloc_on_gpu<T, false>((void**) &block_matrix_device, rows, block_matrix_cols);
//printf("\n\n\nmatrix_device on reduce %s %s %s %s\n\n\n", typeid(T).name(),
// typeid(Operation).name(), typeid(SharedMatrixSetter1).name(), typeid(SharedMatrixSetter2).name()
//);
//CudaUtils::load_and_print<T, true>(matrix_device, rows, cols);
if(specialized) {
if(typeid(Operation) == typeid(MaxReduceOperation<T>) && typeid(SharedMatrixSetter1) == typeid(DefaultSharedPositionSetter<T>)) {
multiple_reduce_max_default_setter<T> <<<grid, block, shared_matrix_size_bytes>>>
(matrix_device, block_matrix_device, rows, cols);
} else if(typeid(Operation) == typeid(SumReduceOperation<T>)) {
multiple_reduce_sum_dfi_setter<T> <<<grid, block, shared_matrix_size_bytes>>>
(matrix_device, block_matrix_device, rows, cols);
}
} else {
multiple_reduce<T> <<<grid, block, shared_matrix_size_bytes>>>
(matrix_device, block_matrix_device, rows, cols, op, setter1);
}
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
//printf("\n\n\nblock_matrix_device on reduce %s %d %d %d %d\n\n\n", typeid(T).name(), rows,
// block_matrix_cols, grid.x, grid.y
//);
//CudaUtils::load_and_print<T, true>(block_matrix_device, rows, block_matrix_cols);
block.y = 1;
block.x = block_matrix_cols;
grid.y = 1;
grid.x = rows;
shared_matrix_size = block.x;
shared_matrix_size_bytes = block.x * sizeof(T);
if(specialized) {
if(typeid(Operation) == typeid(MaxReduceOperation<T>) && typeid(SharedMatrixSetter2) == typeid(DefaultSharedPositionSetter<T>)) {
multiple_reduce_2_max_default_setter<T> <<<grid, block, shared_matrix_size_bytes>>>
(
block_matrix_device, rows, block_matrix_cols
);
} else if(typeid(Operation) == typeid(SumReduceOperation<T>)) {
multiple_reduce_2_sum_default_setter<T> <<<grid, block, shared_matrix_size_bytes>>>
(
block_matrix_device, rows, block_matrix_cols
);
}
} else {
multiple_reduce_2<T> <<<grid, block, shared_matrix_size_bytes>>>
(
block_matrix_device, rows, block_matrix_cols, op, setter2
);
}
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
//printf("\n\n\nblock_matrix_device on reduce2 %s %d %d\n\n\n", typeid(T).name(), rows, block_matrix_cols);
//CudaUtils::load_and_print<T, true>(block_matrix_device, rows, block_matrix_cols);
return block_matrix_device;
}
/**
* Normaliza a matriz linha a linha
*/
template <typename T, typename N, int max_threads, bool specialized>
N* normalize_matrix_kernel_wrapper(dim3 matrix_block, dim3 matrix_grid,
T* matrix_device, int rows, int cols) {
dim3 max_matrix_block(matrix_block), max_matrix_grid(matrix_grid);
//printf("\n\nmatrix_device\n\n");
//CudaUtils::load_and_print<T, false>(matrix_device, rows, cols, rows - 10, cols - 10);
T* max_matrix_device = apply_reduce_operation_line<T, MaxReduceOperation<T>, DefaultSharedPositionSetter<T>, specialized>
(
max_matrix_block, max_matrix_grid, matrix_device, rows, cols,
MaxReduceOperation<T>(), DefaultSharedPositionSetter<T>()
);
//printf("\nNORMALIZE %d %d %s\n", rows, cols, typeid(T).name());
//printf("\n\nmax_matrix_device\n\n");
//CudaUtils::load_and_print<T, false>(max_matrix_device, rows, max_matrix_block.x);
dim3 t_max_matrix_block, t_max_matrix_grid;
CudaUtils::compute_num_threads_blocks(t_max_matrix_grid, t_max_matrix_block, max_threads, rows, max_matrix_block.x, true, true);
T* t_max_matrix_device = transpose_matrix<T, max_threads>(max_matrix_device, t_max_matrix_block, t_max_matrix_grid, rows, max_matrix_block.x);
T* t_max_matrix_device_last_line = & t_max_matrix_device[(max_matrix_block.x - 1) * (rows)];
//printf("\n\nt_max_matrix_device\n\n");
//CudaUtils::load_and_print<T, true>(t_max_matrix_device, max_matrix_block.x, rows);
//printf("\n\n_t_max_matrix_device_last_line\n\n");
//CudaUtils::load_and_print<T, true>(t_max_matrix_device_last_line, 1, rows);
CudaUtils::compute_num_threads_blocks(t_max_matrix_grid, t_max_matrix_block, max_threads, 1, rows, false, true);
T* t_max_matrix_reduced = apply_reduce_operation_line<T, MaxReduceOperation<T>, DefaultSharedPositionSetter<T>, specialized>
(
t_max_matrix_block, t_max_matrix_grid, t_max_matrix_device_last_line, 1, rows,
MaxReduceOperation<T>(), DefaultSharedPositionSetter<T>()
);
//printf("\n\n_t_max_matrix_device_last_line_reduced\n\n");
//CudaUtils::load_and_print<T, true>(t_max_matrix_reduced, 1, t_max_matrix_block.x, 0, 0);
N* matrix_result_device = NULL;
CudaUtils::malloc_on_gpu<T, false>((void**) &matrix_result_device, rows, cols);
//printf("t_m_m_b.x %d rows %d cols %d\n", t_max_matrix_block.x, rows, cols);
//printf("b_x %d b_y %d g_x %d g_y %d\n", matrix_block.x, matrix_block.y, matrix_grid.x, matrix_grid.y);
//printf("VAI CHAMAR O NORMALIZE\n");
if(typeid(T) == typeid(N)) {
if(specialized) {
multiple_normalize_non_cast<T, N> <<<matrix_grid, matrix_block>>>
(
matrix_result_device, matrix_device, t_max_matrix_reduced,
rows, cols, t_max_matrix_block.x
);
} else {
multiple_normalize<T, N> <<<matrix_grid, matrix_block>>>
(
matrix_result_device, matrix_device, t_max_matrix_reduced,
rows, cols, t_max_matrix_block.x, NonCastNormalizeMatrixOperation<T, N>()
);
}
} else {
if(specialized) {
multiple_normalize_cast<T, N> <<<matrix_grid, matrix_block>>>
(
matrix_result_device, matrix_device, t_max_matrix_reduced,
rows, cols, t_max_matrix_block.x
);
} else {
multiple_normalize<T, N> <<<matrix_grid, matrix_block>>>
(
matrix_result_device, matrix_device, t_max_matrix_reduced,
rows, cols, t_max_matrix_block.x, CastNormalizeMatrixOperation<T, N>()
);
}
}
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
CUDA_CHECK_RETURN(cudaFree(max_matrix_device));
CUDA_CHECK_RETURN(cudaFree(matrix_device));
CUDA_CHECK_RETURN(cudaFree(t_max_matrix_device));
CUDA_CHECK_RETURN(cudaFree(t_max_matrix_reduced));
//printf("\n\nresult_matrix_device\n\n");
//CudaUtils::load_and_print<T, false>(matrix_result_device, rows, cols);
//CudaUtils::load_and_print<T, false>(matrix_result_device, rows, cols, rows - 10, cols - 10);
return matrix_result_device;
}
template <typename T, typename N, int max_threads, bool specialized>
N* apply_tf_idf_metrics(dim3 matrix_block, dim3 matrix_grid, T* matrix_device, int rows, int cols) {
dim3 dfi_matrix_block(matrix_block), dfi_matrix_grid(matrix_grid);
dim3 max_fj_matrix_block(matrix_block), max_fj_matrix_grid(matrix_grid);
dim3 matrix_transpose_block(matrix_block), matrix_transpose_grid(matrix_grid);
CudaUtils::compute_num_threads_blocks(matrix_transpose_grid, matrix_transpose_block, max_threads, rows, cols, true, false);
CudaUtils::compute_num_threads_blocks(max_fj_matrix_grid, max_fj_matrix_block, max_threads, cols, rows, false, false);
N* tf_idf_matrix_device = NULL;
T* transposed_matrix_device = NULL;
T* max_fj_matrix_device = NULL;
//printf("TF_IDF (DFI_MAX)\n\n");
// Para cada linha da matriz, o valor do dfi está na última coluna.
T* dfi_matrix_device = apply_reduce_operation_line<T, SumReduceOperation<T>, DFISharedPositionSetter<T>, DefaultSharedPositionSetter<T>, specialized>
(
dfi_matrix_block, dfi_matrix_grid, matrix_device, rows, cols,
SumReduceOperation<T>(), DFISharedPositionSetter<T>(), DefaultSharedPositionSetter<T>()
);
//printf("\n\n(dfi_matrix_device)\n\n");
//CudaUtils::load_and_print<T, true>(dfi_matrix_device, rows, dfi_matrix_block.x);
transposed_matrix_device = transpose_matrix<T, max_threads>(matrix_device, matrix_transpose_block, matrix_transpose_grid, rows, cols);
//CudaUtils::load_and_print<int, true>(transposed_matrix_device, cols, rows, "%d ");
// Para cada linha da matriz, o valor do max(fj) está na última coluna.
// O número da coluna na matrix TxD indexa uma linha na matriz do max(fj)
//printf("TF_IDF (MAX_FJ)\n\n");
max_fj_matrix_device = apply_reduce_operation_line<T, MaxReduceOperation<T>, DefaultSharedPositionSetter<T>, specialized>
(
max_fj_matrix_block, max_fj_matrix_grid, transposed_matrix_device,
cols, rows, MaxReduceOperation<T>(), DefaultSharedPositionSetter<T>()
);
CUDA_CHECK_RETURN(cudaFree(transposed_matrix_device));
//printf("\n\n(max_fj_matrix)\n\n");
//CudaUtils::load_and_print<T, true>(max_fj_matrix_device, cols, max_fj_matrix_block.x);
CudaUtils::malloc_on_gpu<N, false>((void**) &tf_idf_matrix_device, rows, cols);
if(typeid(T) == typeid(N)) {
if(specialized) {
transform_tf_idf_non_cast<T, N> <<<matrix_grid, matrix_block>>>
(
tf_idf_matrix_device, matrix_device, rows, cols,
dfi_matrix_device, max_fj_matrix_device,
dfi_matrix_block.x, max_fj_matrix_block.x
);
} else {
transform_tf_idf<T, N> <<<matrix_grid, matrix_block>>>
(
tf_idf_matrix_device, matrix_device, rows, cols,
dfi_matrix_device, max_fj_matrix_device,
dfi_matrix_block.x, max_fj_matrix_block.x,
NonCastTFIDFOperation<T, N>()
);
}
} else {
if(specialized) {
transform_tf_idf_cast<T, N> <<<matrix_grid, matrix_block>>>
(
tf_idf_matrix_device, matrix_device, rows, cols,
dfi_matrix_device, max_fj_matrix_device,
dfi_matrix_block.x, max_fj_matrix_block.x
);
} else {
transform_tf_idf<T, N> <<<matrix_grid, matrix_block>>>
(
tf_idf_matrix_device, matrix_device, rows, cols,
dfi_matrix_device, max_fj_matrix_device,
dfi_matrix_block.x, max_fj_matrix_block.x,
CastTFIDFOperation<T, N>()
);
}
}
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
//CudaUtils::load_and_print<N, true>(tf_idf_matrix_device, rows, cols, rows - 10, cols - 10);
CUDA_CHECK_RETURN(cudaFree(matrix_device));
CUDA_CHECK_RETURN(cudaFree(dfi_matrix_device));
CUDA_CHECK_RETURN(cudaFree(max_fj_matrix_device));
return tf_idf_matrix_device;
} |
9df24d9c815d8ce7e1ad5cc602a5f7c9ccd31242.hip | // !!! This is a file automatically generated by hipify!!!
#include "MWTargetNetworkImpl.hpp"
#include "MWCNNLayerImpl.hpp"
#include <cassert>
#include <cmath>
#include <algorithm>
void MWTargetNetworkImpl::allocate(int BufSize, int numBufsToAlloc) {
numBufs
= numBufsToAlloc;
for (int i = 0; i < numBufs; i++) {
float *memPtr = 0;
CUDA_CALL(hipMalloc((void **) &memPtr, sizeof(float) * BufSize));
memBuffer.push_back(memPtr);
}
}
void
MWTargetNetworkImpl::allocatePermuteBuffers(int bufSize, int numBufsToAlloc) {
for (int i = 0; i < numBufsToAlloc; i++) {
float *memPtr = 0;
CUDA_CALL(hipMalloc((void **) &memPtr, sizeof(float) * bufSize));
mtolGPkUMBYDlSSqrRzc.push_back(memPtr);
}
}
void
MWTargetNetworkImpl::preSetup() {
NldNILHvuQqQPSAHXxdT = new
hipblasHandle_t;
if (!NldNILHvuQqQPSAHXxdT) {
MWCNNLayerImpl::throwAllocationError(__LINE__, __FILE__);
}
hipblasCreate(NldNILHvuQqQPSAHXxdT);
NmExSIssnXpisMKKatUq = new
cudnnHandle_t;
if (!NmExSIssnXpisMKKatUq) {
MWCNNLayerImpl::throwAllocationError(__LINE__, __FILE__);
}
cudnnCreate(NmExSIssnXpisMKKatUq);
}
void
MWTargetNetworkImpl::postSetup(MWCNNLayer *layers[], int numLayers) {
if
(*getProposedWorkSpaceSize() > *getAllocatedWorkSpaceSize()) {
if
(xcusoQxPPodcHwVviCWI) { destroyWorkSpace(xcusoQxPPodcHwVviCWI); }
createWorkSpace(xcusoQxPPodcHwVviCWI);
while ((!xcusoQxPPodcHwVviCWI) &&
(*getProposedWorkSpaceSize() > 0)) {
setProposedWorkSpaceSize(MWTargetNetworkImpl::getNextProposedWorkSpaceSize(*getProposedWorkSpaceSize()));
createWorkSpace(xcusoQxPPodcHwVviCWI);
}
}
for (int i = 0; i < numLayers; i++) { layers[i]->postSetup(); }
}
size_t
MWTargetNetworkImpl::getNextProposedWorkSpaceSize(size_t failedWorkSpaceSize) {
assert(failedWorkSpaceSize > 0);
return failedWorkSpaceSize / 2;
}
void
MWTargetNetworkImpl::createWorkSpace(float *&xkUNToJIgvoLoUQuzKRF) {
hipError_t rlQsibXJSWJVnUVpdNeL = hipMalloc((void **) &xkUNToJIgvoLoUQuzKRF,
*getProposedWorkSpaceSize());
if (rlQsibXJSWJVnUVpdNeL != hipSuccess) {
xkUNToJIgvoLoUQuzKRF = NULL;
setAllocatedWorkSpaceSize(0);
rlQsibXJSWJVnUVpdNeL = hipGetLastError();
} else {
setAllocatedWorkSpaceSize(*getProposedWorkSpaceSize());
}
}
void
MWTargetNetworkImpl::destroyWorkSpace(float *&xkUNToJIgvoLoUQuzKRF) {
CUDA_FREE_CALL(xkUNToJIgvoLoUQuzKRF);
xkUNToJIgvoLoUQuzKRF = NULL;
setAllocatedWorkSpaceSize(0);
}
void
MWTargetNetworkImpl::setProposedWorkSpaceSize(size_t wss) {
ncMionCCOTOYjWcmaIVD = wss;
}
size_t *
MWTargetNetworkImpl::getProposedWorkSpaceSize() {
return
&ncMionCCOTOYjWcmaIVD;
}
void
MWTargetNetworkImpl::setAllocatedWorkSpaceSize(size_t wss) {
GvDXGhRLfipwBoRPoGfI = wss;
}
size_t *
MWTargetNetworkImpl::getAllocatedWorkSpaceSize() {
return
&GvDXGhRLfipwBoRPoGfI;
}
float *
MWTargetNetworkImpl::getWorkSpace() { return xcusoQxPPodcHwVviCWI; }
float *
MWTargetNetworkImpl::getPermuteBuffer(int bufIndex) {
return
mtolGPkUMBYDlSSqrRzc[bufIndex];
}
hipblasHandle_t *
MWTargetNetworkImpl::getCublasHandle() { return NldNILHvuQqQPSAHXxdT; }
cudnnHandle_t *MWTargetNetworkImpl::getCudnnHandle() {
return
NmExSIssnXpisMKKatUq;
}
void MWTargetNetworkImpl::setAutoTune(bool
autotune) { MW_autoTune = autotune; }
bool MWTargetNetworkImpl::getAutoTune()
const { return MW_autoTune; }
void MWTargetNetworkImpl::deallocate() {
for (int
i = 0; i < memBuffer.size(); i++) {
float *memPtr = memBuffer[i];
if (memPtr) {
CUDA_FREE_CALL(memPtr);
}
}
memBuffer.clear();
for (int i = 0; i <
mtolGPkUMBYDlSSqrRzc.size(); i++) {
float *memPtr =
mtolGPkUMBYDlSSqrRzc[i];
if (memPtr) { CUDA_FREE_CALL(memPtr); }
}
mtolGPkUMBYDlSSqrRzc.clear();
}
void MWTargetNetworkImpl::cleanup() {
if
(xcusoQxPPodcHwVviCWI) { destroyWorkSpace(xcusoQxPPodcHwVviCWI); }
if
(NldNILHvuQqQPSAHXxdT) {
hipblasDestroy(*NldNILHvuQqQPSAHXxdT);
delete
NldNILHvuQqQPSAHXxdT;
}
if (NmExSIssnXpisMKKatUq) {
cudnnDestroy(*NmExSIssnXpisMKKatUq);
delete NmExSIssnXpisMKKatUq;
}
}
float *MWTargetNetworkImpl::getBufferPtr(int bufferIndex) {
return
memBuffer[bufferIndex];
}
MWTargetNetworkImpl::~MWTargetNetworkImpl() {} | 9df24d9c815d8ce7e1ad5cc602a5f7c9ccd31242.cu | #include "MWTargetNetworkImpl.hpp"
#include "MWCNNLayerImpl.hpp"
#include <cassert>
#include <cmath>
#include <algorithm>
void MWTargetNetworkImpl::allocate(int BufSize, int numBufsToAlloc) {
numBufs
= numBufsToAlloc;
for (int i = 0; i < numBufs; i++) {
float *memPtr = 0;
CUDA_CALL(cudaMalloc((void **) &memPtr, sizeof(float) * BufSize));
memBuffer.push_back(memPtr);
}
}
void
MWTargetNetworkImpl::allocatePermuteBuffers(int bufSize, int numBufsToAlloc) {
for (int i = 0; i < numBufsToAlloc; i++) {
float *memPtr = 0;
CUDA_CALL(cudaMalloc((void **) &memPtr, sizeof(float) * bufSize));
mtolGPkUMBYDlSSqrRzc.push_back(memPtr);
}
}
void
MWTargetNetworkImpl::preSetup() {
NldNILHvuQqQPSAHXxdT = new
cublasHandle_t;
if (!NldNILHvuQqQPSAHXxdT) {
MWCNNLayerImpl::throwAllocationError(__LINE__, __FILE__);
}
cublasCreate(NldNILHvuQqQPSAHXxdT);
NmExSIssnXpisMKKatUq = new
cudnnHandle_t;
if (!NmExSIssnXpisMKKatUq) {
MWCNNLayerImpl::throwAllocationError(__LINE__, __FILE__);
}
cudnnCreate(NmExSIssnXpisMKKatUq);
}
void
MWTargetNetworkImpl::postSetup(MWCNNLayer *layers[], int numLayers) {
if
(*getProposedWorkSpaceSize() > *getAllocatedWorkSpaceSize()) {
if
(xcusoQxPPodcHwVviCWI) { destroyWorkSpace(xcusoQxPPodcHwVviCWI); }
createWorkSpace(xcusoQxPPodcHwVviCWI);
while ((!xcusoQxPPodcHwVviCWI) &&
(*getProposedWorkSpaceSize() > 0)) {
setProposedWorkSpaceSize(MWTargetNetworkImpl::getNextProposedWorkSpaceSize(*getProposedWorkSpaceSize()));
createWorkSpace(xcusoQxPPodcHwVviCWI);
}
}
for (int i = 0; i < numLayers; i++) { layers[i]->postSetup(); }
}
size_t
MWTargetNetworkImpl::getNextProposedWorkSpaceSize(size_t failedWorkSpaceSize) {
assert(failedWorkSpaceSize > 0);
return failedWorkSpaceSize / 2;
}
void
MWTargetNetworkImpl::createWorkSpace(float *&xkUNToJIgvoLoUQuzKRF) {
cudaError_t rlQsibXJSWJVnUVpdNeL = cudaMalloc((void **) &xkUNToJIgvoLoUQuzKRF,
*getProposedWorkSpaceSize());
if (rlQsibXJSWJVnUVpdNeL != cudaSuccess) {
xkUNToJIgvoLoUQuzKRF = NULL;
setAllocatedWorkSpaceSize(0);
rlQsibXJSWJVnUVpdNeL = cudaGetLastError();
} else {
setAllocatedWorkSpaceSize(*getProposedWorkSpaceSize());
}
}
void
MWTargetNetworkImpl::destroyWorkSpace(float *&xkUNToJIgvoLoUQuzKRF) {
CUDA_FREE_CALL(xkUNToJIgvoLoUQuzKRF);
xkUNToJIgvoLoUQuzKRF = NULL;
setAllocatedWorkSpaceSize(0);
}
void
MWTargetNetworkImpl::setProposedWorkSpaceSize(size_t wss) {
ncMionCCOTOYjWcmaIVD = wss;
}
size_t *
MWTargetNetworkImpl::getProposedWorkSpaceSize() {
return
&ncMionCCOTOYjWcmaIVD;
}
void
MWTargetNetworkImpl::setAllocatedWorkSpaceSize(size_t wss) {
GvDXGhRLfipwBoRPoGfI = wss;
}
size_t *
MWTargetNetworkImpl::getAllocatedWorkSpaceSize() {
return
&GvDXGhRLfipwBoRPoGfI;
}
float *
MWTargetNetworkImpl::getWorkSpace() { return xcusoQxPPodcHwVviCWI; }
float *
MWTargetNetworkImpl::getPermuteBuffer(int bufIndex) {
return
mtolGPkUMBYDlSSqrRzc[bufIndex];
}
cublasHandle_t *
MWTargetNetworkImpl::getCublasHandle() { return NldNILHvuQqQPSAHXxdT; }
cudnnHandle_t *MWTargetNetworkImpl::getCudnnHandle() {
return
NmExSIssnXpisMKKatUq;
}
void MWTargetNetworkImpl::setAutoTune(bool
autotune) { MW_autoTune = autotune; }
bool MWTargetNetworkImpl::getAutoTune()
const { return MW_autoTune; }
void MWTargetNetworkImpl::deallocate() {
for (int
i = 0; i < memBuffer.size(); i++) {
float *memPtr = memBuffer[i];
if (memPtr) {
CUDA_FREE_CALL(memPtr);
}
}
memBuffer.clear();
for (int i = 0; i <
mtolGPkUMBYDlSSqrRzc.size(); i++) {
float *memPtr =
mtolGPkUMBYDlSSqrRzc[i];
if (memPtr) { CUDA_FREE_CALL(memPtr); }
}
mtolGPkUMBYDlSSqrRzc.clear();
}
void MWTargetNetworkImpl::cleanup() {
if
(xcusoQxPPodcHwVviCWI) { destroyWorkSpace(xcusoQxPPodcHwVviCWI); }
if
(NldNILHvuQqQPSAHXxdT) {
cublasDestroy(*NldNILHvuQqQPSAHXxdT);
delete
NldNILHvuQqQPSAHXxdT;
}
if (NmExSIssnXpisMKKatUq) {
cudnnDestroy(*NmExSIssnXpisMKKatUq);
delete NmExSIssnXpisMKKatUq;
}
}
float *MWTargetNetworkImpl::getBufferPtr(int bufferIndex) {
return
memBuffer[bufferIndex];
}
MWTargetNetworkImpl::~MWTargetNetworkImpl() {} |
b022bc08c6a1a73006affbb0bf27032ee2492100.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <strings/char_types/char_cases.h>
#include <strings/char_types/is_flags.h>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/case.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <strings/utilities.cuh>
#include <strings/utilities.hpp>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Used as template parameter to divide size calculation from
* the actual string operation within a function.
*
* Useful when most of the logic is identical for both passes.
*/
enum TwoPass {
SizeOnly = 0, ///< first pass: only compute the output size in bytes
ExecuteOp    ///< second pass: actually write the converted string bytes
};
/**
* @brief Per string logic for case conversion functions.
*
* @tparam Pass Determines if size calculation or output write is begin performed.
*/
template <TwoPass Pass = SizeOnly>
struct upper_lower_fn {
const column_device_view d_column;
character_flags_table_type case_flag; // flag to check with on each character
const character_flags_table_type* d_flags;
const character_cases_table_type* d_case_table;
const special_case_mapping* d_special_case_mapping;
// Only populated for the ExecuteOp pass: per-row output offsets and chars buffer.
const int32_t* d_offsets{};
char* d_chars{};
// Hash-indexed lookup into the special-case (multi-character) mapping table.
__device__ special_case_mapping get_special_case_mapping(uint32_t code_point)
{
return d_special_case_mapping[get_special_case_hash_index(code_point)];
}
// compute-size / copy the bytes representing the special case mapping for this codepoint
// (SizeOnly only counts bytes; ExecuteOp also writes them and advances d_buffer)
__device__ int32_t handle_special_case_bytes(uint32_t code_point,
char*& d_buffer,
detail::character_flags_table_type flag)
{
special_case_mapping m = get_special_case_mapping(code_point);
size_type bytes = 0;
// a lower-case input maps to its (possibly multi-char) upper sequence, and vice versa
auto const count = IS_LOWER(flag) ? m.num_upper_chars : m.num_lower_chars;
auto const* chars = IS_LOWER(flag) ? m.upper : m.lower;
for (uint16_t idx = 0; idx < count; idx++) {
if (Pass == SizeOnly) {
bytes += detail::bytes_in_char_utf8(detail::codepoint_to_utf8(chars[idx]));
} else {
bytes += detail::from_char_utf8(detail::codepoint_to_utf8(chars[idx]), d_buffer + bytes);
}
}
if (d_buffer != nullptr) { d_buffer += bytes; }
return bytes;
}
// Per-string entry point: returns the output size in bytes (SizeOnly) or
// writes the converted string at d_chars + d_offsets[idx] (ExecuteOp).
__device__ int32_t operator()(size_type idx)
{
if (d_column.is_null(idx)) return 0; // null string
string_view d_str = d_column.template element<string_view>(idx);
int32_t bytes = 0;
char* d_buffer = nullptr;
if (Pass == ExecuteOp) d_buffer = d_chars + d_offsets[idx];
for (auto itr = d_str.begin(); itr != d_str.end(); ++itr) {
uint32_t code_point = detail::utf8_to_codepoint(*itr);
// the flags table covers only the BMP; anything above U+FFFF has no case flags
detail::character_flags_table_type flag = code_point <= 0x00FFFF ? d_flags[code_point] : 0;
// we apply special mapping in two cases:
// - uncased characters with the special mapping flag, always
// - cased characters with the special mapping flag, when matching the input case_flag
//
if (IS_SPECIAL(flag) && ((flag & case_flag) || !IS_UPPER_OR_LOWER(flag))) {
bytes += handle_special_case_bytes(code_point, d_buffer, case_flag);
} else if (flag & case_flag) {
if (Pass == SizeOnly)
bytes += detail::bytes_in_char_utf8(detail::codepoint_to_utf8(d_case_table[code_point]));
else
d_buffer +=
detail::from_char_utf8(detail::codepoint_to_utf8(d_case_table[code_point]), d_buffer);
} else {
// not a character this conversion touches: copy it through unchanged
if (Pass == SizeOnly)
bytes += detail::bytes_in_char_utf8(*itr);
else
d_buffer += detail::from_char_utf8(*itr, d_buffer);
}
}
return bytes;
}
};
/**
* @brief Utility method for converting upper and lower case characters
* in a strings column.
*
* @param strings Strings to convert.
* @param case_flag The character type to convert (upper, lower, or both)
* @param mr Memory resource to use for allocation.
* @param stream Stream to use for any kernels launched.
* @return New strings column with characters converted.
*/
std::unique_ptr<column> convert_case(strings_column_view const& strings,
character_flags_table_type case_flag,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto strings_count = strings.size();
if (strings_count == 0) return detail::make_empty_strings_column(mr, stream);
auto execpol = rmm::exec_policy(stream);
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
size_type null_count = strings.null_count();
// copy null mask: output nullability mirrors the input exactly
rmm::device_buffer null_mask = copy_bitmask(strings.parent(), stream, mr);
// get the lookup tables used for case conversion
auto d_flags = get_character_flags_table();
auto d_case_table = get_character_cases_table();
auto d_special_case_mapping = get_special_case_mapping_table();
// build offsets column -- calculate the size of each output string
// (first pass of the two-pass upper_lower_fn functor)
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0),
upper_lower_fn<SizeOnly>{d_column, case_flag, d_flags, d_case_table, d_special_case_mapping});
auto offsets_column = detail::make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto offsets_view = offsets_column->view();
auto d_new_offsets = offsets_view.data<int32_t>();
// build the chars column -- convert characters based on case_flag parameter
// (the total byte count is the last offset; this reads device memory from host)
size_type bytes = thrust::device_pointer_cast(d_new_offsets)[strings_count];
auto chars_column =
strings::detail::create_chars_child_column(strings_count, null_count, bytes, mr, stream);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
// second pass: write converted bytes at the offsets computed above
thrust::for_each_n(
execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
upper_lower_fn<ExecuteOp>{
d_column, case_flag, d_flags, d_case_table, d_special_case_mapping, d_new_offsets, d_chars});
//
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask),
stream,
mr);
}
} // namespace
// Lower-casing converts exactly the characters currently flagged as upper case.
std::unique_ptr<column> to_lower(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
return convert_case(strings, IS_UPPER(0xFF), mr, stream);
}
//
// Upper-casing converts exactly the characters currently flagged as lower case.
std::unique_ptr<column> to_upper(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
return convert_case(strings, IS_LOWER(0xFF), mr, stream);
}
//
// Swap-case converts every cased character: upper becomes lower and vice versa.
std::unique_ptr<column> swapcase(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
return convert_case(strings, IS_LOWER(0xFF) | IS_UPPER(0xFF), mr, stream);
}
} // namespace detail
// APIs
// Public API wrapper: delegates to detail::to_lower on the default stream.
std::unique_ptr<column> to_lower(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();  // NVTX profiling range
return detail::to_lower(strings, mr);
}
// Public API wrapper: delegates to detail::to_upper on the default stream.
std::unique_ptr<column> to_upper(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();  // NVTX profiling range
return detail::to_upper(strings, mr);
}
// Public API wrapper: delegates to detail::swapcase on the default stream.
std::unique_ptr<column> swapcase(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();  // NVTX profiling range
return detail::swapcase(strings, mr);
}
} // namespace strings
} // namespace cudf
| b022bc08c6a1a73006affbb0bf27032ee2492100.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <strings/char_types/char_cases.h>
#include <strings/char_types/is_flags.h>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/case.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>
#include <strings/utilities.cuh>
#include <strings/utilities.hpp>
namespace cudf {
namespace strings {
namespace detail {
namespace {
/**
* @brief Used as template parameter to divide size calculation from
* the actual string operation within a function.
*
* Useful when most of the logic is identical for both passes.
*/
enum TwoPass {
SizeOnly = 0, ///< first pass: only compute the output size in bytes
ExecuteOp    ///< second pass: actually write the converted string bytes
};
/**
* @brief Per string logic for case conversion functions.
*
* @tparam Pass Determines if size calculation or output write is begin performed.
*/
template <TwoPass Pass = SizeOnly>
struct upper_lower_fn {
const column_device_view d_column;
character_flags_table_type case_flag; // flag to check with on each character
const character_flags_table_type* d_flags;
const character_cases_table_type* d_case_table;
const special_case_mapping* d_special_case_mapping;
// Only populated for the ExecuteOp pass: per-row output offsets and chars buffer.
const int32_t* d_offsets{};
char* d_chars{};
// Hash-indexed lookup into the special-case (multi-character) mapping table.
__device__ special_case_mapping get_special_case_mapping(uint32_t code_point)
{
return d_special_case_mapping[get_special_case_hash_index(code_point)];
}
// compute-size / copy the bytes representing the special case mapping for this codepoint
// (SizeOnly only counts bytes; ExecuteOp also writes them and advances d_buffer)
__device__ int32_t handle_special_case_bytes(uint32_t code_point,
char*& d_buffer,
detail::character_flags_table_type flag)
{
special_case_mapping m = get_special_case_mapping(code_point);
size_type bytes = 0;
// a lower-case input maps to its (possibly multi-char) upper sequence, and vice versa
auto const count = IS_LOWER(flag) ? m.num_upper_chars : m.num_lower_chars;
auto const* chars = IS_LOWER(flag) ? m.upper : m.lower;
for (uint16_t idx = 0; idx < count; idx++) {
if (Pass == SizeOnly) {
bytes += detail::bytes_in_char_utf8(detail::codepoint_to_utf8(chars[idx]));
} else {
bytes += detail::from_char_utf8(detail::codepoint_to_utf8(chars[idx]), d_buffer + bytes);
}
}
if (d_buffer != nullptr) { d_buffer += bytes; }
return bytes;
}
// Per-string entry point: returns the output size in bytes (SizeOnly) or
// writes the converted string at d_chars + d_offsets[idx] (ExecuteOp).
__device__ int32_t operator()(size_type idx)
{
if (d_column.is_null(idx)) return 0; // null string
string_view d_str = d_column.template element<string_view>(idx);
int32_t bytes = 0;
char* d_buffer = nullptr;
if (Pass == ExecuteOp) d_buffer = d_chars + d_offsets[idx];
for (auto itr = d_str.begin(); itr != d_str.end(); ++itr) {
uint32_t code_point = detail::utf8_to_codepoint(*itr);
// the flags table covers only the BMP; anything above U+FFFF has no case flags
detail::character_flags_table_type flag = code_point <= 0x00FFFF ? d_flags[code_point] : 0;
// we apply special mapping in two cases:
// - uncased characters with the special mapping flag, always
// - cased characters with the special mapping flag, when matching the input case_flag
//
if (IS_SPECIAL(flag) && ((flag & case_flag) || !IS_UPPER_OR_LOWER(flag))) {
bytes += handle_special_case_bytes(code_point, d_buffer, case_flag);
} else if (flag & case_flag) {
if (Pass == SizeOnly)
bytes += detail::bytes_in_char_utf8(detail::codepoint_to_utf8(d_case_table[code_point]));
else
d_buffer +=
detail::from_char_utf8(detail::codepoint_to_utf8(d_case_table[code_point]), d_buffer);
} else {
// not a character this conversion touches: copy it through unchanged
if (Pass == SizeOnly)
bytes += detail::bytes_in_char_utf8(*itr);
else
d_buffer += detail::from_char_utf8(*itr, d_buffer);
}
}
return bytes;
}
};
/**
* @brief Utility method for converting upper and lower case characters
* in a strings column.
*
* @param strings Strings to convert.
* @param case_flag The character type to convert (upper, lower, or both)
* @param mr Memory resource to use for allocation.
* @param stream Stream to use for any kernels launched.
* @return New strings column with characters converted.
*/
std::unique_ptr<column> convert_case(strings_column_view const& strings,
character_flags_table_type case_flag,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto strings_count = strings.size();
if (strings_count == 0) return detail::make_empty_strings_column(mr, stream);
auto execpol = rmm::exec_policy(stream);
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
size_type null_count = strings.null_count();
// copy null mask: output nullability mirrors the input exactly
rmm::device_buffer null_mask = copy_bitmask(strings.parent(), stream, mr);
// get the lookup tables used for case conversion
auto d_flags = get_character_flags_table();
auto d_case_table = get_character_cases_table();
auto d_special_case_mapping = get_special_case_mapping_table();
// build offsets column -- calculate the size of each output string
// (first pass of the two-pass upper_lower_fn functor)
auto offsets_transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0),
upper_lower_fn<SizeOnly>{d_column, case_flag, d_flags, d_case_table, d_special_case_mapping});
auto offsets_column = detail::make_offsets_child_column(
offsets_transformer_itr, offsets_transformer_itr + strings_count, mr, stream);
auto offsets_view = offsets_column->view();
auto d_new_offsets = offsets_view.data<int32_t>();
// build the chars column -- convert characters based on case_flag parameter
// (the total byte count is the last offset; this reads device memory from host)
size_type bytes = thrust::device_pointer_cast(d_new_offsets)[strings_count];
auto chars_column =
strings::detail::create_chars_child_column(strings_count, null_count, bytes, mr, stream);
auto chars_view = chars_column->mutable_view();
auto d_chars = chars_view.data<char>();
// second pass: write converted bytes at the offsets computed above
thrust::for_each_n(
execpol->on(stream),
thrust::make_counting_iterator<size_type>(0),
strings_count,
upper_lower_fn<ExecuteOp>{
d_column, case_flag, d_flags, d_case_table, d_special_case_mapping, d_new_offsets, d_chars});
//
return make_strings_column(strings_count,
std::move(offsets_column),
std::move(chars_column),
null_count,
std::move(null_mask),
stream,
mr);
}
} // namespace
// Lower-casing converts exactly the characters currently flagged as upper case.
std::unique_ptr<column> to_lower(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
return convert_case(strings, IS_UPPER(0xFF), mr, stream);
}
//
// Upper-casing converts exactly the characters currently flagged as lower case.
std::unique_ptr<column> to_upper(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
return convert_case(strings, IS_LOWER(0xFF), mr, stream);
}
//
// Swap-case converts every cased character: upper becomes lower and vice versa.
std::unique_ptr<column> swapcase(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
return convert_case(strings, IS_LOWER(0xFF) | IS_UPPER(0xFF), mr, stream);
}
} // namespace detail
// APIs
// Public API wrapper: delegates to detail::to_lower on the default stream.
std::unique_ptr<column> to_lower(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();  // NVTX profiling range
return detail::to_lower(strings, mr);
}
// Public API wrapper: delegates to detail::to_upper on the default stream.
std::unique_ptr<column> to_upper(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();  // NVTX profiling range
return detail::to_upper(strings, mr);
}
// Public API wrapper: delegates to detail::swapcase on the default stream.
std::unique_ptr<column> swapcase(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();  // NVTX profiling range
return detail::swapcase(strings, mr);
}
} // namespace strings
} // namespace cudf
|
78b99f1c87f5274387c3b4fd246274ad7322d371.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
//#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <iostream>
#include <stdio.h>
#include <cmath>
#include "rotate_rect_ops.h"
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
if (error != hipSuccess) { \
std::cout << hipGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
#if 1
// Rotated-rectangle IoU.  Each region is (cx, cy, w, h, angle); w*h is the area
// and inter() computes the rotated intersection area.
__device__ inline float devRotateIoU(float const * const region1, float const * const region2) {
float area1 = region1[2] * region1[3];
float area2 = region2[2] * region2[3];
float area_inter = inter(region1, region2);
// 1e-8f (float literal) keeps the division in single precision; the original
// double literal 1e-8 silently promoted the whole expression to double.
// The epsilon guards against 0/0 for degenerate (zero-area) boxes.
return area_inter / (area1 + area2 - area_inter + 1e-8f);
}
#else
template <typename T>
__device__ inline float devRotateIoU(T const * const region1, T const * const region2) {
return computeRectIoU(region1, region2);
}
#endif
// Rotated-box NMS mask builder.  Each box is 6 floats; boxes must already be
// sorted by score.  The grid tiles the n_boxes x n_boxes comparison matrix in
// threadsPerBlock (64) wide chunks: column boxes are cached in shared memory,
// and each row thread packs "which columns I suppress" into one 64-bit word
// per column chunk of dev_mask.
__global__ void rotate_nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
// cache all column data in this block
__shared__ float block_boxes[threadsPerBlock * 6];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 6 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 0];
block_boxes[threadIdx.x * 6 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 1];
block_boxes[threadIdx.x * 6 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 2];
block_boxes[threadIdx.x * 6 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 3];
block_boxes[threadIdx.x * 6 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 4];
block_boxes[threadIdx.x * 6 + 5] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 5];
}
__syncthreads();
// iterate across each row in this block
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; // current row
const float *cur_box = dev_boxes + cur_box_idx * 6;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1; // if they are the same, skip to next (column)
}
// for this row, calculate all ious with each column
for (i = start; i < col_size; i++) {
float iou = devRotateIoU(cur_box, block_boxes + i * 6);
// printf("iou: %.3f\n", iou);
if (iou > nms_overlap_thresh) {
t |= 1ULL << i; // basically storing all overlaps across the columns, hashed into one single ULL index
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// Tiles the N x K rotated-IoU matrix.  Each block stages up to 64 boxes
// (5 floats each) from both input lists into shared memory, then every row
// thread computes its IoU against all staged query boxes and writes one row
// segment of dev_overlaps (row-major, N x K).
__global__ void overlaps_kernel(const int N, const int K, const float* dev_boxes,
const float * dev_query_boxes, float* dev_overlaps) {
const int col_start = blockIdx.y;
const int row_start = blockIdx.x;
const int row_size =
min(N - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(K - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
__shared__ float block_query_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_query_boxes[threadIdx.x * 5 + 0] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_query_boxes[threadIdx.x * 5 + 1] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_query_boxes[threadIdx.x * 5 + 2] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_query_boxes[threadIdx.x * 5 + 3] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_query_boxes[threadIdx.x * 5 + 4] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
if (threadIdx.x < row_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 4];
}
// barrier before any thread reads another thread's staged shared-memory data
__syncthreads();
if (threadIdx.x < row_size) {
for(int i = 0;i < col_size; i++) {
// offset = global_row * K + global_col
int offset = row_start*threadsPerBlock * K + col_start*threadsPerBlock + threadIdx.x*K+ i ;
dev_overlaps[offset] = devRotateIoU(block_boxes + threadIdx.x * 5, block_query_boxes + i * 5);
}
}
}
// Launches overlaps_kernel to fill the n x k IoU matrix on the given stream,
// using one 64-thread block per (row, col) tile, then waits for completion.
void _iou_matrix_launcher(float* overlaps, const float* boxes, const float* query_boxes,
int n, int k, hipStream_t stream)
{
dim3 blocks(THCCeilDiv(n, threadsPerBlock),
THCCeilDiv(k, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( overlaps_kernel), dim3(blocks), dim3(threads), 0, stream, n, k,
boxes,
query_boxes,
overlaps);
// Surface bad launch configurations immediately, then wait only on the
// launch stream: a device-wide sync would stall unrelated streams and its
// error code was previously ignored.
CUDA_CHECK(hipGetLastError());
CUDA_CHECK(hipStreamSynchronize(stream));
}
// boxes is a N x 6 tensor
// Rotated NMS.  boxes is an N x 6 tensor; column 5 is the score.  Boxes are
// sorted by descending score, the pairwise suppression bitmask is built on
// the GPU, then reduced greedily on the host.  Returns the kept indices
// (into the caller's original ordering), sorted ascending.
at::Tensor rotate_nms_cuda(const at::Tensor& boxes, float nms_overlap_thresh) {
using scalar_t = float;
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
auto scores = boxes.select(1, 5);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
// one 64-bit mask word per (box, column-chunk) pair
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( rotate_nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
// blocking copy also synchronizes with the kernel above
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
// host-side greedy pass: remv accumulates the suppression bits seen so far
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
// box i survives; merge in everything it suppresses
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
// map kept positions (in sorted order) back to original indices, then sort
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
// Computes the pairwise rotated-box IoU matrix (N x M) on the current
// stream.  Returns an all-zero matrix immediately when either list is empty.
at::Tensor rotate_iou_matrix_cuda(
const at::Tensor& r_boxes1, const at::Tensor& r_boxes2
)
{
int N = r_boxes1.size(0);
int M = r_boxes2.size(0);
at::Tensor iou_matrix = at::zeros({N, M}, r_boxes1.options());
if (N == 0 || M == 0)
return iou_matrix;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
_iou_matrix_launcher(iou_matrix.data<float>(), r_boxes1.contiguous().data<float>(),
r_boxes2.contiguous().data<float>(), N, M, stream);
THCudaCheck(hipGetLastError());
return iou_matrix;
} | 78b99f1c87f5274387c3b4fd246274ad7322d371.cu |
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
//#include <torch/extension.h>
#include <cuda_runtime.h>
#include <vector>
#include <iostream>
#include <stdio.h>
#include <cmath>
#include "rotate_rect_ops.h"
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
std::cout << cudaGetErrorString(error) << std::endl; \
} \
} while (0)
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;
#if 1
// Rotated-rectangle IoU.  Each region is (cx, cy, w, h, angle); w*h is the area
// and inter() computes the rotated intersection area.
__device__ inline float devRotateIoU(float const * const region1, float const * const region2) {
float area1 = region1[2] * region1[3];
float area2 = region2[2] * region2[3];
float area_inter = inter(region1, region2);
// 1e-8f (float literal) keeps the division in single precision; the original
// double literal 1e-8 silently promoted the whole expression to double.
// The epsilon guards against 0/0 for degenerate (zero-area) boxes.
return area_inter / (area1 + area2 - area_inter + 1e-8f);
}
#else
template <typename T>
__device__ inline float devRotateIoU(T const * const region1, T const * const region2) {
return computeRectIoU(region1, region2);
}
#endif
// Rotated-box NMS mask builder.  Each box is 6 floats; boxes must already be
// sorted by score.  The grid tiles the n_boxes x n_boxes comparison matrix in
// threadsPerBlock (64) wide chunks: column boxes are cached in shared memory,
// and each row thread packs "which columns I suppress" into one 64-bit word
// per column chunk of dev_mask.
__global__ void rotate_nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
// cache all column data in this block
__shared__ float block_boxes[threadsPerBlock * 6];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 6 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 0];
block_boxes[threadIdx.x * 6 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 1];
block_boxes[threadIdx.x * 6 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 2];
block_boxes[threadIdx.x * 6 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 3];
block_boxes[threadIdx.x * 6 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 4];
block_boxes[threadIdx.x * 6 + 5] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 5];
}
__syncthreads();
// iterate across each row in this block
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; // current row
const float *cur_box = dev_boxes + cur_box_idx * 6;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1; // if they are the same, skip to next (column)
}
// for this row, calculate all ious with each column
for (i = start; i < col_size; i++) {
float iou = devRotateIoU(cur_box, block_boxes + i * 6);
// printf("iou: %.3f\n", iou);
if (iou > nms_overlap_thresh) {
t |= 1ULL << i; // basically storing all overlaps across the columns, hashed into one single ULL index
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// Tiles the N x K rotated-IoU matrix.  Each block stages up to 64 boxes
// (5 floats each) from both input lists into shared memory, then every row
// thread computes its IoU against all staged query boxes and writes one row
// segment of dev_overlaps (row-major, N x K).
__global__ void overlaps_kernel(const int N, const int K, const float* dev_boxes,
const float * dev_query_boxes, float* dev_overlaps) {
const int col_start = blockIdx.y;
const int row_start = blockIdx.x;
const int row_size =
min(N - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(K - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
__shared__ float block_query_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_query_boxes[threadIdx.x * 5 + 0] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_query_boxes[threadIdx.x * 5 + 1] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_query_boxes[threadIdx.x * 5 + 2] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_query_boxes[threadIdx.x * 5 + 3] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_query_boxes[threadIdx.x * 5 + 4] =
dev_query_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
if (threadIdx.x < row_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * row_start + threadIdx.x) * 5 + 4];
}
// barrier before any thread reads another thread's staged shared-memory data
__syncthreads();
if (threadIdx.x < row_size) {
for(int i = 0;i < col_size; i++) {
// offset = global_row * K + global_col
int offset = row_start*threadsPerBlock * K + col_start*threadsPerBlock + threadIdx.x*K+ i ;
dev_overlaps[offset] = devRotateIoU(block_boxes + threadIdx.x * 5, block_query_boxes + i * 5);
}
}
}
// Launches overlaps_kernel to fill the n x k IoU matrix on the given stream,
// using one 64-thread block per (row, col) tile, then waits for completion.
void _iou_matrix_launcher(float* overlaps, const float* boxes, const float* query_boxes,
int n, int k, cudaStream_t stream)
{
dim3 blocks(THCCeilDiv(n, threadsPerBlock),
THCCeilDiv(k, threadsPerBlock));
dim3 threads(threadsPerBlock);
overlaps_kernel<<<blocks, threads, 0, stream>>>(n, k,
boxes,
query_boxes,
overlaps);
// Surface bad launch configurations immediately, then wait only on the
// launch stream.  cudaThreadSynchronize() is deprecated and its error code
// was ignored; a device-wide sync would also stall unrelated streams.
CUDA_CHECK(cudaGetLastError());
CUDA_CHECK(cudaStreamSynchronize(stream));
}
// boxes is a N x 6 tensor
// Rotated NMS.  boxes is an N x 6 tensor; column 5 is the score.  Boxes are
// sorted by descending score, the pairwise suppression bitmask is built on
// the GPU, then reduced greedily on the host.  Returns the kept indices
// (into the caller's original ordering), sorted ascending.
at::Tensor rotate_nms_cuda(const at::Tensor& boxes, float nms_overlap_thresh) {
using scalar_t = float;
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
auto scores = boxes.select(1, 5);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
// one 64-bit mask word per (box, column-chunk) pair
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
rotate_nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
// blocking copy also synchronizes with the kernel above
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
// host-side greedy pass: remv accumulates the suppression bits seen so far
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
// box i survives; merge in everything it suppresses
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
// map kept positions (in sorted order) back to original indices, then sort
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
// Computes the pairwise rotated-box IoU matrix (N x M) on the current
// stream.  Returns an all-zero matrix immediately when either list is empty.
at::Tensor rotate_iou_matrix_cuda(
const at::Tensor& r_boxes1, const at::Tensor& r_boxes2
)
{
int N = r_boxes1.size(0);
int M = r_boxes2.size(0);
at::Tensor iou_matrix = at::zeros({N, M}, r_boxes1.options());
if (N == 0 || M == 0)
return iou_matrix;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
_iou_matrix_launcher(iou_matrix.data<float>(), r_boxes1.contiguous().data<float>(),
r_boxes2.contiguous().data<float>(), N, M, stream);
THCudaCheck(cudaGetLastError());
return iou_matrix;
}
7dddb8bb9058907522a93e95fa7826f9e4d4ae05.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unistd.h>
#include <stdio.h>
#include <iostream>
/* we need these includes for CUDA's random number stuff */
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
using namespace std;
#define N 5
#define MAX 100
/* this GPU kernel function is used to initialize the random states */
/* Kernel: seeds one RNG state per block, so states[blockIdx.x] later yields
   an independent random sequence.  The caller launches one block per state
   (<<<N, 1>>> in main below). */
__global__ void init(unsigned int seed, hiprandState_t* states) {
/* we have to initialize the state */
hiprand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
blockIdx.x, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[blockIdx.x]);
}
/* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each */
/* Draws two floats per block from the block's private RNG state:
   numbers[blockIdx.x] is scaled into the 0-100 range and
   numbers[blockIdx.x + N] into the 20-50 range. */
__global__ void randoms(hiprandState_t* states, float* numbers) {
    const int slot = blockIdx.x;
    hiprandState_t* my_state = &states[slot];
    // Two successive draws from the same state; order determines the sequence.
    numbers[slot] = hiprand_uniform(my_state) * 100;
    numbers[slot + N] = hiprand_uniform(my_state) * (50 - 20) + 20;
}
/* Seeds per-block RNG states on the GPU, draws 2*N uniform floats with the
   randoms kernel, copies them back and prints them.
   NOTE(review): no hip call's return code is checked, so any failure here is
   silent. */
int main( ) {
/* CUDA's random number library uses hiprandState_t to keep track of the seed value
we will store a random state for every thread */
hiprandState_t* states;
/* allocate space on the GPU for the random states */
/* NOTE(review): 2*N states are allocated, but init<<<N,1>>> below only seeds
   the first N - and only those N are ever indexed by the kernels. */
hipMalloc((void**) &states, 2*N * sizeof(hiprandState_t));
/* invoke the GPU to initialize all of the random states */
hipLaunchKernelGGL(( init), dim3(N), dim3(1), 0, 0, time(0), states);
/* allocate an array of unsigned ints on the CPU and GPU */
float cpu_nums[2*N];
float* gpu_nums;
hipMalloc((void**) &gpu_nums, 2*N * sizeof(float));
/* invoke the kernel to get some random numbers */
hipLaunchKernelGGL(( randoms), dim3(N), dim3(1), 0, 0, states, gpu_nums);
/* copy the random numbers back (hipMemcpy synchronizes with the kernels) */
hipMemcpy(cpu_nums, gpu_nums, 2* N * sizeof(float), hipMemcpyDeviceToHost);
/* print them out */
for (int i = 0; i < 2*N; i++) {
// printf("%u\n", cpu_nums[i]);
cout<<cpu_nums[i]<<endl;
}
/* free the memory we allocated for the states and numbers */
hipFree(states);
hipFree(gpu_nums);
return 0;
}
| 7dddb8bb9058907522a93e95fa7826f9e4d4ae05.cu | #include <unistd.h>
#include <stdio.h>
#include <iostream>
/* we need these includes for CUDA's random number stuff */
#include <curand.h>
#include <curand_kernel.h>
using namespace std;
#define N 5
#define MAX 100
/* this GPU kernel function is used to initialize the random states */
/* Kernel: seeds one curandState_t per block so states[blockIdx.x] later
   yields an independent random sequence.  Launched as <<<N, 1>>> by main. */
__global__ void init(unsigned int seed, curandState_t* states) {
/* we have to initialize the state */
curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
blockIdx.x, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[blockIdx.x]);
}
/* this GPU kernel takes an array of states, and an array of ints, and puts a random int into each */
/* Draws two floats per block from states[blockIdx.x]: index blockIdx.x gets
   a value scaled into the 0-100 range, index blockIdx.x + N one in the
   20-50 range.  Both draws advance the same state in order. */
__global__ void randoms(curandState_t* states, float* numbers) {
/* curand works like rand - except that it takes a state as a parameter */
numbers[blockIdx.x] = curand_uniform(&states[blockIdx.x])*100;//% 100;
numbers[blockIdx.x+N] = curand_uniform(&states[blockIdx.x])*(50-20)+20;
}
/*
 * Seeds N per-block RNG states on the GPU, draws 2*N uniform floats with the
 * randoms kernel, copies them back and prints them.
 * Fix: every CUDA call is now checked (the original discarded all return
 * codes, so any failure was silent).  Returns 1 on the first error; output
 * on the success path is unchanged.
 */
int main( ) {
    /* CUDA's random number library uses curandState_t to keep track of the seed value
       we will store a random state for every thread */
    curandState_t* states;
    /* allocate space on the GPU for the random states.
       NOTE: 2*N states are allocated, but init<<<N,1>>> below only seeds the
       first N - and only those N are ever indexed by the kernels. */
    cudaError_t err = cudaMalloc((void**) &states, 2*N * sizeof(curandState_t));
    if (err != cudaSuccess) { fprintf(stderr, "cudaMalloc(states): %s\n", cudaGetErrorString(err)); return 1; }
    /* invoke the GPU to initialize all of the random states */
    init<<<N, 1>>>(time(0), states);
    err = cudaGetLastError();
    if (err != cudaSuccess) { fprintf(stderr, "init launch: %s\n", cudaGetErrorString(err)); return 1; }
    /* allocate an array of floats on the CPU and GPU */
    float cpu_nums[2*N];
    float* gpu_nums;
    err = cudaMalloc((void**) &gpu_nums, 2*N * sizeof(float));
    if (err != cudaSuccess) { fprintf(stderr, "cudaMalloc(gpu_nums): %s\n", cudaGetErrorString(err)); return 1; }
    /* invoke the kernel to get some random numbers */
    randoms<<<N, 1>>>(states, gpu_nums);
    err = cudaGetLastError();
    if (err != cudaSuccess) { fprintf(stderr, "randoms launch: %s\n", cudaGetErrorString(err)); return 1; }
    /* copy the random numbers back (cudaMemcpy synchronizes with the kernels) */
    err = cudaMemcpy(cpu_nums, gpu_nums, 2* N * sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) { fprintf(stderr, "cudaMemcpy: %s\n", cudaGetErrorString(err)); return 1; }
    /* print them out */
    for (int i = 0; i < 2*N; i++) {
        cout<<cpu_nums[i]<<endl;
    }
    /* free the memory we allocated for the states and numbers */
    cudaFree(states);
    cudaFree(gpu_nums);
    return 0;
}
|
6d2bb3d63b427a5bfbafbd4774312e1dda2f4bb6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "saxpy_float4s_shmem.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Launch-configuration sweep: 20 (BLOCKX, BLOCKY) thread-block shapes.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Problem-size sweep: 7 (XSIZE, YSIZE) matrix dimensions.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps the first argv[1] entries of matrices_ against all
// 20 block shapes, timing 1000 launches of saxpy_float4s_shmem per
// configuration and printing one "[usecs,(bx,by),(x,y)]" line each.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): every hipMalloc below passes XSIZE*YSIZE as the BYTE count,
// omitting sizeof(float)/sizeof(clock_t) - the buffers are undersized if the
// kernel touches one element per cell.
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
float a = 2;
clock_t *timer_vals = NULL;
hipMalloc(&timer_vals, XSIZE*YSIZE);
// Round the matrix dimensions up to multiples of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces lazy context creation before the timed section.
hipFree(0);hipLaunchKernelGGL((
saxpy_float4s_shmem), dim3(gridBlock),dim3(threadBlock), 0, 0, y,x,a,timer_vals);
hipDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
saxpy_float4s_shmem), dim3(gridBlock),dim3(threadBlock), 0, 0, y,x,a,timer_vals);
}
// Timed section: queues 1000 asynchronous launches with no synchronize
// before `end`, so this measures launch overhead rather than kernel time.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
saxpy_float4s_shmem), dim3(gridBlock),dim3(threadBlock), 0, 0, y,x,a,timer_vals);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// NOTE(review): y, x and timer_vals are never hipFree'd, so device memory
// leaks on every one of the up-to 140 iterations.
}
}} | 6d2bb3d63b427a5bfbafbd4774312e1dda2f4bb6.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "saxpy_float4s_shmem.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Launch-configuration sweep: 20 (BLOCKX, BLOCKY) thread-block shapes.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Problem-size sweep: 7 (XSIZE, YSIZE) matrix dimensions.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps the first argv[1] entries of matrices_ against all
// 20 block shapes, timing 1000 launches of saxpy_float4s_shmem per
// configuration and printing one "[usecs,(bx,by),(x,y)]" line each.
// Fixes: allocations now pass element_count * sizeof(type) (the original
// passed raw XSIZE*YSIZE as the byte count, undersizing every buffer), and
// the per-configuration buffers are freed (the original leaked them on every
// iteration).
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float *y = NULL;
            cudaMalloc(&y, XSIZE * YSIZE * sizeof(float));
            float *x = NULL;
            cudaMalloc(&x, XSIZE * YSIZE * sizeof(float));
            float a = 2;
            clock_t *timer_vals = NULL;
            cudaMalloc(&timer_vals, XSIZE * YSIZE * sizeof(clock_t));
            // Round the matrix dimensions up to multiples of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0)
            {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force lazy context creation before timing
            saxpy_float4s_shmem<<<gridBlock,threadBlock>>>(y, x, a, timer_vals);
            cudaDeviceSynchronize();
            // Warm-up launches (not timed).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                saxpy_float4s_shmem<<<gridBlock,threadBlock>>>(y, x, a, timer_vals);
            }
            // Timed section: 1000 asynchronous launches with no synchronize
            // before `end` - measures launch overhead, kept as-is so the
            // reported numbers stay comparable with the original.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                saxpy_float4s_shmem<<<gridBlock,threadBlock>>>(y, x, a, timer_vals);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release this configuration's device buffers.
            cudaFree(y);
            cudaFree(x);
            cudaFree(timer_vals);
        }
    }
}
1648dacc52949b40115eab10bcefa0c62018a2f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Carries the compile-time unroll factor used by foo's last #pragma unroll.
struct S1_t { static const int value = 4; };
/* Demonstrates #pragma unroll with no argument (full unroll), a
   constant-expression argument, a literal, and a template-dependent constant.
   With X = 7 and T2 = S1_t (as instantiated by bar) the factors are:
   full, 8, 1 (disabled), and 4. */
template <int X, typename T2>
__device__ void foo(int *p1, int *p2) {
// no argument specified, loop will be completely unrolled
#pragma unroll
for (int i = 0; i < 12; ++i)
p1[i] += p2[i] * 2;
// unroll value = 8
#pragma unroll (X+1)
for (int i = 0; i < 12; ++i)
p1[i] += p2[i] * 4;
// unroll value = 1, loop unrolling disabled
#pragma unroll 1
for (int i = 0; i < 12; ++i)
p1[i] += p2[i] * 8;
// unroll value = 4
#pragma unroll (T2::value)
for (int i = 0; i < 12; ++i)
p1[i] += p2[i] * 16;
}
/* Kernel wrapper: instantiates foo with X = 7 (unroll factor X+1 = 8) and
   T2 = S1_t (unroll factor S1_t::value = 4). */
__global__ void bar(int *p1, int *p2) {
foo<7, S1_t>(p1, p2);
}
| 1648dacc52949b40115eab10bcefa0c62018a2f1.cu | struct S1_t { static const int value = 4; };
/* Demonstrates #pragma unroll with no argument (full unroll), a
   constant-expression argument, a literal, and a template-dependent constant.
   With X = 7 and T2 = S1_t (as instantiated by bar) the factors are:
   full, 8, 1 (disabled), and 4. */
template <int X, typename T2>
__device__ void foo(int *p1, int *p2) {
// no argument specified, loop will be completely unrolled
#pragma unroll
for (int i = 0; i < 12; ++i)
p1[i] += p2[i] * 2;
// unroll value = 8
#pragma unroll (X+1)
for (int i = 0; i < 12; ++i)
p1[i] += p2[i] * 4;
// unroll value = 1, loop unrolling disabled
#pragma unroll 1
for (int i = 0; i < 12; ++i)
p1[i] += p2[i] * 8;
// unroll value = 4
#pragma unroll (T2::value)
for (int i = 0; i < 12; ++i)
p1[i] += p2[i] * 16;
}
/* Kernel wrapper: instantiates foo with X = 7 (unroll factor X+1 = 8) and
   T2 = S1_t (unroll factor S1_t::value = 4). */
__global__ void bar(int *p1, int *p2) {
foo<7, S1_t>(p1, p2);
}
|
b7f7200bafcc1181a9de86d2b61c94df71e3956b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* *
* EN.605.417.FA17 - Bill Burgett
*
* This program is to test the functionality and compare execution times
* of host and global memory. Specifically, it utilizes both pageable
* and pinned memory on the host, and copies the specified memory to the
* device for execution. Enable debug to compare outputs of the device
* against outputs from the host to verify that the functions executed
* correctly. The inputs are fairly simple with the first being an array
* counting from 0 to WORK_SIZE, and the second input is just double the first.
* The function operating on both the host and the device is simply adding
* these arrays together.
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
using namespace std;
//set the size of the input arrays
static const int WORK_SIZE = 4096;
/*
* function can be called from host or kernel to perform addition.
*/
/* Adds two unsigned ints.  Compiled for both host and device so the kernel
   and the host-side verification use the same arithmetic. */
__host__ __device__ unsigned int devAdd(unsigned int num_a, unsigned int num_b) {
    const unsigned int sum = num_a + num_b;
    return sum;
}
/*
* Helper function to get the time for execution length comparisons.
* source: Module 4 Activity Files: global_memory.cu
* https://github.com/JHU-EP-Intro2GPU/EN605.417.FA/blob/master/module4/global_memory.cu
*/
/* Creates an event, records it on the default stream, and returns it
 * immediately (no synchronization here - callers pair two events and call
 * hipEventElapsedTime after syncing the stop event).
 * NOTE(review): nothing in this file ever calls hipEventDestroy on these
 * events, so each call leaks one hipEvent_t. */
__host__ hipEvent_t get_time(void)
{
hipEvent_t time;
hipEventCreate(&time);
hipEventRecord(time);
return time;
}
/*
* CUDA kernel function that adds the elements from two arrays and stores result in the first.
*/
/*
 * CUDA kernel function that adds the elements from two arrays and stores the
 * result in the first.  Fix: uses the global 1-D thread index instead of the
 * bare threadIdx.x, so the kernel is correct for any multi-block launch
 * (behaviour is unchanged for the existing <<<1, WORK_SIZE>>> call sites,
 * where blockIdx.x is always 0).  Callers must launch exactly one thread per
 * element - there is no length parameter to bounds-check against.
 */
__global__ void devAdd(void *d_a, void *d_b) {
    unsigned int *id_a = (unsigned int*) d_a;
    unsigned int *id_b = (unsigned int*) d_b;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    id_a[i] = devAdd(id_a[i], id_b[i]);
}
/*
* Host function to create the paged memory and execute the kernel function
*/
/* Times H2D copy + kernel + sync for pageable host memory and returns the
 * elapsed milliseconds.  If debugFlag == 1, also prints every element with a
 * host-computed cross-check.
 * NOTE(review): the launch below requests WORK_SIZE (= 4096) threads in a
 * single block, which exceeds the 1024 threads-per-block hardware limit, so
 * the launch is expected to fail - and since no hip return codes are checked,
 * the failure would be silent.  The dynamic shared-memory argument is also
 * unused by the kernel, and hipDeviceReset() at the end tears down the whole
 * device context as a side effect. */
float PagedAdd(int debugFlag)
{
void *d_a = NULL;
void *d_b = NULL;
unsigned int idata_a[WORK_SIZE], idata_b[WORK_SIZE], odata[WORK_SIZE];
int i;
//device
hipMalloc((void**)&d_a, sizeof(int) * WORK_SIZE);
hipMalloc((void**)&d_b, sizeof(int) * WORK_SIZE);
//fill input data arrays: a[i] = i, b[i] = 2*i
for (i = 0; i < WORK_SIZE; i++){
idata_a[i] = (unsigned int)i;
idata_b[i] = (unsigned int)i * 2;
}
//start the clock
hipEvent_t start_time = get_time();
//transfer paged memory to device
hipMemcpy(d_a, idata_a, sizeof(int) * WORK_SIZE, hipMemcpyHostToDevice);
hipMemcpy(d_b, idata_b, sizeof(int) * WORK_SIZE, hipMemcpyHostToDevice);
//do the addition (result accumulates into d_a)
hipLaunchKernelGGL(( devAdd), dim3(1), dim3(WORK_SIZE), WORK_SIZE * sizeof(int), 0, d_a,d_b);
//wait for the GPU launched work to complete
hipDeviceSynchronize();
hipGetLastError();
//stop the clock
hipEvent_t end_time = get_time();
hipEventSynchronize(end_time);
//find execution time
float delta = 0;
hipEventElapsedTime(&delta, start_time, end_time);
//get result
hipMemcpy(odata, d_a, sizeof(int) * WORK_SIZE, hipMemcpyDeviceToHost);
//is debug on?
if (debugFlag == 1) {
//Print input, device result, and do the result on the host as well to check
for (i = 0; i < WORK_SIZE; i++)
printf("Pageable - Input values: %u + %u, device output: %u, host output: %u\n", idata_a[i], idata_b[i], odata[i], devAdd(idata_a[i], idata_b[i]));
}
hipFree((void* ) d_a);
hipFree((void* ) d_b);
hipDeviceReset();
return delta;
}
/*
* Host function to create the paged memory and execute the kernel function
*
* This was modified from the sample on slide 11 of the mod 4 video slides (Module4AHostMemory.ppt)
* source: Mod 4 video - CUDA Host Memory
* https://devblogs.nvidia.com/parallelforall/how-optimize-data-transfers-cuda-cc
*/
/* Times H2D copy + kernel + sync for pinned host memory and returns the
 * elapsed milliseconds.  Inputs are built in pageable arrays and copied into
 * pinned buffers before timing starts, so only the pinned transfer is timed.
 * NOTE(review): like PagedAdd, the launch requests WORK_SIZE (= 4096)
 * threads in one block - beyond the 1024 threads-per-block limit - and no
 * hip return codes are checked, so a failed launch is silent. */
float PinnedAdd(int debugFlag) {
//unsigned int nElements = 4 * 1024 * 1024;
const unsigned int bytes = WORK_SIZE * sizeof(int);
unsigned int *h_aPageable, *h_bPageable;
unsigned int *h_aPinned, *h_bPinned;
unsigned int *d_a, *d_b;
unsigned int *h_Result;
int i;
//result
h_Result = (unsigned int*)malloc(bytes);
//host pageable
h_aPageable = (unsigned int*)malloc(bytes);
h_bPageable = (unsigned int*)malloc(bytes);
//host pinned
hipHostMalloc((void**)&h_aPinned, bytes);
hipHostMalloc((void**)&h_bPinned, bytes);
//device
hipMalloc((void**)&d_a, bytes);
hipMalloc((void**)&d_b, bytes);
//fill input data arrays: a[i] = i, b[i] = 2*i
for (int i = 0; i < WORK_SIZE; i++) {
h_aPageable[i] = (unsigned int)i;
h_bPageable[i] = (unsigned int)i * 2;
}
//fill pinned memory
memcpy(h_aPinned, h_aPageable, bytes);
memcpy(h_bPinned, h_bPageable, bytes);
//start the clock
hipEvent_t start_time = get_time();
//transfer pinned memory to device
hipMemcpy(d_a, h_aPinned, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_bPinned, bytes, hipMemcpyHostToDevice);
//do the addition (result accumulates into d_a)
// NOTE(review): hipify left the raw triple-chevron launch syntax here
// instead of converting it to hipLaunchKernelGGL - presumably because of
// the unusual "<< <" spacing.
devAdd << <1, WORK_SIZE, WORK_SIZE * sizeof(int) >> >(d_a, d_b);
//wait for the GPU launched work to complete
hipDeviceSynchronize();
hipGetLastError();
//stop the clock
hipEvent_t end_time = get_time();
hipEventSynchronize(end_time);
//find execution time
float delta = 0;
hipEventElapsedTime(&delta, start_time, end_time);
//get device result
hipMemcpy(h_Result, d_a, bytes, hipMemcpyDeviceToHost);
//is debug on?
if (debugFlag == 1) {
//Print input, device result, and do the result on the host as well to check
for (i = 0; i < WORK_SIZE; i++)
printf("Pinned - Input values: %u + %u, device output: %u, host output: %u\n", h_aPinned[i], h_bPinned[i], h_Result[i], devAdd(h_aPinned[i], h_bPinned[i]));
}
//free memory
hipFree(d_a);
hipFree(d_b);
hipHostFree(h_aPinned);
hipHostFree(h_bPinned);
free(h_aPageable);
free(h_bPageable);
free(h_Result);
return delta;
}
/*
* main class to execute the above functions
*/
int main(void) {
// Ask whether to print the per-element debug output.
int userInput, debugFlag=0;
cout << "Enter 1 to run with debug on, else enter any number: ";
cin >> userInput;
cout << "";
if (userInput == 1)debugFlag = 1;
// Run the pinned benchmark first, then the pageable one (which ends with a
// device reset), and report both timings in milliseconds.
float durationPinned = PinnedAdd(debugFlag);
float durationPaged = PagedAdd(debugFlag);
printf("Paged time = %fms\nPinned time = %fms\n", durationPaged, durationPinned);
return 0;
} | b7f7200bafcc1181a9de86d2b61c94df71e3956b.cu | /* *
* EN.605.417.FA17 - Bill Burgett
*
* This program is to test the functionality and compare execution times
* of host and global memory. Specifically, it utilizes both pageable
* and pinned memory on the host, and copies the specified memory to the
* device for execution. Enable debug to compare outputs of the device
* against outputs from the host to verify that the functions executed
* correctly. The inputs are fairly simple with the first being an array
* counting from 0 to WORK_SIZE, and the second input is just double the first.
* The function operating on both the host and the device is simply adding
* these arrays together.
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
using namespace std;
//set the size of the input arrays
static const int WORK_SIZE = 4096;
/*
* function can be called from host or kernel to perform addition.
*/
/* Adds two unsigned ints.  Compiled for both host and device so the kernel
   and the host-side verification use the same arithmetic. */
__host__ __device__ unsigned int devAdd(unsigned int num_a, unsigned int num_b) {
return num_a+num_b;
}
/*
* Helper function to get the time for execution length comparisons.
* source: Module 4 Activity Files: global_memory.cu
* https://github.com/JHU-EP-Intro2GPU/EN605.417.FA/blob/master/module4/global_memory.cu
*/
/* Creates an event, records it on the default stream, and returns it
 * immediately (no synchronization here - callers pair two events and call
 * cudaEventElapsedTime after syncing the stop event).
 * NOTE(review): nothing in this file ever calls cudaEventDestroy on these
 * events, so each call leaks one cudaEvent_t. */
__host__ cudaEvent_t get_time(void)
{
cudaEvent_t time;
cudaEventCreate(&time);
cudaEventRecord(time);
return time;
}
/*
* CUDA kernel function that adds the elements from two arrays and stores result in the first.
*/
/*
 * CUDA kernel function that adds the elements from two arrays and stores the
 * result in the first.  Fix: uses the global 1-D thread index instead of the
 * bare threadIdx.x, so the kernel is correct for any multi-block launch
 * (behaviour is unchanged for the existing <<<1, WORK_SIZE>>> call sites,
 * where blockIdx.x is always 0).  Callers must launch exactly one thread per
 * element - there is no length parameter to bounds-check against.
 */
__global__ void devAdd(void *d_a, void *d_b) {
    unsigned int *id_a = (unsigned int*) d_a;
    unsigned int *id_b = (unsigned int*) d_b;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    id_a[i] = devAdd(id_a[i], id_b[i]);
}
/*
* Host function to create the paged memory and execute the kernel function
*/
/*
 * Times H2D copy + kernel + sync for pageable host memory and returns the
 * elapsed milliseconds.  If debugFlag == 1, also prints every element with a
 * host-computed cross-check.
 * Fix: cudaThreadSynchronize() is deprecated; replaced with
 * cudaDeviceSynchronize() (identical semantics).
 * NOTE(review): the launch requests WORK_SIZE (= 4096) threads in a single
 * block - beyond the 1024 threads-per-block limit - and no return codes are
 * checked, so a failed launch is silent.  The dynamic shared-memory argument
 * is unused by the kernel.
 */
float PagedAdd(int debugFlag)
{
void *d_a = NULL;
void *d_b = NULL;
unsigned int idata_a[WORK_SIZE], idata_b[WORK_SIZE], odata[WORK_SIZE];
int i;
//device
cudaMalloc((void**)&d_a, sizeof(int) * WORK_SIZE);
cudaMalloc((void**)&d_b, sizeof(int) * WORK_SIZE);
//fill input data arrays: a[i] = i, b[i] = 2*i
for (i = 0; i < WORK_SIZE; i++){
idata_a[i] = (unsigned int)i;
idata_b[i] = (unsigned int)i * 2;
}
//start the clock
cudaEvent_t start_time = get_time();
//transfer paged memory to device
cudaMemcpy(d_a, idata_a, sizeof(int) * WORK_SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, idata_b, sizeof(int) * WORK_SIZE, cudaMemcpyHostToDevice);
//do the addition (result accumulates into d_a)
devAdd<<<1, WORK_SIZE, WORK_SIZE * sizeof(int)>>>(d_a,d_b);
//wait for the GPU launched work to complete
cudaDeviceSynchronize();
cudaGetLastError();
//stop the clock
cudaEvent_t end_time = get_time();
cudaEventSynchronize(end_time);
//find execution time
float delta = 0;
cudaEventElapsedTime(&delta, start_time, end_time);
//get result
cudaMemcpy(odata, d_a, sizeof(int) * WORK_SIZE, cudaMemcpyDeviceToHost);
//is debug on?
if (debugFlag == 1) {
//Print input, device result, and do the result on the host as well to check
for (i = 0; i < WORK_SIZE; i++)
printf("Pageable - Input values: %u + %u, device output: %u, host output: %u\n", idata_a[i], idata_b[i], odata[i], devAdd(idata_a[i], idata_b[i]));
}
cudaFree((void* ) d_a);
cudaFree((void* ) d_b);
cudaDeviceReset();
return delta;
}
/*
* Host function to create the paged memory and execute the kernel function
*
* This was modified from the sample on slide 11 of the mod 4 video slides (Module4AHostMemory.ppt)
* source: Mod 4 video - CUDA Host Memory
* https://devblogs.nvidia.com/parallelforall/how-optimize-data-transfers-cuda-cc
*/
/* Times H2D copy + kernel + sync for pinned host memory and returns the
 * elapsed milliseconds.  Inputs are built in pageable arrays and copied into
 * pinned buffers before timing starts, so only the pinned transfer is timed.
 * NOTE(review): cudaThreadSynchronize() below is deprecated - use
 * cudaDeviceSynchronize().  The launch also requests WORK_SIZE (= 4096)
 * threads in one block, beyond the 1024 threads-per-block limit, and no
 * return codes are checked, so a failed launch is silent. */
float PinnedAdd(int debugFlag) {
//unsigned int nElements = 4 * 1024 * 1024;
const unsigned int bytes = WORK_SIZE * sizeof(int);
unsigned int *h_aPageable, *h_bPageable;
unsigned int *h_aPinned, *h_bPinned;
unsigned int *d_a, *d_b;
unsigned int *h_Result;
int i;
//result
h_Result = (unsigned int*)malloc(bytes);
//host pageable
h_aPageable = (unsigned int*)malloc(bytes);
h_bPageable = (unsigned int*)malloc(bytes);
//host pinned
cudaMallocHost((void**)&h_aPinned, bytes);
cudaMallocHost((void**)&h_bPinned, bytes);
//device
cudaMalloc((void**)&d_a, bytes);
cudaMalloc((void**)&d_b, bytes);
//fill input data arrays: a[i] = i, b[i] = 2*i
for (int i = 0; i < WORK_SIZE; i++) {
h_aPageable[i] = (unsigned int)i;
h_bPageable[i] = (unsigned int)i * 2;
}
//fill pinned memory
memcpy(h_aPinned, h_aPageable, bytes);
memcpy(h_bPinned, h_bPageable, bytes);
//start the clock
cudaEvent_t start_time = get_time();
//transfer pinned memory to device
cudaMemcpy(d_a, h_aPinned, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_bPinned, bytes, cudaMemcpyHostToDevice);
//do the addition (result accumulates into d_a)
devAdd << <1, WORK_SIZE, WORK_SIZE * sizeof(int) >> >(d_a, d_b);
//wait for the GPU launched work to complete
cudaThreadSynchronize();
cudaGetLastError();
//stop the clock
cudaEvent_t end_time = get_time();
cudaEventSynchronize(end_time);
//find execution time
float delta = 0;
cudaEventElapsedTime(&delta, start_time, end_time);
//get device result
cudaMemcpy(h_Result, d_a, bytes, cudaMemcpyDeviceToHost);
//is debug on?
if (debugFlag == 1) {
//Print input, device result, and do the result on the host as well to check
for (i = 0; i < WORK_SIZE; i++)
printf("Pinned - Input values: %u + %u, device output: %u, host output: %u\n", h_aPinned[i], h_bPinned[i], h_Result[i], devAdd(h_aPinned[i], h_bPinned[i]));
}
//free memory
cudaFree(d_a);
cudaFree(d_b);
cudaFreeHost(h_aPinned);
cudaFreeHost(h_bPinned);
free(h_aPageable);
free(h_bPageable);
free(h_Result);
return delta;
}
/*
* main class to execute the above functions
*/
/*
 * Program entry point: optionally enables per-element debug output, runs the
 * pinned-memory benchmark followed by the pageable-memory benchmark, and
 * reports both elapsed times in milliseconds.
 */
int main(void) {
    int userInput;
    int debugFlag = 0;
    cout << "Enter 1 to run with debug on, else enter any number: ";
    cin >> userInput;
    cout << "";
    if (userInput == 1) {
        debugFlag = 1;
    }
    const float durationPinned = PinnedAdd(debugFlag);
    const float durationPaged = PagedAdd(debugFlag);
    printf("Paged time = %fms\nPinned time = %fms\n", durationPaged, durationPinned);
    return 0;
}
4980d960dd37b6b0e4c9d5cfc3b0fee50f0d8a7b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "colorVectorField.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Launch-configuration sweep: 20 (BLOCKX, BLOCKY) thread-block shapes.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Problem-size sweep: 7 (XSIZE, YSIZE) matrix dimensions.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps the first argv[1] entries of matrices_ against all
// 20 block shapes, timing 1000 launches of colorVectorField per configuration
// and printing one "[usecs,(bx,by),(x,y)]" line each.
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): every hipMalloc below passes XSIZE*YSIZE as the BYTE count,
// omitting sizeof(float3)/sizeof(float2) - the buffers are undersized if the
// kernel touches one element per cell.
float3 *colors = NULL;
hipMalloc(&colors, XSIZE*YSIZE);
float3 *colorMap = NULL;
hipMalloc(&colorMap, XSIZE*YSIZE);
// NOTE(review): "__restrict__" placed before the '*' does not qualify the
// pointer; on a local declaration it adds nothing either way.
float2 __restrict__ *field = NULL;
hipMalloc(&field, XSIZE*YSIZE);
dim3 blocks = 1;
unsigned int simWidth = 1;
unsigned int simHeight = 1;
// Round the matrix dimensions up to multiples of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces lazy context creation before the timed section.
hipFree(0);hipLaunchKernelGGL((
colorVectorField), dim3(gridBlock),dim3(threadBlock), 0, 0, colors,colorMap,field,blocks,simWidth,simHeight);
hipDeviceSynchronize();
// Warm-up launches (not timed).
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
colorVectorField), dim3(gridBlock),dim3(threadBlock), 0, 0, colors,colorMap,field,blocks,simWidth,simHeight);
}
// Timed section: queues 1000 asynchronous launches with no synchronize
// before `end`, so this measures launch overhead rather than kernel time.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
colorVectorField), dim3(gridBlock),dim3(threadBlock), 0, 0, colors,colorMap,field,blocks,simWidth,simHeight);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
// NOTE(review): colors, colorMap and field are never hipFree'd, so device
// memory leaks on every iteration.
}
}} | 4980d960dd37b6b0e4c9d5cfc3b0fee50f0d8a7b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "colorVectorField.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Launch-configuration sweep: 20 (BLOCKX, BLOCKY) thread-block shapes.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// Problem-size sweep: 7 (XSIZE, YSIZE) matrix dimensions.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps the first argv[1] entries of matrices_ against all
// 20 block shapes, timing 1000 launches of colorVectorField per configuration
// and printing one "[usecs,(bx,by),(x,y)]" line each.
// Fixes: allocations now pass element_count * sizeof(type) (the original
// passed raw XSIZE*YSIZE as the byte count, undersizing every buffer), the
// per-configuration buffers are freed (the original leaked them), and a
// misplaced "__restrict__" on the local `field` declaration was dropped.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            float3 *colors = NULL;
            cudaMalloc(&colors, XSIZE * YSIZE * sizeof(float3));
            float3 *colorMap = NULL;
            cudaMalloc(&colorMap, XSIZE * YSIZE * sizeof(float3));
            float2 *field = NULL;
            cudaMalloc(&field, XSIZE * YSIZE * sizeof(float2));
            dim3 blocks = 1;
            unsigned int simWidth = 1;
            unsigned int simHeight = 1;
            // Round the matrix dimensions up to multiples of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0)
            {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force lazy context creation before timing
            colorVectorField<<<gridBlock,threadBlock>>>(colors, colorMap, field, blocks, simWidth, simHeight);
            cudaDeviceSynchronize();
            // Warm-up launches (not timed).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                colorVectorField<<<gridBlock,threadBlock>>>(colors, colorMap, field, blocks, simWidth, simHeight);
            }
            // Timed section: 1000 asynchronous launches with no synchronize
            // before `end` - measures launch overhead, kept as-is so the
            // reported numbers stay comparable with the original.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                colorVectorField<<<gridBlock,threadBlock>>>(colors, colorMap, field, blocks, simWidth, simHeight);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release this configuration's device buffers.
            cudaFree(colors);
            cudaFree(colorMap);
            cudaFree(field);
        }
    }
}
0e562ef583cc0f262329207e41967a33935121fa.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
// Absolute tolerance for verifying the GPU sum against the host reference.
const double EPS = 1.e-15;
/* Element-wise vector add: d_c[i] = d_a[i] + d_b[i] for every i < N.
   Expects a 1-D launch with at least N total threads; the guard discards
   the grid tail. */
__global__ void vecAdd(double *d_a, double *d_b, double *d_c, int N) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) {
        return;
    }
    d_c[idx] = d_a[idx] + d_b[idx];
}
/* Wall-clock time in seconds (microsecond resolution) via gettimeofday. */
double timer() {
    struct timeval tp;
    struct timezone tzp;
    gettimeofday(&tp, &tzp);
    const double seconds = (double)tp.tv_sec;
    const double micros = (double)tp.tv_usec;
    return seconds + micros * 1.e-06;
}
/*
 * Driver: builds two n-element vectors (sin^2 i and cos^2 i, so each pair of
 * elements sums to exactly 1), adds them on the CPU and the GPU, times both,
 * and verifies the GPU result against a + b within EPS.
 * Usage: ./prog <n>
 * Fixes: the verification now uses std::fabs (the original called abs() on a
 * double, which can bind to the int overload from <cstdlib> on some
 * toolchains and truncate every difference to 0, passing vacuously), and
 * "Check completed!" is only printed when the scan actually passed.
 */
int main (int argc, char**argv) {
    assert(argc == 2);
    int n = atoi(argv[1]);
    // host vectors
    double *h_a, *h_b, *h_c;
    size_t bytes = n * sizeof(double);
    h_a = (double*)malloc(bytes);
    h_b = (double*)malloc(bytes);
    h_c = (double*)malloc(bytes);
    for (int i = 0; i < n; i++) {
        h_a[i] = sin(i) * sin(i);
        h_b[i] = cos(i) * cos(i);
    }
    // CPU reference pass (timed)
    double t1 = timer();
    for (int i = 0; i < n; i++) {
        h_c[i] = h_a[i] + h_b[i];
    }
    double t2 = timer();
    std::cout << "cpu time is: " << t2-t1 << std::endl;
    // device input vectors
    double *d_a, *d_b, *d_c;
    hipMalloc(&d_a, bytes);
    hipMalloc(&d_b, bytes);
    hipMalloc(&d_c, bytes);
    hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
    int blockSize, gridSize;
    // num of threads in each thread block
    blockSize = 1024;
    // num of thread blocks in grid (ceil-div)
    gridSize = (n-1)/blockSize + 1;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    double t3 = timer();
    hipEventRecord(start);
    hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
    hipDeviceSynchronize();
    hipEventRecord(stop);
    double t4 = timer();
    hipMemcpy(h_c, d_c, bytes, hipMemcpyDeviceToHost);
    hipEventSynchronize(stop);
    float ms = 0;
    hipEventElapsedTime(&ms, start, stop);
    std::cout << "(Cuda runtime) Gpu time is: " << t4-t3 << std::endl;
    std::cout << "(Timer) Gpu time is: " << ms << std::endl;
    // Verify each element against the host-side sum.
    bool ok = true;
    for (int i = 0; i < n; i++) {
        if (std::fabs(h_c[i] - h_a[i] - h_b[i]) >= EPS) {
            std::cout << "CHECK FAILED!" << std::endl;
            std::cout << "DIFFER: " << std::fabs(h_c[i] - h_a[i] - h_b[i]) << std::endl;
            ok = false;
            break;
        }
    }
    if (ok)
        std::cout << "Check completed!" << std::endl;
    free(h_a);
    free(h_b);
    free(h_c);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
    return 0;
}
| 0e562ef583cc0f262329207e41967a33935121fa.cu | #include <assert.h>
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <cuda.h>
#include <cuda_runtime.h>
#include <sys/time.h>
// Absolute tolerance for verifying the GPU sum against the host reference.
const double EPS = 1.e-15;
/* Element-wise vector add: d_c[i] = d_a[i] + d_b[i] for every i < N.
   Expects a 1-D launch with at least N total threads; the guard discards
   the grid tail. */
__global__ void vecAdd(double *d_a, double *d_b, double *d_c, int N) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) {
        return;
    }
    d_c[idx] = d_a[idx] + d_b[idx];
}
/* Wall-clock time in seconds (microsecond resolution) via gettimeofday,
   used to time both the CPU and GPU passes below. */
double timer() {
struct timeval tp;
struct timezone tzp;
gettimeofday(&tp, &tzp);
return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-06);
};
/*
 * Driver: builds two n-element vectors (sin^2 i and cos^2 i, so each pair of
 * elements sums to exactly 1), adds them on the CPU and the GPU, times both,
 * and verifies the GPU result against a + b within EPS.
 * Usage: ./prog <n>
 * Fixes: the verification now uses std::fabs (the original called abs() on a
 * double, which can bind to the int overload from <cstdlib> on some
 * toolchains and truncate every difference to 0, passing vacuously), and
 * "Check completed!" is only printed when the scan actually passed.
 */
int main (int argc, char**argv) {
    assert(argc == 2);
    int n = atoi(argv[1]);
    // host vectors
    double *h_a, *h_b, *h_c;
    size_t bytes = n * sizeof(double);
    h_a = (double*)malloc(bytes);
    h_b = (double*)malloc(bytes);
    h_c = (double*)malloc(bytes);
    for (int i = 0; i < n; i++) {
        h_a[i] = sin(i) * sin(i);
        h_b[i] = cos(i) * cos(i);
    }
    // CPU reference pass (timed)
    double t1 = timer();
    for (int i = 0; i < n; i++) {
        h_c[i] = h_a[i] + h_b[i];
    }
    double t2 = timer();
    std::cout << "cpu time is: " << t2-t1 << std::endl;
    // device input vectors
    double *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    int blockSize, gridSize;
    // num of threads in each thread block
    blockSize = 1024;
    // num of thread blocks in grid (ceil-div)
    gridSize = (n-1)/blockSize + 1;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    double t3 = timer();
    cudaEventRecord(start);
    vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    double t4 = timer();
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float ms = 0;
    cudaEventElapsedTime(&ms, start, stop);
    std::cout << "(Cuda runtime) Gpu time is: " << t4-t3 << std::endl;
    std::cout << "(Timer) Gpu time is: " << ms << std::endl;
    // Verify each element against the host-side sum.
    bool ok = true;
    for (int i = 0; i < n; i++) {
        if (std::fabs(h_c[i] - h_a[i] - h_b[i]) >= EPS) {
            std::cout << "CHECK FAILED!" << std::endl;
            std::cout << "DIFFER: " << std::fabs(h_c[i] - h_a[i] - h_b[i]) << std::endl;
            ok = false;
            break;
        }
    }
    if (ok)
        std::cout << "Check completed!" << std::endl;
    free(h_a);
    free(h_b);
    free(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
9254c3a6c092b6de187b440cd20cd54fd7ca882f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define N 4
#define MIN 1
#define MAX 9
// Fills the N x N matrix with integers drawn uniformly from [MIN, MAX].
void random_ints(int a[][N]){
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            a[row][col] = rand() % (MAX + 1 - MIN) + MIN;
        }
    }
}
// Prints the N x N matrix, one row per line, each entry in a width-2 field.
void print(int a[][N]){
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++)
            printf("%2d", a[row][col]);
        printf("\n");
    }
}
// Element-wise matrix addition: c = a + b.
// Fix: the original body was an empty "insert your code here" placeholder,
// so c was printed uninitialized.  main launches one block of (N, N) threads;
// the global index + guard also keeps the kernel correct for larger launches.
__global__ void matAdd(int a[][N], int b[][N], int c[][N]){
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < N && col < N)
        c[row][col] = a[row][col] + b[row][col];
}
/*
 * Allocates device copies of two random N x N matrices, adds them on the GPU
 * with matAdd, copies the result back, and prints "a + b = c".
 * Fix: the memcpy direction flag was the non-existent identifier
 * "cudaMemHostToDevice" (a compile error); the correct HIP constant is
 * hipMemcpyHostToDevice.
 */
int main(void) {
    int a[N][N], b[N][N], c[N][N]; // host copies of a, b, c
    int (*d_a)[N], (*d_b)[N], (*d_c)[N]; // device copies of a, b, c
    int size = (N * N) * sizeof(int);
    // Alloc space for device copies of a, b, c
    hipMalloc((void **)&d_a, size);
    hipMalloc((void **)&d_b, size);
    hipMalloc((void **)&d_c, size);
    random_ints(a);
    random_ints(b);
    // Copy inputs to device
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
    int numBlocks = 1;
    dim3 threadsPerBlock(N, N);
    // Launch the add kernel on the GPU: one thread per matrix element
    hipLaunchKernelGGL(( matAdd), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_a, d_b, d_c);
    // Copy result back to host
    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
    // Print Result
    print(a); printf("+\n"); print(b); printf("=\n"); print(c);
    // Cleanup
    hipFree(d_a); hipFree(d_b); hipFree(d_c);
    return 0;
} | 9254c3a6c092b6de187b440cd20cd54fd7ca882f.cu | #include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define N 4
#define MIN 1
#define MAX 9
/* Fill the N x N matrix with pseudo-random values in [MIN, MAX]. */
void random_ints(int a[][N]){
    const int span = MAX + 1 - MIN;   /* number of distinct values */
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            a[i][j] = MIN + rand() % span;
        }
    }
}
/* Print the N x N matrix: two characters per entry, one row per line. */
void print(int a[][N]){
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++)
            printf("%2d", a[row][col]);
        printf("\n");
    }
}
/* Element-wise matrix add: c = a + b.
 * Expected launch: one block of (N, N) threads, one thread per element. */
__global__ void matAdd(int a[][N], int b[][N], int c[][N]){
    int row = threadIdx.x;
    int col = threadIdx.y;
    /* Guard so over-sized launches cannot write out of bounds. */
    if (row < N && col < N)
        c[row][col] = a[row][col] + b[row][col];
}
int main(void) {
    int a[N][N], b[N][N], c[N][N];          // host copies of a, b, c
    int (*d_a)[N], (*d_b)[N], (*d_c)[N];    // device copies of a, b, c
    int size = (N * N) * sizeof(int);
    // Alloc space for device copies of a, b, c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    random_ints(a);
    random_ints(b);
    // Copy inputs to device.
    // BUG FIX: "cudaMemHostToDevice" is not a cudaMemcpyKind enumerator;
    // the correct constant is cudaMemcpyHostToDevice.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    int numBlocks = 1;
    dim3 threadsPerBlock(N, N);
    // Launch the add kernel: one N x N block, one thread per element.
    matAdd<<<numBlocks, threadsPerBlock>>>(d_a, d_b, d_c);
    // Copy result back to host (cudaMemcpy synchronizes with the kernel).
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    // Print Result
    print(a); printf("+\n"); print(b); printf("=\n"); print(c);
    // Cleanup
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
a840c7183fb40133ae57a51c6699b9610d71d940.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
   Course — "Application Development in CUDA"
   Task 2:
   Implement a program that applies filters to images.
   Possible filters: blur, edge detection, noise removal.
   Implement two variants of the program: one using shared memory
   and one using textures. Compare the execution times.
   The libpng library is recommended for image file handling (man libpng).
   Usage examples live in /usr/share/doc/libpng12-dev/examples/.
   Written by Pavel Santaev
*/
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include <png.h>
#include <libpng.h>
void abort(const char * s, ...);
/* Return a pointer to pixel (x, y) of a w*h row-major image with
 * pixelSize bytes per pixel. (h is unused but kept for call symmetry.) */
__device__ png_byte * getPixel(png_byte * img, int w, int h,
        int x, int y, size_t pixelSize){
    return img + (size_t)(y * w + x) * pixelSize;
}
/* Copy one pixel (pixelSize bytes) from pxIn to pxOut. */
__device__ void setPixel(png_byte * pxIn, png_byte * pxOut,
        size_t pixelSize){
    for (size_t b = 0; b < pixelSize; b++)
        pxOut[b] = pxIn[b];
}
/* Blend pxIn into pxOut channel-wise: pxOut = pxOut*betta + pxIn*alpha.
 * The result is truncated back to png_byte (no clamping is performed). */
__device__ void addPixel(png_byte * pxIn, png_byte * pxOut,
        double alpha, double betta, size_t pixelSize){
    for (size_t b = 0; b < pixelSize; b++) {
        double blended = (double)pxOut[b] * betta + (double)pxIn[b] * alpha;
        pxOut[b] = (png_byte)blended;
    }
}
/*
 * Writes one output pixel of an edge-detection filter.
 * The 3x3 neighbourhood of (x, y) is accumulated into a local pixel using
 * the weights in `sobel` (despite the name only two taps are non-zero:
 * +1 at the top-left neighbour, -1 at the centre — a Roberts-style
 * difference), then doubled and written to imgOut.
 * NOTE(review): border pixels read out of bounds via getPixel(x-1, y-1, ...)
 * — no clamping is done; confirm callers tolerate or avoid this.
 */
__device__ void setPixelForRobertFilter(
        png_byte * img, png_byte * imgOut,
        int width, int height,
        int x, int y, size_t pixelSize){
    /* Offsets of the 3x3 neighbourhood, row by row. */
    int idx[][2] =
    {
        {-1, -1}, {0, -1}, {1, -1},
        {-1, 0}, {0, 0}, {1, 0},
        {-1, 1}, {0, 1}, {1, 1}
    };
    /* Convolution weights, matching idx[] entry-for-entry. */
    int sobel[] =
    {
        1, 0, 0,
        0, -1, 0,
        0, 0, 0
    };
    png_byte * pxOut =
        getPixel(imgOut, width, height, x, y, pixelSize);
    /* Accumulate into a local buffer first (supports up to 32-byte pixels). */
    png_byte pxOutLoc[32] = {0};
    for (int i = 0; i < 9; i++){
        png_byte * pxIn =
            getPixel(img, width, height,
                x + idx[i][0], y + idx[i][1], pixelSize);
        /* alpha = weight/2 scales the tap; betta = 1 keeps the running sum. */
        addPixel(pxIn, pxOutLoc, ((double)sobel[i]) / 2, 1, pixelSize);
    }
    /* alpha = 0, betta = 2: doubles the accumulated value in place. */
    addPixel(pxOutLoc, pxOutLoc, 0, 2, pixelSize);
    setPixel(pxOutLoc, pxOut, pixelSize);
}
/*
 * Filter driver kernel. Each block owns a vertical strip of columns
 * (blockPxX wide) and each thread of the block owns a strip of rows
 * (threadPxY tall); the "+ 1" rounds the strip sizes up so the whole
 * image is covered. Launched 1-D; pixelSize is bytes per pixel.
 */
__global__ void filter(png_byte * img, png_byte * imgOut,
        int width, int height, size_t pixelSize){
    size_t blockPxX = width / gridDim.x + 1;
    size_t threadPxY = height / blockDim.x + 1;
    size_t startX = blockPxX * blockIdx.x;
    size_t startY = threadPxY * threadIdx.x;
    for (int i = 0; i < blockPxX; i++){
        for (int j = 0; j < threadPxY; j++){
            int x = startX + i;
            int y = startY + j;
            if (width > x && height > y){
                /* pxIn is computed but unused here; kept as in original. */
                png_byte * pxOut = getPixel(imgOut, width,
                        height, x, y, pixelSize);
                png_byte * pxIn = getPixel(img, width,
                        height, x, y, pixelSize);
                setPixelForRobertFilter(img, imgOut,
                        width, height, x, y, pixelSize);
                /* Force alpha opaque — assumes an RGBA pixel layout
                 * (pixelSize >= 4) — TODO confirm for all inputs. */
                pxOut[3] = 255;
                /*png_byte * pxOut = getPixel(imgOut, width,
                        height, x, y, pixelSize);
                png_byte * pxIn = getPixel(img, width,
                        height, x, y, pixelSize);
                setPixel(pxIn, pxOut, pixelSize);*/
            }
        }
    }
}
/* Selects device 0; returns false if the HIP runtime reports an error. */
bool initCuda(){
    int deviceCount = 0;
    hipError_t error;
    error = hipGetDeviceCount(&deviceCount);
    if (hipSuccess != error){
        printf("Error in hipGetDeviceCount: %s\n", hipGetErrorString(error));
        return false;
    }
    printf("hipGetDeviceCount = %x\n", deviceCount);
    int deviceID = 0;
    hipDeviceProp_t devProp;
    error = hipGetDeviceProperties(&devProp, deviceID);
    if (hipSuccess != error){
        // BUG FIX: "%d" was used with a const char* argument (undefined
        // behavior); the format specifier must be "%s".
        printf("Error in hipGetDeviceProperties: %s\n", hipGetErrorString(error));
        return false;
    }
    hipSetDevice(deviceID);
    return true;
}
/* Placeholder — never implemented; the row-by-row copy happens in main(). */
void copyPngToDevice(png_bytep * row_pointers){
}
/* Loads a PNG, runs the edge-detection filter on the GPU, and saves the
 * result as outImg.png. Usage: ./out <fileName>. */
int main(int argc, char ** args){
    png_structp png_ptr;
    png_infop info_ptr;
    png_bytep * row_pointers;
    png_bytep * row_pointers_res;
    // args: the input file name is required
    char * file_name;
    if (argc > 1){
        file_name = args[1];
    } else {
        abort("You should to add fileName to args.\n ./out [fileName]");
    }
    if (!initCuda()){
        return 0;
    }
    openPng(file_name, &png_ptr, &info_ptr, &row_pointers);
    int width = png_get_image_width(png_ptr, info_ptr);
    int height = png_get_image_height(png_ptr, info_ptr);
    png_byte color_type = png_get_color_type(png_ptr, info_ptr);
    png_byte bit_depth = png_get_bit_depth(png_ptr, info_ptr);
    // alloc device memory for the input and output images (tightly packed)
    png_byte * row_pointers_device;
    png_byte * row_pointers_device_out;
    size_t rowSize = png_get_rowbytes(png_ptr,info_ptr);
    hipMalloc(&row_pointers_device, height * rowSize);
    hipMalloc(&row_pointers_device_out, height * rowSize);
    // copy png to device row by row
    for (int i = 0; i < height; i++){
        hipMemcpy(&(row_pointers_device[i * rowSize]), row_pointers[i],
                rowSize, hipMemcpyHostToDevice);
    }
    dim3 threads = dim3(16, 1);
    dim3 blocks = dim3(16, 1);
    hipLaunchKernelGGL(( filter), dim3(blocks), dim3(threads), 0, 0, row_pointers_device,
            row_pointers_device_out, width, height, rowSize / width);
    // BUG FIX: surface kernel launch/execution errors explicitly instead of
    // silently relying on the implicit sync of the following hipMemcpy.
    hipError_t err = hipDeviceSynchronize();
    if (hipSuccess != err){
        printf("Kernel failed: %s\n", hipGetErrorString(err));
    }
    // copy result png back to host
    row_pointers_res = (png_bytep*) malloc(sizeof(png_bytep) * height);
    for (int y = 0; y < height; y++){
        row_pointers_res[y] =
                (png_byte*) malloc(rowSize);
    }
    for (int i = 0; i < height; i++){
        hipMemcpy(row_pointers_res[i], &(row_pointers_device_out[i * rowSize]),
                rowSize, hipMemcpyDeviceToHost);
    }
    // save png
    savePng("outImg.png", png_ptr, info_ptr, row_pointers_res);
    // free memory
    hipFree(row_pointers_device);
    hipFree(row_pointers_device_out);
    for (int y=0; y<height; y++){
        free(row_pointers[y]);
    }
    free(row_pointers);
    // BUG FIX: the result rows were leaked before.
    for (int y = 0; y < height; y++){
        free(row_pointers_res[y]);
    }
    free(row_pointers_res);
    printf("\nFinished\n");
    return 0;
}
| a840c7183fb40133ae57a51c6699b9610d71d940.cu | /*
Cource - "Разработка приложений на CUDA "
Task 2:
Реализовать программу для накладывания фильтров на изображения.
Возможные фильтры: размытие, выделение границ, избавление от шума.
Реализовать два варианта программы, а именно: с применением
разделяемой памяти и текстур. Сравнить время.
Для работы с графическими файлами рекомендуется использовать libpng (man libpng).
Примеры использования библиотеки в /usr/share/doc/libpng12-dev/examples/.
Written by Pavel Santaev
*/
#include <stdio.h>
#include <unistd.h>
#include <math.h>
#include <png.h>
#include <libpng.h>
void abort(const char * s, ...);
/* Return a pointer to pixel (x, y) of a w*h row-major image with
 * pixelSize bytes per pixel. (h is unused but kept for call symmetry.) */
__device__ png_byte * getPixel(png_byte * img, int w, int h,
        int x, int y, size_t pixelSize){
    return img + (size_t)(y * w + x) * pixelSize;
}
/* Copy one pixel (pixelSize bytes) from pxIn to pxOut. */
__device__ void setPixel(png_byte * pxIn, png_byte * pxOut,
        size_t pixelSize){
    for (size_t b = 0; b < pixelSize; b++)
        pxOut[b] = pxIn[b];
}
/* Blend pxIn into pxOut channel-wise: pxOut = pxOut*betta + pxIn*alpha.
 * The result is truncated back to png_byte (no clamping is performed). */
__device__ void addPixel(png_byte * pxIn, png_byte * pxOut,
        double alpha, double betta, size_t pixelSize){
    for (size_t b = 0; b < pixelSize; b++) {
        double blended = (double)pxOut[b] * betta + (double)pxIn[b] * alpha;
        pxOut[b] = (png_byte)blended;
    }
}
/*
 * Writes one output pixel of an edge-detection filter.
 * The 3x3 neighbourhood of (x, y) is accumulated into a local pixel using
 * the weights in `sobel` (only two taps are non-zero: +1 top-left, -1
 * centre — a Roberts-style difference), then doubled and written to imgOut.
 * NOTE(review): border pixels read out of bounds via getPixel(x-1, y-1, ...)
 * — no clamping is done; confirm callers tolerate or avoid this.
 */
__device__ void setPixelForRobertFilter(
        png_byte * img, png_byte * imgOut,
        int width, int height,
        int x, int y, size_t pixelSize){
    /* Offsets of the 3x3 neighbourhood, row by row. */
    int idx[][2] =
    {
        {-1, -1}, {0, -1}, {1, -1},
        {-1, 0}, {0, 0}, {1, 0},
        {-1, 1}, {0, 1}, {1, 1}
    };
    /* Convolution weights, matching idx[] entry-for-entry. */
    int sobel[] =
    {
        1, 0, 0,
        0, -1, 0,
        0, 0, 0
    };
    png_byte * pxOut =
        getPixel(imgOut, width, height, x, y, pixelSize);
    /* Accumulate into a local buffer first (supports up to 32-byte pixels). */
    png_byte pxOutLoc[32] = {0};
    for (int i = 0; i < 9; i++){
        png_byte * pxIn =
            getPixel(img, width, height,
                x + idx[i][0], y + idx[i][1], pixelSize);
        /* alpha = weight/2 scales the tap; betta = 1 keeps the running sum. */
        addPixel(pxIn, pxOutLoc, ((double)sobel[i]) / 2, 1, pixelSize);
    }
    /* alpha = 0, betta = 2: doubles the accumulated value in place. */
    addPixel(pxOutLoc, pxOutLoc, 0, 2, pixelSize);
    setPixel(pxOutLoc, pxOut, pixelSize);
}
/*
 * Filter driver kernel. Each block owns a vertical strip of columns
 * (blockPxX wide) and each thread of the block owns a strip of rows
 * (threadPxY tall); the "+ 1" rounds the strip sizes up so the whole
 * image is covered. Launched 1-D; pixelSize is bytes per pixel.
 */
__global__ void filter(png_byte * img, png_byte * imgOut,
        int width, int height, size_t pixelSize){
    size_t blockPxX = width / gridDim.x + 1;
    size_t threadPxY = height / blockDim.x + 1;
    size_t startX = blockPxX * blockIdx.x;
    size_t startY = threadPxY * threadIdx.x;
    for (int i = 0; i < blockPxX; i++){
        for (int j = 0; j < threadPxY; j++){
            int x = startX + i;
            int y = startY + j;
            if (width > x && height > y){
                /* pxIn is computed but unused here; kept as in original. */
                png_byte * pxOut = getPixel(imgOut, width,
                        height, x, y, pixelSize);
                png_byte * pxIn = getPixel(img, width,
                        height, x, y, pixelSize);
                setPixelForRobertFilter(img, imgOut,
                        width, height, x, y, pixelSize);
                /* Force alpha opaque — assumes an RGBA pixel layout
                 * (pixelSize >= 4) — TODO confirm for all inputs. */
                pxOut[3] = 255;
                /*png_byte * pxOut = getPixel(imgOut, width,
                        height, x, y, pixelSize);
                png_byte * pxIn = getPixel(img, width,
                        height, x, y, pixelSize);
                setPixel(pxIn, pxOut, pixelSize);*/
            }
        }
    }
}
/* Selects device 0; returns false if the CUDA runtime reports an error. */
bool initCuda(){
    int deviceCount = 0;
    cudaError_t error;
    error = cudaGetDeviceCount(&deviceCount);
    if (cudaSuccess != error){
        printf("Error in cudaGetDeviceCount: %s\n", cudaGetErrorString(error));
        return false;
    }
    printf("cudaGetDeviceCount = %x\n", deviceCount);
    int deviceID = 0;
    cudaDeviceProp devProp;
    error = cudaGetDeviceProperties(&devProp, deviceID);
    if (cudaSuccess != error){
        // BUG FIX: "%d" was used with a const char* argument (undefined
        // behavior); the format specifier must be "%s".
        printf("Error in cudaGetDeviceProperties: %s\n", cudaGetErrorString(error));
        return false;
    }
    cudaSetDevice(deviceID);
    return true;
}
/* Placeholder — never implemented; the row-by-row copy happens in main(). */
void copyPngToDevice(png_bytep * row_pointers){
}
/* Loads a PNG, runs the edge-detection filter on the GPU, and saves the
 * result as outImg.png. Usage: ./out <fileName>. */
int main(int argc, char ** args){
    png_structp png_ptr;
    png_infop info_ptr;
    png_bytep * row_pointers;
    png_bytep * row_pointers_res;
    // args: the input file name is required
    char * file_name;
    if (argc > 1){
        file_name = args[1];
    } else {
        abort("You should to add fileName to args.\n ./out [fileName]");
    }
    if (!initCuda()){
        return 0;
    }
    openPng(file_name, &png_ptr, &info_ptr, &row_pointers);
    int width = png_get_image_width(png_ptr, info_ptr);
    int height = png_get_image_height(png_ptr, info_ptr);
    png_byte color_type = png_get_color_type(png_ptr, info_ptr);
    png_byte bit_depth = png_get_bit_depth(png_ptr, info_ptr);
    // alloc device memory for the input and output images (tightly packed)
    png_byte * row_pointers_device;
    png_byte * row_pointers_device_out;
    size_t rowSize = png_get_rowbytes(png_ptr,info_ptr);
    cudaMalloc(&row_pointers_device, height * rowSize);
    cudaMalloc(&row_pointers_device_out, height * rowSize);
    // copy png to device row by row
    for (int i = 0; i < height; i++){
        cudaMemcpy(&(row_pointers_device[i * rowSize]), row_pointers[i],
                rowSize, cudaMemcpyHostToDevice);
    }
    dim3 threads = dim3(16, 1);
    dim3 blocks = dim3(16, 1);
    filter<<<blocks, threads>>>(row_pointers_device,
            row_pointers_device_out, width, height, rowSize / width);
    // BUG FIX: surface kernel launch/execution errors explicitly instead of
    // silently relying on the implicit sync of the following cudaMemcpy.
    cudaError_t err = cudaDeviceSynchronize();
    if (cudaSuccess != err){
        printf("Kernel failed: %s\n", cudaGetErrorString(err));
    }
    // copy result png back to host
    row_pointers_res = (png_bytep*) malloc(sizeof(png_bytep) * height);
    for (int y = 0; y < height; y++){
        row_pointers_res[y] =
                (png_byte*) malloc(rowSize);
    }
    for (int i = 0; i < height; i++){
        cudaMemcpy(row_pointers_res[i], &(row_pointers_device_out[i * rowSize]),
                rowSize, cudaMemcpyDeviceToHost);
    }
    // save png
    savePng("outImg.png", png_ptr, info_ptr, row_pointers_res);
    // free memory
    cudaFree(row_pointers_device);
    cudaFree(row_pointers_device_out);
    for (int y=0; y<height; y++){
        free(row_pointers[y]);
    }
    free(row_pointers);
    // BUG FIX: the result rows were leaked before.
    for (int y = 0; y < height; y++){
        free(row_pointers_res[y]);
    }
    free(row_pointers_res);
    printf("\nFinished\n");
    return 0;
}
|
9bc036f0b7a0ab26d7000cdc6886c3d370eb8b1a.hip | // !!! This is a file automatically generated by hipify!!!
//fail: assertion
//--blockDim=64 --gridDim=64 --no-inline
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <assert.h>
#define N 2//64
/* Device helper: returns the successor of x. */
__device__ int f(int x) {
  return 1 + x;
}
/* Every thread in the grid stores f(2) == 3 into the single location *y. */
__global__ void foo(int *y) {
  *y = f(2);
}
| 9bc036f0b7a0ab26d7000cdc6886c3d370eb8b1a.cu | //fail: assertion
//--blockDim=64 --gridDim=64 --no-inline
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <assert.h>
#define N 2//64
/* Device helper: returns the successor of x. */
__device__ int f(int x) {
  return 1 + x;
}
/* Every thread in the grid stores f(2) == 3 into the single location *y. */
__global__ void foo(int *y) {
  *y = f(2);
}
|
96a0fb5b5a8600dc45551b0bd4f30caeeebc2090.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <cstdint>
#include <cstring>   // memset (texture/resource descriptor zeroing)
#include <random>    // std::default_random_engine / std::uniform_real_distribution (ssaoInit)
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
namespace {
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
enum PrimitiveType{
Point = 1,
Line = 2,
Triangle = 3
};
struct VertexOut {
glm::vec4 pos;
// TODO: add new attributes to your VertexOut
// The attributes listed below might be useful,
// but always feel free to modify on your own
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor; // eye space normal used for shading, cuz normal will go wrong after perspective transformation
// glm::vec3 col;
glm::vec2 texcoord0;
hipArray* dev_diffuseTex = NULL;
hipTextureObject_t dev_diffuseTexObj = 0;
// ...
};
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
};
struct Fragment {
glm::vec3 eyePos;
glm::vec3 eyeNor;
float z, ssao;
// TODO: add new attributes to your Fragment
// The attributes listed below might be useful,
// but always feel free to modify on your own
// glm::vec3 eyePos; // eye space position used for shading
// glm::vec3 eyeNor;
// VertexAttributeTexcoord texcoord0;
// TextureData* dev_diffuseTex;
// ...
};
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
hipArray *dev_diffuseTex;
hipTextureObject_t dev_diffuseTexObj;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
// TODO: add more attributes when needed
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static unsigned long long *dev_depth = NULL;
static glm::ivec2 *dev_tiles_min = NULL, *dev_tiles_max = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
    // One thread per pixel on a 2-D launch; out-of-range threads do nothing.
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    int index = x + (y * w);

    if (x < w && y < h) {
        glm::vec3 color;
        // Clamp to [0,1] and scale to byte range. Float literals (255.0f)
        // keep the arithmetic in single precision on the device (the
        // original "255.0" promoted each multiply to double).
        color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0f;
        color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0f;
        color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0f;
        // Each thread writes one pixel location in the texture (textel)
        pbo[index].w = 0;
        pbo[index].x = color.x;
        pbo[index].y = color.y;
        pbo[index].z = color.z;
    }
}
static hipTextureObject_t dev_ssaoTexObj = 0;
static hipArray *dev_ssaoTexArray = nullptr;
static glm::vec3 *dev_ssaoKernel = nullptr;
static hipArray *dev_ssaoOutArray = nullptr;
static hipSurfaceObject_t dev_ssaoOutSurfObj;
static hipTextureObject_t dev_ssaoOutTexObj;
/* Builds the SSAO sampling kernel and rotation-noise texture, plus the
 * float output array with its surface/texture views.
 * nKernel/nRandom are side lengths (nKernel^2 samples, nRandom^2 noise
 * texels); width/height size the per-pixel output array. */
void ssaoInit(int nKernel, int nRandom, int width, int height) {
    std::uniform_real_distribution<float> dist(0.0, 1.0);
    std::default_random_engine rng;

    // generate sampling kernel: hemisphere-oriented vectors whose length
    // grows quadratically so samples cluster near the origin
    glm::vec3 *kern = new glm::vec3[nKernel*nKernel];
    float scale = 1.0f / (nKernel*nKernel);
    for (int i = 0; i < nKernel*nKernel; i++) {
        kern[i].x = 2.0f*dist(rng) - 1.0f;
        kern[i].y = 2.0f*dist(rng) - 1.0f;
        kern[i].z = dist(rng);
        kern[i] = glm::normalize(kern[i]) * scale;
        scale = 0.1f + 0.9f*scale*scale;
    }
    hipMalloc(&dev_ssaoKernel, nKernel*nKernel*sizeof(glm::vec3));
    hipMemcpy(dev_ssaoKernel, kern, nKernel*nKernel*sizeof(glm::vec3), hipMemcpyHostToDevice);
    delete[] kern;   // BUG FIX: was `delete` on a new[] allocation (UB)

    // generate randomization texture (xy = random rotation vector)
    float4 *noise = new float4[nRandom*nRandom];
    for (int i = 0; i < nRandom*nRandom; i++) {
        noise[i].x = 2.0f*dist(rng) - 1.0f;
        noise[i].y = 2.0f*dist(rng) - 1.0f;
        noise[i].z = noise[i].w = 0.0f;
    }
    hipChannelFormatDesc channel = hipCreateChannelDesc<float4>();
    hipMallocArray(&dev_ssaoTexArray, &channel, nRandom, nRandom);
    hipMemcpyToArray(dev_ssaoTexArray, 0, 0, noise, nRandom*nRandom*sizeof(float4), hipMemcpyHostToDevice);
    checkCUDAError("Set Texture Image data");
    delete[] noise;  // BUG FIX: was `delete` on a new[] allocation (UB)

    // Specify texture
    hipResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = hipResourceTypeArray;
    resDesc.res.array.array = dev_ssaoTexArray;

    // Specify texture object parameters
    hipTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = hipAddressModeWrap;
    texDesc.addressMode[1] = hipAddressModeWrap;
    texDesc.filterMode = hipFilterModeLinear;
    texDesc.readMode = hipReadModeElementType;
    texDesc.normalizedCoords = 1;

    // Create texture object
    hipCreateTextureObject(&dev_ssaoTexObj, &resDesc, &texDesc, NULL);

    // Create output array (one float per pixel) + surface/texture views
    channel = hipCreateChannelDesc<float>();
    hipMallocArray(&dev_ssaoOutArray, &channel, width, height);
    resDesc.res.array.array = dev_ssaoOutArray;
    hipCreateSurfaceObject(&dev_ssaoOutSurfObj, &resDesc);
    hipCreateTextureObject(&dev_ssaoOutTexObj, &resDesc, &texDesc, NULL);
}
/**
* Called once at the beginning of the program to allocate memory.
*/
/* Allocates (or re-allocates) the per-pixel buffers for a w x h target and
 * initialises the SSAO resources. hipFree(NULL) is a no-op, so the frees
 * are safe on the very first call. */
void rasterizeInit(int w, int h) {
    width = w;
    height = h;
    hipFree(dev_fragmentBuffer);
    hipMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
    hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
    hipFree(dev_framebuffer);
    hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
    hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
    // depth buffer is packed (depth, primitive id) keys — see depthPass
    hipFree(dev_depth);
    hipMalloc(&dev_depth, width * height * sizeof(unsigned long long));
    // 8x8-sample SSAO kernel with a 4x4 rotation-noise texture
    ssaoInit(8, 4, w, h);
    checkCUDAError("rasterizeInit");
}
/**
* kern function with support for stride to sometimes replace hipMemcpy
* One thread is responsible for copying one component
*/
/* Strided device-to-device copy: de-interleaves N components (n per
 * attribute, componentTypeByteSize bytes each) from a possibly strided
 * glTF buffer view into a tightly packed destination. One thread copies
 * one component; 1-D launch. */
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
    // Attribute (vec3 position)
    // component (3 * float)
    // byte (4 * byte)

    // id of component
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < N) {
        // count = which attribute instance; offset = which component in it
        int count = i / n;
        int offset = i - count * n; // which component of the attribute

        // Copy the component byte by byte; a byteStride of 0 means the
        // source is tightly packed too.
        for (int j = 0; j < componentTypeByteSize; j++) {
            dev_dst[count * componentTypeByteSize * n
                + offset * componentTypeByteSize
                + j]

                =

            dev_src[byteOffset
                + count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
                + offset * componentTypeByteSize
                + j];
        }
    }
}
/* Bakes the node-hierarchy transform into the vertex data in place:
 * positions by MV, normals by MV_normal (renormalized). One thread per
 * vertex; 1-D launch. */
__global__
void _nodeMatrixTransform(
    int numVertices,
    VertexAttributePosition* position,
    VertexAttributeNormal* normal,
    glm::mat4 MV, glm::mat3 MV_normal) {

    // vertex id
    int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (vid < numVertices) {
        position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
        normal[vid] = glm::normalize(MV_normal * normal[vid]);
    }
}
/* Builds a node's local transform from a tinygltf node: either its explicit
 * 4x4 matrix, or the composition of translation / rotation / scale. */
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
    glm::mat4 curMatrix(1.0);

    const std::vector<double> &m = n.matrix;
    if (m.size() > 0) {
        // matrix, copy it (glTF stores column-major, like glm)
        for (int i = 0; i < 4; i++) {
            for (int j = 0; j < 4; j++) {
                curMatrix[i][j] = (float)m.at(4 * i + j);
            }
        }
    } else {
        // no matrix, use rotation, scale, translation
        if (n.translation.size() > 0) {
            curMatrix[3][0] = n.translation[0];
            curMatrix[3][1] = n.translation[1];
            curMatrix[3][2] = n.translation[2];
        }

        if (n.rotation.size() > 0) {
            glm::mat4 R;
            glm::quat q;
            // glTF stores the quaternion as [x, y, z, w], which matches
            // glm::quat's operator[] component order.
            q[0] = n.rotation[0];
            q[1] = n.rotation[1];
            q[2] = n.rotation[2];
            q[3] = n.rotation[3];   // BUG FIX: w was never copied before
            R = glm::mat4_cast(q);
            curMatrix = curMatrix * R;
        }

        if (n.scale.size() > 0) {
            curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
        }
    }

    return curMatrix;
}
/* Depth-first walk of the scene graph: records each node's composed
 * world matrix (parentMatrix * local) into n2m, then recurses. */
void traverseNode (
    std::map<std::string, glm::mat4> & n2m,
    const tinygltf::Scene & scene,
    const std::string & nodeString,
    const glm::mat4 & parentMatrix
    )
{
    const tinygltf::Node & node = scene.nodes.at(nodeString);
    const glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(node);
    n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));

    for (auto childIt = node.children.begin(); childIt != node.children.end(); ++childIt) {
        traverseNode(n2m, scene, *childIt, M);
    }
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
hipMalloc(&dev_bufferView, bufferView.byteLength);
hipMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, hipMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffer of indices, materail, and each attributes
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = nullptr;
VertexAttributePosition* dev_position = nullptr;
VertexAttributeNormal* dev_normal = nullptr;
VertexAttributeTexcoord* dev_texcoord0 = nullptr;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
hipMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// We assume the glTF model attribute type are 5126(FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
hipMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
hipMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You can only worry about this part once you started to
// implement textures for your rasterizer
hipArray* dev_diffuseTex = NULL;
hipTextureObject_t dev_diffuseTexObj = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
// convert image to rgba
std::vector<TextureData> rgbaImg;
for (int i = 0; i < image.image.size(); i++) {
rgbaImg.push_back(image.image[i]);
if(i % 3 == 2)
rgbaImg.push_back(0);
}
size_t s =rgbaImg.size() * sizeof(TextureData);
//printf("img size %d\n", s);
hipChannelFormatDesc channel = hipCreateChannelDesc(8,8,8,8,hipChannelFormatKindUnsigned);
hipMallocArray(&dev_diffuseTex, &channel, image.width, image.height);
hipMemcpyToArray(dev_diffuseTex, 0, 0, &rgbaImg[0], s, hipMemcpyHostToDevice);
checkCUDAError("Set Texture Image data");
// Specify texture
hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = dev_diffuseTex;
// Specify texture object parameters
hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeWrap;
texDesc.addressMode[1] = hipAddressModeWrap;
texDesc.filterMode = hipFilterModeLinear;
texDesc.readMode = hipReadModeNormalizedFloat;
texDesc.normalizedCoords = 1;
// Create texture object
hipCreateTextureObject(&dev_diffuseTexObj, &resDesc, &texDesc, NULL);
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materails
// You may have to take a look at tinygltfloader
// You can also use the above code loading diffuse material as a start point
}
// ---------Node hierarchy transform--------
hipDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
dev_diffuseTexObj,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
hipMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
hipMalloc(&dev_tiles_min, totalNumPrimitives * sizeof(glm::ivec2));
hipMalloc(&dev_tiles_max, totalNumPrimitives * sizeof(glm::ivec2));
}
// Finally, hipFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
hipFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
/* Vertex-shader stage: transforms each vertex into screen space and fills
 * its VertexOut record. One thread per vertex; 1-D launch. */
__global__
void _vertexTransformAndAssembly(
    int numVertices,
    PrimitiveDevBufPointers primitive,
    glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
    int width, int height) {

    // vertex id
    int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (vid < numVertices) {
        VertexOut &vOut = primitive.dev_verticesOut[vid];
        // Clip space ...
        vOut.pos = MVP*glm::vec4(primitive.dev_position[vid], 1);
        // ... perspective divide into NDC (w itself is left untouched) ...
        vOut.pos.x /= vOut.pos.w;
        vOut.pos.y /= vOut.pos.w;
        vOut.pos.z /= vOut.pos.w;
        // ... then viewport transform: NDC [-1,1] -> pixels, y flipped.
        vOut.pos.x = 0.5f * width * (vOut.pos.x + 1.0f);
        vOut.pos.y = 0.5f * height * (1.0f - vOut.pos.y);
        // Eye-space position/normal for shading after rasterization.
        vOut.eyePos = glm::vec3(MV*glm::vec4(primitive.dev_position[vid], 1));
        vOut.eyeNor = glm::normalize(MV_normal*primitive.dev_normal[vid]);
        vOut.dev_diffuseTex = primitive.dev_diffuseTex;
        vOut.dev_diffuseTexObj = primitive.dev_diffuseTexObj;
        // texcoord is optional (null for untextured meshes)
        if (primitive.dev_texcoord0)
            vOut.texcoord0 = primitive.dev_texcoord0[vid];
    }
}
static int curPrimitiveBeginId = 0;
/* Primitive assembly: groups every `primitiveType` consecutive indices
 * into one Primitive, writing into the global primitive array starting at
 * curPrimitiveBeginId. One thread per index; 1-D launch. */
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {

    // index id
    int iid = (blockIdx.x * blockDim.x) + threadIdx.x;

    if (iid < numIndices) {
        // Triangles only: pid = which primitive, iid % 3 = which corner.
        int pid;    // id for cur primitives vector
        if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) {
            pid = iid / (int)primitive.primitiveType;
            dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
                = primitive.dev_verticesOut[primitive.dev_indices[iid]];
        }

        // TODO: other primitive types (point, line)
    }
}
// based on http://stereopsis.com/radix.html
// Packs a float depth (bit-flipped so unsigned ordering matches float
// ordering) into the upper 32 bits of a uint64 and the primitive index into
// the lower 32 bits, so one atomicMin performs the depth test AND records
// the winning primitive.
// Fixes vs. original: uses __float_as_uint instead of a pointer cast
// (strict-aliasing UB), and casts `i` to uint32_t so a stray negative index
// cannot sign-extend into the depth bits.
__device__ static inline uint64_t FloatFlip(float f, int i)
{
    uint32_t fi = __float_as_uint(f);
    // Negative floats: flip all bits; non-negative: flip only the sign bit.
    uint32_t mask = (fi & 0x80000000u) ? 0xFFFFFFFFu : 0x80000000u;
    return (((uint64_t)(fi ^ mask)) << 32) | (uint32_t)i;
}
// Recovers the float depth stored by FloatFlip in the upper 32 bits of a
// packed depth/index word.
// BUG FIX: the unflip mask was inverted. FloatFlip stores non-negative
// floats with the sign bit SET (XOR 0x80000000) and negative floats with all
// bits flipped (high bit CLEAR). Undoing that requires:
//   high bit set   -> XOR 0x80000000
//   high bit clear -> XOR 0xFFFFFFFF
// The original selected the masks the other way around, so every decoded
// depth value was wrong.
__device__ static inline float FloatUnflip(uint64_t u)
{
    uint32_t hi = (uint32_t)(u >> 32);
    uint32_t mask = (hi & 0x80000000u) ? 0x80000000u : 0xFFFFFFFFu;
    return __uint_as_float(hi ^ mask);
}
// Depth pre-pass: one thread per primitive.
// Scans the triangle's screen-space bounding box; for every covered pixel,
// atomically min's the packed (flipped depth << 32 | primitive index) word,
// so the depth test and winner record happen in a single atomicMin.
// BUG FIX: the original loop conditions (`j >= 0` / `i >= 0` tested against
// a start value of aabb.min) aborted the whole scan whenever the AABB began
// off-screen, culling any triangle crossing the top or left edge. The start
// indices are now clamped to 0 instead.
__global__ void depthPass(int numPrimitives, Primitive *dev_primitives, int w, int h, unsigned long long *depth) {
    int pIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (pIdx >= numPrimitives)
        return;
    Primitive &p = dev_primitives[pIdx];
    glm::vec3 tri[3];
    tri[0] = glm::vec3(p.v[0].pos);
    tri[1] = glm::vec3(p.v[1].pos);
    tri[2] = glm::vec3(p.v[2].pos);
    AABB aabb;
    getAABBForTriangle(tri, aabb);
    // Clamp scan start to the framebuffer; upper bounds stay as float
    // comparisons to preserve the original coverage behavior.
    int jStart = (aabb.min[1] < 0) ? 0 : (int)aabb.min[1];
    int iStart = (aabb.min[0] < 0) ? 0 : (int)aabb.min[0];
    for (int j = jStart; j < aabb.max[1] && j < h; j++) {
        for (int i = iStart; i < aabb.max[0] && i < w; i++) {
            glm::vec2 coord(i,j);
            glm::vec3 bary = calculateBarycentricCoordinate(tri, coord);
            if (isBarycentricCoordInBounds(bary))
                atomicMin(&depth[i+w*j], FloatFlip(getZAtCoordinate(bary, tri),pIdx));
        }
    }
}
// Lambertian shading for one pixel of primitive p, writing the color to out.
// Interpolates eye-space direction/normal with screen-space barycentrics and
// uses w-divided barycentrics (texBary) for perspective-correct texcoords.
// NOTE(review): this helper appears unused in this file (the same logic is
// inlined in _fragRasterize) — verify callers before removing.
__device__ void shadeFragment(unsigned int i, unsigned int j, const Primitive &p, glm::vec3 &out) {
glm::vec3 tri[3];
tri[0] = glm::vec3(p.v[0].pos);
tri[1] = glm::vec3(p.v[1].pos);
tri[2] = glm::vec3(p.v[2].pos);
glm::vec2 coord(i,j);
glm::vec3 bary = calculateBarycentricCoordinate(tri, coord);
// lambert: -dot(viewDir, normal) clamped to [0,1]
glm::vec3 fragDir = glm::normalize(bary.x*p.v[0].eyePos + bary.y*p.v[1].eyePos + bary.z*p.v[2].eyePos);
glm::vec3 fragNrm = glm::normalize(bary.x*p.v[0].eyeNor + bary.y*p.v[1].eyeNor + bary.z*p.v[2].eyeNor);
glm::vec3 lambert = glm::clamp(-glm::vec3(glm::dot(fragDir, fragNrm)), 0.0f, 1.0f);
// Perspective-correct texcoords: divide barycentrics by each vertex's clip w
// (stored in pos[3] by the vertex stage), then renormalize by their sum.
glm::vec3 texBary = bary / glm::vec3(p.v[0].pos[3], p.v[1].pos[3], p.v[2].pos[3]);
glm::vec2 st = texBary[0]*p.v[0].texcoord0 + texBary[1]*p.v[1].texcoord0 + texBary[2]*p.v[2].texcoord0;
float norm = texBary[0] + texBary[1] + texBary[2];
// Modulate by the diffuse texture when the primitive has one.
if (p.v[0].dev_diffuseTex) {
float4 rgba = tex2D<float4>(p.v[0].dev_diffuseTexObj, st.x / norm, st.y / norm);
lambert *= glm::vec3(rgba.x,rgba.y,rgba.z);
}
out = lambert;
}
// Fragment stage: one thread per pixel.
// Reads the packed depth/index word written by depthPass, reconstructs the
// winning primitive's interpolants, and shades with Lambert + diffuse texture.
__global__ void _fragRasterize(int numPrimitives, Primitive *dev_primitives, Fragment *dev_fragments, int w, int h, unsigned long long *depth, glm::vec3 *framebuffer) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= w || j >= h)
return;
// Low 32 bits of the packed word = primitive index. Pixels never touched by
// depthPass still hold the 0xFF..FF memset sentinel, whose low word reads as
// -1 here, so they are skipped. (Assumes < 2^31 primitives.)
int pIdx = depth[i+w*j];
if (pIdx < 0)
return;
Primitive &p = dev_primitives[pIdx];
Fragment &f = dev_fragments[i+w*j];
glm::vec3 tri[3];
tri[0] = glm::vec3(p.v[0].pos);
tri[1] = glm::vec3(p.v[1].pos);
tri[2] = glm::vec3(p.v[2].pos);
glm::vec2 coord(i,j);
glm::vec3 bary = calculateBarycentricCoordinate(tri, coord);
// Unpack the float depth from the upper 32 bits for the SSAO pass.
f.z = FloatUnflip(depth[i+w*j]);
// lambert
// NOTE(review): f.eyePos is assigned the *normalized* interpolated position
// (i.e. a view direction), yet ssaoPassShared later treats f.eyePos as an
// eye-space position — confirm whether the normalize here is intended.
f.eyePos = glm::normalize(bary.x*p.v[0].eyePos
+ bary.y*p.v[1].eyePos
+ bary.z*p.v[2].eyePos);
f.eyeNor = glm::normalize(bary.x*p.v[0].eyeNor
+ bary.y*p.v[1].eyeNor
+ bary.z*p.v[2].eyeNor);
glm::vec3 lambert = glm::clamp(-glm::vec3(glm::dot(f.eyePos, f.eyeNor)), 0.0f, 1.0f);
// Perspective-correct texcoords via clip-w-divided barycentrics.
glm::vec3 texBary = bary / glm::vec3(p.v[0].pos[3], p.v[1].pos[3], p.v[2].pos[3]);
glm::vec2 st0 = texBary[0]*p.v[0].texcoord0
+ texBary[1]*p.v[1].texcoord0
+ texBary[2]*p.v[2].texcoord0;
st0 /= texBary[0] + texBary[1] + texBary[2];
if (p.v[0].dev_diffuseTex) {
float4 rgba = tex2D<float4>(p.v[0].dev_diffuseTexObj, st0.x, st0.y);
lambert *= glm::vec3(rgba.x,rgba.y,rgba.z);
}
framebuffer[i + w*j] = lambert;
}
// Hermite smoothstep: clamps (x-a)/(b-a) into [0,1], then eases with
// 3t^2 - 2t^3. Matches the GLSL builtin of the same name.
__device__ static inline float smoothstep(float a, float b, float x) {
    float t = (x - a) / (b - a);
    if (t < 0.0f) {
        t = 0.0f;
    } else if (t > 1.0f) {
        t = 1.0f;
    }
    return t * t * (3.0f - 2.0f * t);
}
// Screen-space ambient occlusion: one thread per pixel. Builds a TBN basis
// from a tiled random rotation texture, projects ssaoKernSize^2 hemisphere
// samples, and counts occluders to produce an AO factor in the framebuffer.
// Fixes vs. original: (1) ssaoTexObj is now a real texture-object handle —
// passing it through `int` truncated the 64-bit/pointer-sized handle;
// (2) sample coordinates are bounds-checked before indexing dev_fragments
// (the unguarded read was out of bounds near screen edges; the sibling
// ssaoPassShared already guards this).
// NOTE(review): `samp` is not divided by samp.w after projection — verify
// this is acceptable for the projection matrix in use (same in ssaoPassShared).
__global__ void ssaoPass(int w, int h, Fragment *dev_fragments, const glm::mat4 P,
int ssaoTexSize, hipTextureObject_t ssaoTexObj,
int ssaoKernSize, const glm::vec3 *ssaoKern,
float ssaoRadius, unsigned long long *depth, glm::vec3 *framebuffer) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= w || j >= h)
        return;
    Fragment &f = dev_fragments[i + w*j];
    // Random rotation vector (xy only) reorients the sampling hemisphere.
    float4 rVec4 = tex2D<float4>(ssaoTexObj, float(i)/ssaoTexSize, float(j)/ssaoTexSize);
    glm::vec3 rVec(rVec4.x, rVec4.y, 0.0);
    glm::vec3 tVec = glm::normalize(rVec - f.eyeNor * glm::dot(rVec, f.eyeNor));
    glm::vec3 bVec = glm::cross(f.eyeNor, tVec);
    glm::mat3 TBN(tVec, bVec, f.eyeNor);
    float ssao = 0.0f;
    for (int k = 0; k < ssaoKernSize*ssaoKernSize; k++) {
        glm::vec4 samp = P*glm::vec4(ssaoRadius*TBN*ssaoKern[k] + f.eyePos, 1.0f);
        int si = 0.5f * w * (samp.x + 1.0f);
        int sj = 0.5f * h * (1.0f - samp.y);
        // Skip samples that project outside the framebuffer.
        if (si < 0 || sj < 0 || si >= w || sj >= h)
            continue;
        float z = dev_fragments[si + w*sj].z;
        if (z > f.z)
            ssao += smoothstep(0.0, 1.0, ssaoRadius / fabs(f.z - z));
    }
    // Average occlusion and invert so 1 = fully open, 0 = fully occluded.
    ssao = 1.0 - ssao/(ssaoKernSize*ssaoKernSize);
    framebuffer[i+w*j] = glm::vec3(ssao);
}
// SSAO variant that writes the occlusion factor into each fragment's `ssao`
// field (consumed by ssaoBlur) instead of the framebuffer.
// Fixes vs. original: (1) ssaoTexObj is a real texture-object handle — the
// `int` parameter truncated it; (2) the extern-shared Fragment tile was only
// ever accessed at the owning thread's own slot, so it provided no sharing —
// replaced by a private local copy (callers may still pass dynamic shared
// memory at launch; it is simply unused).
// NOTE(review): `samp` is not divided by samp.w after projection — verify
// against the projection matrix in use.
__global__ void ssaoPassShared(int w, int h, Fragment *dev_fragments, const glm::mat4 P,
int ssaoTexSize, hipTextureObject_t ssaoTexObj,
int ssaoKernSize, const glm::vec3 *ssaoKern, float ssaoRadius) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= w || j >= h)
        return;
    // Private working copy of this pixel's fragment.
    Fragment f = dev_fragments[i + w*j];
    // Tiled random rotation vector reorients the sampling hemisphere.
    float4 rVec4 = tex2D<float4>(ssaoTexObj, float(i)/ssaoTexSize, float(j)/ssaoTexSize);
    glm::vec3 rVec(rVec4.x, rVec4.y, 0.0);
    glm::vec3 tVec = glm::normalize(rVec - f.eyeNor * glm::dot(rVec, f.eyeNor));
    glm::vec3 bVec = glm::cross(f.eyeNor, tVec);
    glm::mat3 TBN(tVec, bVec, f.eyeNor);
    f.ssao = 0.0;
    for (int k = 0; k < ssaoKernSize*ssaoKernSize; k++) {
        glm::vec4 samp = P*glm::vec4(ssaoRadius*TBN*ssaoKern[k] + f.eyePos, 1.0f);
        int si = glm::floor(0.5f * w * (samp.x + 1.0f));
        int sj = glm::floor(0.5f * h * (1.0f - samp.y));
        if (si >= 0 && sj >= 0 && si < w && sj < h) {
            float z = dev_fragments[si + w*sj].z;
            if (z > f.z)
                f.ssao += smoothstep(0.0, 1.0, ssaoRadius / fabs(f.z - z));
        }
    }
    // Average and invert: 1 = open, 0 = occluded.
    dev_fragments[i+w*j].ssao = 1.0 - f.ssao/(ssaoKernSize*ssaoKernSize);
}
// Box-blurs the per-fragment SSAO factor over an ssaoTexSize x ssaoTexSize
// window (removing the rotation-texture tiling pattern) and modulates the
// framebuffer color by it.
// Fix vs. original: the extern-shared Fragment tile was loaded but never
// read — dead shared-memory traffic removed. Callers may still pass dynamic
// shared memory at launch; it is simply unused.
__global__ void ssaoBlur(int w, int h, Fragment *dev_fragments, int ssaoTexSize, glm::vec3 *framebuffer) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= w || j >= h)
        return;
    int idx = i + w*j;
    float ssao = 0.0f;
    int n = 0;
    // Average over the in-bounds portion of the window centered near (i, j).
    for (int sj = j-ssaoTexSize/2; sj < j+ssaoTexSize/2; sj++) {
        for (int si = i-ssaoTexSize/2; si < i+ssaoTexSize/2; si++) {
            if (si < 0 || sj < 0 || si >= w || sj >= h)
                continue;
            ssao += dev_fragments[si + w*sj].ssao;
            n++;
        }
    }
    // n >= 1 always: the window contains (i, j) itself, which is in bounds.
    ssao /= n;
    framebuffer[idx] *= ssao;
}
/**
 * Perform rasterization: vertex transform + primitive assembly per mesh
 * primitive, a depth pre-pass, fragment shading, SSAO + blur, and finally
 * a copy into the OpenGL PBO for display.
 */
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal, const glm::mat4 &P) {
// 16x16 thread tiles covering the framebuffer for all per-pixel kernels.
int sideLength2d = 16;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Vertex processing & primitive assembly, batched per mesh primitive.
// curPrimitiveBeginId tracks where each batch starts in dev_primitives.
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
for (; it != itEnd; ++it) {
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p) {
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
hipLaunchKernelGGL(( _vertexTransformAndAssembly) , dim3(numBlocksForVertices), dim3(numThreadsPerBlock) , 0, 0, p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
// NOTE(review): both kernels launch on the same (null) stream, so they
// already serialize — this host sync is likely redundant; confirm.
hipDeviceSynchronize();
hipLaunchKernelGGL(( _primitiveAssembly) , dim3(numBlocksForIndices), dim3(numThreadsPerBlock) , 0, 0,
p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
checkCUDAError("Primitive Assembly");
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
// 0xFF bytes make every packed depth word all-ones — the "no primitive"
// sentinel that atomicMin in depthPass beats, and _fragRasterize skips.
hipMemset(dev_depth, 0xFF, width * height * sizeof(unsigned long long));
hipMemset(dev_framebuffer, 0, width*height*sizeof(glm::vec3));
// Depth pre-pass: one thread per primitive.
dim3 blockDim1d(1024);
dim3 blockCnt1d((totalNumPrimitives + blockDim1d.x - 1)/blockDim1d.x);
hipLaunchKernelGGL(( depthPass), dim3(blockCnt1d),dim3(blockDim1d), 0, 0, totalNumPrimitives, dev_primitives, width, height, dev_depth);
checkCUDAError("fragDepthFind");
// Shade the visible primitive at every pixel.
hipLaunchKernelGGL(( _fragRasterize), dim3(blockCount2d),dim3(blockSize2d), 0, 0, totalNumPrimitives, dev_primitives, dev_fragmentBuffer, width, height, dev_depth, dev_framebuffer);
checkCUDAError("fragRasterize");
//hipMemset(dev_framebuffer, 0, width*height*sizeof(glm::vec3));
// SSAO (4-px noise tile, 8x8 kernel, radius 5) followed by a box blur.
int smSize = sideLength2d*sideLength2d*sizeof(Fragment);
hipLaunchKernelGGL(( ssaoPassShared), dim3(blockCount2d),dim3(blockSize2d),smSize, 0, width, height, dev_fragmentBuffer, P, 4, dev_ssaoTexObj, 8, dev_ssaoKernel, 5.0);
checkCUDAError("fragRasterize");
hipLaunchKernelGGL(( ssaoBlur), dim3(blockCount2d),dim3(blockSize2d),smSize, 0, width, height, dev_fragmentBuffer, 4, dev_framebuffer);
checkCUDAError("fragRasterize");
// Copy framebuffer into OpenGL buffer for OpenGL previewing
hipLaunchKernelGGL(( sendImageToPBO), dim3(blockCount2d), dim3(blockSize2d), 0, 0, pbo, width, height, dev_framebuffer);
checkCUDAError("copy render result to pbo");
}
/**
 * Called once at the end of the program to release all device memory owned
 * by the rasterizer: per-primitive mesh buffers, then the global pipeline
 * buffers (which are nulled so a later init starts clean).
 */
void rasterizeFree() {
    // Per-mesh, per-primitive attribute/index buffers.
    for (auto &meshEntry : mesh2PrimitivesMap) {
        for (auto &prim : meshEntry.second) {
            hipFree(prim.dev_indices);
            hipFree(prim.dev_position);
            hipFree(prim.dev_normal);
            hipFree(prim.dev_texcoord0);
            hipFreeArray(prim.dev_diffuseTex);
            hipFree(prim.dev_verticesOut);
        }
    }
    // Globally owned pipeline buffers.
    hipFree(dev_primitives);
    dev_primitives = NULL;
    hipFree(dev_tiles_min);
    hipFree(dev_tiles_max);
    dev_tiles_min = dev_tiles_max = NULL;
    hipFree(dev_fragmentBuffer);
    dev_fragmentBuffer = NULL;
    hipFree(dev_framebuffer);
    dev_framebuffer = NULL;
    hipFree(dev_depth);
    dev_depth = nullptr;
    checkCUDAError("rasterize Free");
}
| 96a0fb5b5a8600dc45551b0bd4f30caeeebc2090.cu | /**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <cstdint>
#include <random>
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
namespace {
// Index and attribute element types matching the glTF buffers we load.
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
// Enum values equal the vertex count per primitive; _primitiveAssembly
// divides/mods the index id by this value directly.
enum PrimitiveType{
Point = 1,
Line = 2,
Triangle = 3
};
// Output of the vertex stage, one per vertex.
struct VertexOut {
// Viewport-space x/y, NDC z; w keeps the clip-space w for
// perspective-correct interpolation in the fragment stage.
glm::vec4 pos;
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor; // eye space normal used for shading, cuz normal will go wrong after perspective transformation
// glm::vec3 col;
glm::vec2 texcoord0;
// Diffuse texture backing array + sampling handle (copied per vertex so the
// fragment stage only needs the vertex records).
cudaArray* dev_diffuseTex = NULL;
cudaTextureObject_t dev_diffuseTexObj = 0;
// ...
};
// One assembled primitive (only triangles are produced currently).
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
};
// Per-pixel shading state produced by _fragRasterize and read by the SSAO
// passes: interpolated eye-space data, unpacked depth, and the AO factor.
struct Fragment {
glm::vec3 eyePos;
glm::vec3 eyeNor;
float z, ssao;
// TODO: add new attributes to your Fragment
// The attributes listed below might be useful,
// but always feel free to modify on your own
// glm::vec3 eyePos; // eye space position used for shading
// glm::vec3 eyeNor;
// VertexAttributeTexcoord texcoord0;
// TextureData* dev_diffuseTex;
// ...
};
// All device-side buffers belonging to one glTF mesh primitive.
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
cudaArray *dev_diffuseTex;
cudaTextureObject_t dev_diffuseTexObj;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
// TODO: add more attributes when needed
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
static unsigned long long *dev_depth = NULL;
static glm::ivec2 *dev_tiles_min = NULL, *dev_tiles_max = NULL;
/**
 * Writes the rendered framebuffer into the OpenGL PBO: each thread clamps
 * one pixel's color to [0, 1], scales to 8-bit, and stores it (alpha = 0).
 */
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
    int px = (blockIdx.x * blockDim.x) + threadIdx.x;
    int py = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (px >= w || py >= h)
        return;
    int idx = px + (py * w);
    glm::vec3 color;
    color.x = glm::clamp(image[idx].x, 0.0f, 1.0f) * 255.0;
    color.y = glm::clamp(image[idx].y, 0.0f, 1.0f) * 255.0;
    color.z = glm::clamp(image[idx].z, 0.0f, 1.0f) * 255.0;
    // One thread writes exactly one texel of the PBO.
    pbo[idx].w = 0;
    pbo[idx].x = color.x;
    pbo[idx].y = color.y;
    pbo[idx].z = color.z;
}
static cudaTextureObject_t dev_ssaoTexObj = 0;
static cudaArray *dev_ssaoTexArray = nullptr;
static glm::vec3 *dev_ssaoKernel = nullptr;
static cudaArray *dev_ssaoOutArray = nullptr;
static cudaSurfaceObject_t dev_ssaoOutSurfObj;
static cudaTextureObject_t dev_ssaoOutTexObj;
/**
 * Builds the SSAO resources: an nKernel^2 hemisphere sampling kernel in
 * device memory, an nRandom x nRandom random-rotation texture, and a float
 * output array with surface/texture objects over it.
 * BUG FIX: the scratch host buffers were released with `delete` although
 * they were allocated with `new[]` (undefined behavior) — now `delete[]`.
 * NOTE(review): the random engine is default-seeded, so the kernel and noise
 * are identical every run — seed explicitly if variation is wanted.
 */
void ssaoInit(int nKernel, int nRandom, int width, int height) {
    std::uniform_real_distribution<float> dist(0.0, 1.0);
    std::default_random_engine rng;
    // Sampling kernel: random directions in the +z hemisphere, scaled so
    // later samples reach further out (scale ramps from 1/n^2 toward 1).
    glm::vec3 *kern = new glm::vec3[nKernel*nKernel];
    float scale = 1.0f / (nKernel*nKernel);
    for (int i = 0; i < nKernel*nKernel; i++) {
        kern[i].x = 2.0f*dist(rng) - 1.0f;
        kern[i].y = 2.0f*dist(rng) - 1.0f;
        kern[i].z = dist(rng);
        kern[i] = glm::normalize(kern[i]) * scale;
        scale = 0.1f + 0.9f*scale*scale;
    }
    cudaMalloc(&dev_ssaoKernel, nKernel*nKernel*sizeof(glm::vec3));
    cudaMemcpy(dev_ssaoKernel, kern, nKernel*nKernel*sizeof(glm::vec3), cudaMemcpyHostToDevice);
    delete[] kern;
    // Rotation (noise) texture: random xy vectors; z/w unused.
    float4 *noise = new float4[nRandom*nRandom];
    for (int i = 0; i < nRandom*nRandom; i++) {
        noise[i].x = 2.0f*dist(rng) - 1.0f;
        noise[i].y = 2.0f*dist(rng) - 1.0f;
        noise[i].z = noise[i].w = 0.0f;
    }
    cudaChannelFormatDesc channel = cudaCreateChannelDesc<float4>();
    cudaMallocArray(&dev_ssaoTexArray, &channel, nRandom, nRandom);
    // NOTE(review): cudaMemcpyToArray is deprecated; cudaMemcpy2DToArray is
    // the modern replacement.
    cudaMemcpyToArray(dev_ssaoTexArray, 0, 0, noise, nRandom*nRandom*sizeof(float4), cudaMemcpyHostToDevice);
    checkCUDAError("Set Texture Image data");
    delete[] noise;
    // Describe the noise array as a wrapping, bilinear, normalized-coord texture.
    cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = dev_ssaoTexArray;
    cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.addressMode[0] = cudaAddressModeWrap;
    texDesc.addressMode[1] = cudaAddressModeWrap;
    texDesc.filterMode = cudaFilterModeLinear;
    texDesc.readMode = cudaReadModeElementType;
    texDesc.normalizedCoords = 1;
    cudaCreateTextureObject(&dev_ssaoTexObj, &resDesc, &texDesc, NULL);
    // Float output array plus surface/texture objects for a blur pass.
    channel = cudaCreateChannelDesc<float>();
    cudaMallocArray(&dev_ssaoOutArray, &channel, width, height);
    resDesc.res.array.array = dev_ssaoOutArray;
    cudaCreateSurfaceObject(&dev_ssaoOutSurfObj, &resDesc);
    cudaCreateTextureObject(&dev_ssaoOutTexObj, &resDesc, &texDesc, NULL);
}
/**
 * Called once at the beginning of the program to allocate memory.
 * Safe to call again on resize: cudaFree(NULL) is a no-op, so the initial
 * frees only matter when buffers already exist.
 */
void rasterizeInit(int w, int h) {
width = w;
height = h;
cudaFree(dev_fragmentBuffer);
cudaMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
cudaFree(dev_framebuffer);
cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
// Packed depth/index buffer; re-cleared to 0xFF each frame in rasterize().
cudaFree(dev_depth);
cudaMalloc(&dev_depth, width * height * sizeof(unsigned long long));
// 8x8 SSAO kernel, 4x4 rotation-noise tile.
ssaoInit(8, 4, w, h);
checkCUDAError("rasterizeInit");
}
/**
 * kern function with support for stride to sometimes replace cudaMemcpy
 * One thread is responsible for copying one component
 * Gathers N components (n components per attribute, componentTypeByteSize
 * bytes each) from a possibly-strided glTF bufferView into a tightly packed
 * destination. byteStride == 0 means the source is already tightly packed.
 */
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
int count = i / n;          // which attribute (e.g. which vec3)
int offset = i - count * n; // which component of the attribute
// Copy the component byte-by-byte; dst is tightly packed, src honors the
// bufferView's byteOffset and (optional) byteStride.
for (int j = 0; j < componentTypeByteSize; j++) {
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
// Bakes a node's hierarchy transform into its vertex data in place:
// positions by the full matrix MV, normals by the inverse-transpose basis
// MV_normal (renormalized). One thread per vertex.
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
// Returns a node's local transform: either its explicit 4x4 matrix, or the
// composition of its translation / rotation / scale properties.
// BUG FIX: the quaternion's w component (n.rotation[3]) was never copied
// into q, leaving an incomplete quaternion and producing wrong rotations.
// glTF stores rotations as [x, y, z, w].
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
    glm::mat4 curMatrix(1.0);
    const std::vector<double> &m = n.matrix;
    if (m.size() > 0) {
        // Explicit matrix: glTF and glm are both column-major, so element
        // (4*i + j) maps directly onto curMatrix[i][j].
        for (int i = 0; i < 4; i++) {
            for (int j = 0; j < 4; j++) {
                curMatrix[i][j] = (float)m.at(4 * i + j);
            }
        }
    } else {
        // No matrix: compose translation, then rotation, then scale.
        if (n.translation.size() > 0) {
            curMatrix[3][0] = n.translation[0];
            curMatrix[3][1] = n.translation[1];
            curMatrix[3][2] = n.translation[2];
        }
        if (n.rotation.size() > 0) {
            glm::mat4 R;
            glm::quat q;
            q[0] = n.rotation[0];
            q[1] = n.rotation[1];
            q[2] = n.rotation[2];
            q[3] = n.rotation[3]; // w — previously dropped
            R = glm::mat4_cast(q);
            curMatrix = curMatrix * R;
        }
        if (n.scale.size() > 0) {
            curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
        }
    }
    return curMatrix;
}
// Depth-first walk of the scene graph: records each node's accumulated
// world matrix (parent * local) into n2m, then recurses into its children
// with the new parent transform.
void traverseNode (
    std::map<std::string, glm::mat4> & n2m,
    const tinygltf::Scene & scene,
    const std::string & nodeString,
    const glm::mat4 & parentMatrix
    )
{
    const tinygltf::Node & node = scene.nodes.at(nodeString);
    glm::mat4 worldMatrix = parentMatrix * getMatrixFromNodeMatrixVector(node);
    n2m.insert(std::pair<std::string, glm::mat4>(nodeString, worldMatrix));
    for (const auto & childName : node.children) {
        traverseNode(n2m, scene, childName, worldMatrix);
    }
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
cudaMalloc(&dev_bufferView, bufferView.byteLength);
cudaMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, cudaMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffer of indices, materail, and each attributes
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = nullptr;
VertexAttributePosition* dev_position = nullptr;
VertexAttributeNormal* dev_normal = nullptr;
VertexAttributeTexcoord* dev_texcoord0 = nullptr;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
cudaMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// We assume the glTF model attribute type are 5126(FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
cudaMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
cudaMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You can only worry about this part once you started to
// implement textures for your rasterizer
cudaArray* dev_diffuseTex = NULL;
cudaTextureObject_t dev_diffuseTexObj = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
// convert image to rgba
std::vector<TextureData> rgbaImg;
for (int i = 0; i < image.image.size(); i++) {
rgbaImg.push_back(image.image[i]);
if(i % 3 == 2)
rgbaImg.push_back(0);
}
size_t s =rgbaImg.size() * sizeof(TextureData);
//printf("img size %d\n", s);
cudaChannelFormatDesc channel = cudaCreateChannelDesc(8,8,8,8,cudaChannelFormatKindUnsigned);
cudaMallocArray(&dev_diffuseTex, &channel, image.width, image.height);
cudaMemcpyToArray(dev_diffuseTex, 0, 0, &rgbaImg[0], s, cudaMemcpyHostToDevice);
checkCUDAError("Set Texture Image data");
// Specify texture
cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = dev_diffuseTex;
// Specify texture object parameters
cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeWrap;
texDesc.addressMode[1] = cudaAddressModeWrap;
texDesc.filterMode = cudaFilterModeLinear;
texDesc.readMode = cudaReadModeNormalizedFloat;
texDesc.normalizedCoords = 1;
// Create texture object
cudaCreateTextureObject(&dev_diffuseTexObj, &resDesc, &texDesc, NULL);
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materails
// You may have to take a look at tinygltfloader
// You can also use the above code loading diffuse material as a start point
}
// ---------Node hierarchy transform--------
cudaDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
dev_diffuseTexObj,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
cudaMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
cudaMalloc(&dev_tiles_min, totalNumPrimitives * sizeof(glm::ivec2));
cudaMalloc(&dev_tiles_max, totalNumPrimitives * sizeof(glm::ivec2));
}
// Finally, cudaFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
cudaFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
// Vertex shader stage: one thread per vertex.
// Transforms each model-space vertex to clip space with MVP, performs the
// perspective divide on x/y/z, then maps x/y into pixel (viewport) space.
// Note: pos.w is deliberately left as the clip-space w — the fragment stage
// reads v[k].pos[3] to do perspective-correct texture interpolation.
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
VertexOut &vOut = primitive.dev_verticesOut[vid];
// Model -> clip space.
vOut.pos = MVP*glm::vec4(primitive.dev_position[vid], 1);
// Perspective divide: clip -> NDC (w intentionally untouched, see above).
vOut.pos.x /= vOut.pos.w;
vOut.pos.y /= vOut.pos.w;
vOut.pos.z /= vOut.pos.w;
// NDC [-1,1] -> viewport pixels; y is flipped so +y is down-screen.
vOut.pos.x = 0.5f * width * (vOut.pos.x + 1.0f);
vOut.pos.y = 0.5f * height * (1.0f - vOut.pos.y);
// Eye-space position/normal for shading; normals use the inverse-transpose basis.
vOut.eyePos = glm::vec3(MV*glm::vec4(primitive.dev_position[vid], 1));
vOut.eyeNor = glm::normalize(MV_normal*primitive.dev_normal[vid]);
// Copy the material handles so fragment shading needs only the vertex record.
vOut.dev_diffuseTex = primitive.dev_diffuseTex;
vOut.dev_diffuseTexObj = primitive.dev_diffuseTexObj;
// Texcoords are optional; dev_texcoord0 may be null for untextured meshes.
if (primitive.dev_texcoord0)
vOut.texcoord0 = primitive.dev_texcoord0[vid];
}
}
static int curPrimitiveBeginId = 0;
// Primitive assembly: one thread per index.
// Gathers transformed vertices into Primitive records. For triangles,
// primitiveType == 3, so index iid maps to primitive iid/3, corner iid%3.
// curPrimitiveBeginId offsets into the global primitive array because this
// kernel is launched once per mesh-primitive batch.
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices) {
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) {
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
// TODO: other primitive modes (points, lines, strips/fans) are not handled.
}
}
// based on http://stereopsis.com/radix.html
// Packs a float depth (bit-flipped so unsigned ordering matches float
// ordering) into the upper 32 bits of a uint64 and the primitive index into
// the lower 32 bits, so one atomicMin performs the depth test AND records
// the winning primitive.
// Fixes vs. original: uses __float_as_uint instead of a pointer cast
// (strict-aliasing UB), and casts `i` to uint32_t so a stray negative index
// cannot sign-extend into the depth bits.
__device__ static inline uint64_t FloatFlip(float f, int i)
{
    uint32_t fi = __float_as_uint(f);
    // Negative floats: flip all bits; non-negative: flip only the sign bit.
    uint32_t mask = (fi & 0x80000000u) ? 0xFFFFFFFFu : 0x80000000u;
    return (((uint64_t)(fi ^ mask)) << 32) | (uint32_t)i;
}
// Recovers the float depth stored by FloatFlip in the upper 32 bits of a
// packed depth/index word.
// BUG FIX: the unflip mask was inverted. FloatFlip stores non-negative
// floats with the sign bit SET (XOR 0x80000000) and negative floats with all
// bits flipped (high bit CLEAR). Undoing that requires:
//   high bit set   -> XOR 0x80000000
//   high bit clear -> XOR 0xFFFFFFFF
// The original selected the masks the other way around, so every decoded
// depth value was wrong.
__device__ static inline float FloatUnflip(uint64_t u)
{
    uint32_t hi = (uint32_t)(u >> 32);
    uint32_t mask = (hi & 0x80000000u) ? 0x80000000u : 0xFFFFFFFFu;
    return __uint_as_float(hi ^ mask);
}
// Depth pre-pass: one thread per primitive.
// Scans the triangle's screen-space bounding box; for every covered pixel,
// atomically min's the packed (flipped depth << 32 | primitive index) word,
// so the depth test and winner record happen in a single atomicMin.
// BUG FIX: the original loop conditions (`j >= 0` / `i >= 0` tested against
// a start value of aabb.min) aborted the whole scan whenever the AABB began
// off-screen, culling any triangle crossing the top or left edge. The start
// indices are now clamped to 0 instead.
__global__ void depthPass(int numPrimitives, Primitive *dev_primitives, int w, int h, unsigned long long *depth) {
    int pIdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (pIdx >= numPrimitives)
        return;
    Primitive &p = dev_primitives[pIdx];
    glm::vec3 tri[3];
    tri[0] = glm::vec3(p.v[0].pos);
    tri[1] = glm::vec3(p.v[1].pos);
    tri[2] = glm::vec3(p.v[2].pos);
    AABB aabb;
    getAABBForTriangle(tri, aabb);
    // Clamp scan start to the framebuffer; upper bounds stay as float
    // comparisons to preserve the original coverage behavior.
    int jStart = (aabb.min[1] < 0) ? 0 : (int)aabb.min[1];
    int iStart = (aabb.min[0] < 0) ? 0 : (int)aabb.min[0];
    for (int j = jStart; j < aabb.max[1] && j < h; j++) {
        for (int i = iStart; i < aabb.max[0] && i < w; i++) {
            glm::vec2 coord(i,j);
            glm::vec3 bary = calculateBarycentricCoordinate(tri, coord);
            if (isBarycentricCoordInBounds(bary))
                atomicMin(&depth[i+w*j], FloatFlip(getZAtCoordinate(bary, tri),pIdx));
        }
    }
}
// Shades one fragment of primitive p at pixel (i, j): interpolates eye-space
// direction/normal with screen-space barycentrics, applies a clamped
// -dot(view, normal) Lambert-style term, and modulates by the diffuse
// texture using perspective-correct (1/w weighted) texture coordinates.
// NOTE(review): this duplicates the shading done inline in _fragRasterize —
// confirm which one the pipeline actually uses.
__device__ void shadeFragment(unsigned int i, unsigned int j, const Primitive &p, glm::vec3 &out) {
    glm::vec3 tri[3];
    tri[0] = glm::vec3(p.v[0].pos);
    tri[1] = glm::vec3(p.v[1].pos);
    tri[2] = glm::vec3(p.v[2].pos);
    glm::vec2 coord(i,j);
    // Barycentrics in screen space (pos has already been viewport-mapped).
    glm::vec3 bary = calculateBarycentricCoordinate(tri, coord);
    // lambert
    glm::vec3 fragDir = glm::normalize(bary.x*p.v[0].eyePos + bary.y*p.v[1].eyePos + bary.z*p.v[2].eyePos);
    glm::vec3 fragNrm = glm::normalize(bary.x*p.v[0].eyeNor + bary.y*p.v[1].eyeNor + bary.z*p.v[2].eyeNor);
    glm::vec3 lambert = glm::clamp(-glm::vec3(glm::dot(fragDir, fragNrm)), 0.0f, 1.0f);
    // Weight the barycentrics by 1/w for perspective-correct interpolation:
    // pos.w still holds the clip-space w (only xyz were divided upstream).
    glm::vec3 texBary = bary / glm::vec3(p.v[0].pos[3], p.v[1].pos[3], p.v[2].pos[3]);
    glm::vec2 st = texBary[0]*p.v[0].texcoord0 + texBary[1]*p.v[1].texcoord0 + texBary[2]*p.v[2].texcoord0;
    // Renormalization factor for the 1/w-weighted coordinates.
    float norm = texBary[0] + texBary[1] + texBary[2];
    if (p.v[0].dev_diffuseTex) {
        float4 rgba = tex2D<float4>(p.v[0].dev_diffuseTexObj, st.x / norm, st.y / norm);
        lambert *= glm::vec3(rgba.x,rgba.y,rgba.z);
    }
    out = lambert;
}
// Resolves the depth buffer into shaded fragments: one thread per pixel.
// depth[] holds FloatFlip-packed values — order-flipped depth bits in the
// high 32 bits, the winning primitive's index in the low 32 bits.
__global__ void _fragRasterize(int numPrimitives, Primitive *dev_primitives, Fragment *dev_fragments, int w, int h, unsigned long long *depth, glm::vec3 *framebuffer) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= w || j >= h)
        return;
    // Narrowing to int keeps the low 32 bits: the primitive index. Pixels no
    // primitive touched still hold the 0xFF.. memset sentinel, whose low
    // word reads back as -1.
    int pIdx = depth[i+w*j];
    if (pIdx < 0)
        return;
    Primitive &p = dev_primitives[pIdx];
    Fragment &f = dev_fragments[i+w*j];
    glm::vec3 tri[3];
    tri[0] = glm::vec3(p.v[0].pos);
    tri[1] = glm::vec3(p.v[1].pos);
    tri[2] = glm::vec3(p.v[2].pos);
    glm::vec2 coord(i,j);
    // Screen-space barycentrics for interpolation.
    glm::vec3 bary = calculateBarycentricCoordinate(tri, coord);
    // Recover the scalar depth from the packed high word.
    f.z = FloatUnflip(depth[i+w*j]);
    // lambert
    // NOTE(review): eyePos is normalized here, so the fragment stores a unit
    // view direction rather than the raw eye-space position — confirm the
    // SSAO passes (which add offsets to f.eyePos) expect this.
    f.eyePos = glm::normalize(bary.x*p.v[0].eyePos
        + bary.y*p.v[1].eyePos
        + bary.z*p.v[2].eyePos);
    f.eyeNor = glm::normalize(bary.x*p.v[0].eyeNor
        + bary.y*p.v[1].eyeNor
        + bary.z*p.v[2].eyeNor);
    glm::vec3 lambert = glm::clamp(-glm::vec3(glm::dot(f.eyePos, f.eyeNor)), 0.0f, 1.0f);
    // Perspective-correct texcoords: weight barycentrics by 1/w (pos.w still
    // holds clip-space w), then renormalize by the weight sum.
    glm::vec3 texBary = bary / glm::vec3(p.v[0].pos[3], p.v[1].pos[3], p.v[2].pos[3]);
    glm::vec2 st0 = texBary[0]*p.v[0].texcoord0
        + texBary[1]*p.v[1].texcoord0
        + texBary[2]*p.v[2].texcoord0;
    st0 /= texBary[0] + texBary[1] + texBary[2];
    if (p.v[0].dev_diffuseTex) {
        float4 rgba = tex2D<float4>(p.v[0].dev_diffuseTexObj, st0.x, st0.y);
        lambert *= glm::vec3(rgba.x,rgba.y,rgba.z);
    }
    framebuffer[i + w*j] = lambert;
}
// Hermite smoothstep: remaps x from [a, b] into [0, 1] with zero slope at
// both ends (t*t*(3 - 2t) after clamping the normalized parameter).
__device__ static inline float smoothstep(float a, float b, float x) {
    float t = (x - a) / (b - a);
    if (t < 0.0f) t = 0.0f;
    else if (t > 1.0f) t = 1.0f;
    return t * t * (3.0f - 2.0f * t);
}
// Screen-space ambient occlusion: for each pixel, projects ssaoKernSize^2
// hemisphere samples around the fragment and counts how many are occluded
// by nearer geometry; writes the (inverted) occlusion factor to framebuffer.
// One thread per pixel; expects a 2D launch covering w x h.
// NOTE(review): ssaoTexObj is declared int but fed to tex2D, which takes a
// cudaTextureObject_t (64-bit handle) — confirm the handle survives the
// narrowing at the call site.
__global__ void ssaoPass(int w, int h, Fragment *dev_fragments, const glm::mat4 P,
    int ssaoTexSize, int ssaoTexObj,
    int ssaoKernSize, const glm::vec3 *ssaoKern,
    float ssaoRadius, unsigned long long *depth, glm::vec3 *framebuffer) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= w || j >= h)
        return;
    Fragment &f = dev_fragments[i + w*j];
    // Per-pixel random rotation vector to decorrelate the sample kernel.
    float4 rVec4 = tex2D<float4>(ssaoTexObj, float(i)/ssaoTexSize, float(j)/ssaoTexSize);
    glm::vec3 rVec(rVec4.x, rVec4.y, 0.0f);
    // Gram-Schmidt tangent basis around the fragment normal.
    glm::vec3 tVec = glm::normalize(rVec - f.eyeNor * glm::dot(rVec, f.eyeNor));
    glm::vec3 bVec = glm::cross(f.eyeNor, tVec);
    glm::mat3 TBN(tVec, bVec, f.eyeNor);
    float ssao = 0.0f;
    for (int k = 0; k < ssaoKernSize*ssaoKernSize; k++) {
        // NOTE(review): samp is not divided by samp.w before the NDC->pixel
        // mapping — confirm this is intended for the projection P in use.
        glm::vec4 samp = P*glm::vec4(ssaoRadius*TBN*ssaoKern[k] + f.eyePos, 1.0f);
        int si = 0.5f * w * (samp.x + 1.0f);
        int sj = 0.5f * h * (1.0f - samp.y);
        // Bug fix: skip samples that project off-screen. The unguarded read
        // used to index dev_fragments out of bounds; the ssaoPassShared
        // variant already carries this guard.
        if (si >= 0 && sj >= 0 && si < w && sj < h) {
            float z = dev_fragments[si + w*sj].z;
            if (z > f.z)
                ssao += smoothstep(0.0f, 1.0f, ssaoRadius / fabsf(f.z - z));
        }
    }
    // Invert: 1 = fully lit, 0 = fully occluded.
    ssao = 1.0f - ssao/(ssaoKernSize*ssaoKernSize);
    framebuffer[i+w*j] = glm::vec3(ssao);
}
// SSAO pass, shared-memory variant: each thread caches its own fragment in
// dynamic shared memory and accumulates the occlusion term there before
// writing it back to dev_fragments[...].ssao.
// Launch requirement: dynamic shared memory of
// blockDim.x * blockDim.y * sizeof(Fragment) bytes.
__global__ void ssaoPassShared(int w, int h, Fragment *dev_fragments, const glm::mat4 P,
    int ssaoTexSize, int ssaoTexObj,
    int ssaoKernSize, const glm::vec3 *ssaoKern, float ssaoRadius) {
    extern __shared__ Fragment sFrag[];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= w || j >= h)
        return;
    // load the fragment into shared memory
    int tIdx = threadIdx.x + blockDim.x * threadIdx.y;
    sFrag[tIdx] = dev_fragments[i + w*j];
    // Each thread only ever touches its own slot sFrag[tIdx], so no
    // __syncthreads() is required.
    Fragment &f = sFrag[tIdx];
    // Per-pixel random rotation vector to decorrelate the sample kernel.
    float4 rVec4 = tex2D<float4>(ssaoTexObj, float(i)/ssaoTexSize, float(j)/ssaoTexSize);
    glm::vec3 rVec(rVec4.x, rVec4.y, 0.0);
    // Gram-Schmidt tangent basis around the fragment normal.
    glm::vec3 tVec = glm::normalize(rVec - f.eyeNor * glm::dot(rVec, f.eyeNor));
    glm::vec3 bVec = glm::cross(f.eyeNor, tVec);
    glm::mat3 TBN(tVec, bVec, f.eyeNor);
    f.ssao = 0.0;
    for (int k = 0; k < ssaoKernSize*ssaoKernSize; k++) {
        // NOTE(review): samp is not divided by samp.w before the NDC->pixel
        // mapping — confirm this is intended for the projection P in use.
        glm::vec4 samp = P*glm::vec4(ssaoRadius*TBN*ssaoKern[k] + f.eyePos, 1.0f);
        int si = glm::floor(0.5f * w * (samp.x + 1.0f));
        int sj = glm::floor(0.5f * h * (1.0f - samp.y));
        if (si >= 0 && sj >= 0 && si < w && sj < h) {
            float z = dev_fragments[si + w*sj].z;
            if (z > f.z)
                f.ssao += smoothstep(0.0, 1.0, ssaoRadius / fabs(f.z - z));
        }
    }
    // Invert: 1 = fully lit, 0 = fully occluded.
    dev_fragments[i+w*j].ssao = 1.0 - f.ssao/(ssaoKernSize*ssaoKernSize);
}
// Box-blurs the per-fragment SSAO term over a ssaoTexSize x ssaoTexSize
// neighborhood (clipped to the screen) and modulates the framebuffer color
// with the averaged factor. One thread per pixel; 2D launch covering w x h.
__global__ void ssaoBlur(int w, int h, Fragment *dev_fragments, int ssaoTexSize, glm::vec3 *framebuffer) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= w || j >= h)
        return;
    int idx = i + w*j;
    // The previous version copied the center fragment into dynamic shared
    // memory but never read it back — the copy (and the shared-memory
    // allocation it forced) was dead weight, so it has been removed.
    float ssao = 0.0f;
    int n = 0;
    for (int sj = j - ssaoTexSize/2; sj < j + ssaoTexSize/2; sj++) {
        for (int si = i - ssaoTexSize/2; si < i + ssaoTexSize/2; si++) {
            if (si >= 0 && sj >= 0 && si < w && sj < h) {
                ssao += dev_fragments[si + w*sj].ssao;
                n++;
            }
        }
    }
    // For ssaoTexSize >= 2 the window always contains the (in-bounds) center
    // pixel, so n >= 1; the guard protects degenerate window sizes from a
    // divide by zero.
    if (n > 0) {
        ssao /= n;
        framebuffer[idx] *= ssao;
    }
}
/**
 * Perform rasterization.
 *
 * Pipeline: per-mesh vertex transform + primitive assembly, depth pre-pass
 * (packed depth/primitive-id atomicMin), fragment shading, SSAO, SSAO blur,
 * then copy of the framebuffer into the OpenGL PBO.
 */
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal, const glm::mat4 &P) {
    int sideLength2d = 16;
    dim3 blockSize2d(sideLength2d, sideLength2d);
    dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
        (height - 1) / blockSize2d.y + 1);
    // Vertex Process & primitive assembly
    curPrimitiveBeginId = 0;
    dim3 numThreadsPerBlock(128);
    auto it = mesh2PrimitivesMap.begin();
    auto itEnd = mesh2PrimitivesMap.end();
    for (; it != itEnd; ++it) {
        auto p = (it->second).begin(); // each primitive
        auto pEnd = (it->second).end();
        for (; p != pEnd; ++p) {
            dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
            dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
            _vertexTransformAndAssembly <<< numBlocksForVertices, numThreadsPerBlock >>>(p->numVertices, *p, MVP, MV, MV_normal, width, height);
            checkCUDAError("Vertex Processing");
            // No cudaDeviceSynchronize() here: both kernels run on the default
            // stream, so primitive assembly cannot start before the vertex
            // kernel finishes. The old per-iteration sync only stalled the CPU.
            _primitiveAssembly <<< numBlocksForIndices, numThreadsPerBlock >>>
                (p->numIndices,
                 curPrimitiveBeginId,
                 dev_primitives,
                 *p);
            checkCUDAError("Primitive Assembly");
            curPrimitiveBeginId += p->numPrimitives;
        }
    }
    checkCUDAError("Vertex Processing and Primitive Assembly");
    cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
    // 0xFF bytes give every depth texel the sentinel ~0ull: maximal packed
    // depth, with a low word of 0xFFFFFFFF that _fragRasterize reads as -1.
    cudaMemset(dev_depth, 0xFF, width * height * sizeof(unsigned long long));
    cudaMemset(dev_framebuffer, 0, width*height*sizeof(glm::vec3));
    dim3 blockDim1d(1024);
    dim3 blockCnt1d((totalNumPrimitives + blockDim1d.x - 1)/blockDim1d.x);
    depthPass<<<blockCnt1d,blockDim1d>>>(totalNumPrimitives, dev_primitives, width, height, dev_depth);
    checkCUDAError("depthPass");
    _fragRasterize<<<blockCount2d,blockSize2d>>>(totalNumPrimitives, dev_primitives, dev_fragmentBuffer, width, height, dev_depth, dev_framebuffer);
    checkCUDAError("fragRasterize");
    // Dynamic shared memory: one Fragment per thread for ssaoPassShared.
    int smSize = sideLength2d*sideLength2d*sizeof(Fragment);
    // The error labels below used to all say "fragRasterize", which made
    // failure reports from the SSAO kernels point at the wrong launch.
    ssaoPassShared<<<blockCount2d,blockSize2d,smSize>>>(width, height, dev_fragmentBuffer, P, 4, dev_ssaoTexObj, 8, dev_ssaoKernel, 5.0f);
    checkCUDAError("ssaoPassShared");
    ssaoBlur<<<blockCount2d,blockSize2d,smSize>>>(width, height, dev_fragmentBuffer, 4, dev_framebuffer);
    checkCUDAError("ssaoBlur");
    // Copy framebuffer into OpenGL buffer for OpenGL previewing
    sendImageToPBO<<<blockCount2d, blockSize2d>>>(pbo, width, height, dev_framebuffer);
    checkCUDAError("copy render result to pbo");
}
/**
 * Called once at the end of the program to free CUDA memory.
 */
void rasterizeFree() {
    // Release the per-primitive attribute/index buffers first.
    for (auto &entry : mesh2PrimitivesMap) {
        for (auto &prim : entry.second) {
            cudaFree(prim.dev_indices);
            cudaFree(prim.dev_position);
            cudaFree(prim.dev_normal);
            cudaFree(prim.dev_texcoord0);
            cudaFreeArray(prim.dev_diffuseTex);
            cudaFree(prim.dev_verticesOut);
        }
    }
    ////////////
    // Then the global pipeline buffers, nulling each pointer after the free.
    cudaFree(dev_primitives);
    dev_primitives = NULL;
    cudaFree(dev_tiles_min);
    cudaFree(dev_tiles_max);
    dev_tiles_min = dev_tiles_max = NULL;
    cudaFree(dev_fragmentBuffer);
    dev_fragmentBuffer = NULL;
    cudaFree(dev_framebuffer);
    dev_framebuffer = NULL;
    cudaFree(dev_depth);
    dev_depth = nullptr;
    checkCUDAError("rasterize Free");
}
|
2dfa482a568118a4af49a179c0c548edcb037d39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2016, Blue Brain Project
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "coreneuron/utils/randoms/nrnran123.h"
/* global data structure per process */
/* SHIFT32 maps a uint32 plus one into the open interval (0,1). */
__device__ static const double SHIFT32 = 1.0 / 4294967297.0; /* 1/(2^32 + 1) */
/* Philox key; v[0] carries the process-wide "global index". */
__device__ static philox4x32_key_t k = {{0}};
/* Number of live streams created via nrnran123_setup_cuda_newstream. */
__device__ static unsigned int instance_count_ = 0;
/* Returns the number of streams currently allocated on this device. */
__device__ size_t nrnran123_instance_count() {
    return instance_count_;
}
/* Size in bytes of one stream's device-side state. */
__device__ size_t nrnran123_state_size() {
    return sizeof(nrnran123_State);
}
/* Sets the process-wide global index (stored in the Philox key). */
__device__ void nrnran123_set_globalindex(uint32_t gix) {
    k.v[0] = gix;
}
/* if one sets the global, one should reset all the stream sequences. */
__device__ uint32_t nrnran123_get_globalindex() {
    return k.v[0];
}
/* Device-side initialization of a freshly allocated stream: seeds the
 * counter with the three stream ids and primes the first random block. */
__global__ void nrnran123_setup_cuda_newstream(nrnran123_State* s,
    uint32_t id1,
    uint32_t id2,
    uint32_t id3) {
    s->c.v[0] = 0;
    s->c.v[1] = id3;
    s->c.v[2] = id1;
    s->c.v[3] = id2;
    nrnran123_setseq(s, 0, 0);
    atomicAdd(&instance_count_, 1);
}
/* Device-side bookkeeping when a stream is destroyed. */
__global__ void nrnran123_cuda_deletestream(nrnran123_State* s) {
    atomicSub(&instance_count_, 1);
}
/* Reports the sequence counter and the index of the next cached output. */
__device__ void nrnran123_getseq(nrnran123_State* s, uint32_t* seq, unsigned char* which) {
    *seq = s->c.v[0];
    *which = s->which_;
}
/* Positions the stream at (seq, which) and regenerates the cached block.
 * An out-of-range `which` (> 3) is clamped to 0. */
__device__ void nrnran123_setseq(nrnran123_State* s, uint32_t seq, unsigned char which) {
    if (which > 3) {
        s->which_ = 0;
    } else {
        s->which_ = which;
    }
    s->c.v[0] = seq;
    s->r = philox4x32(s->c, k);
}
/* Returns the two primary stream identifiers. */
__device__ void nrnran123_getids(nrnran123_State* s, uint32_t* id1, uint32_t* id2) {
    *id1 = s->c.v[2];
    *id2 = s->c.v[3];
}
/* Returns all three stream identifiers. */
__device__ void nrnran123_getids3(nrnran123_State* s, uint32_t* id1, uint32_t* id2, uint32_t* id3) {
    *id3 = s->c.v[1];
    *id1 = s->c.v[2];
    *id2 = s->c.v[3];
}
/* Draws one uniform 32-bit integer; after all four lanes of the cached
 * Philox block are consumed, bumps the sequence counter and refills. */
__device__ uint32_t nrnran123_ipick(nrnran123_State* s) {
    uint32_t rval;
    unsigned char which = s->which_;
    rval = s->r.v[which++];
    if (which > 3) {
        which = 0;
        s->c.v[0]++;
        s->r = philox4x32(s->c, k);
    }
    s->which_ = which;
    return rval;
}
/* Uniform double in the open interval (0,1). */
__device__ double nrnran123_dblpick(nrnran123_State* s) {
    return nrnran123_uint2dbl(nrnran123_ipick(s));
}
/* Exponentially distributed deviate with mean 1. */
__device__ double nrnran123_negexp(nrnran123_State* s) {
    /* min 2.3283064e-10 to max 22.18071 */
    return -log(nrnran123_dblpick(s));
}
/* at cost of a cached value we could compute two at a time. */
/* Standard normal deviate via the Marsaglia polar method (rejection on
 * points outside the unit disk). */
__device__ double nrnran123_normal(nrnran123_State* s) {
    double w, x, y;
    double u1, u2;
    do {
        u1 = nrnran123_dblpick(s);
        u2 = nrnran123_dblpick(s);
        u1 = 2. * u1 - 1.;
        u2 = 2. * u2 - 1.;
        w = (u1 * u1) + (u2 * u2);
    } while (w > 1);
    y = sqrt((-2. * log(w)) / w);
    x = u1 * y;
    return x;
}
__device__ double nrnran123_uint2dbl(uint32_t u) {
    /* 0 to 2^32-1 transforms to double value in open (0,1) interval */
    /* min 2.3283064e-10 to max (1 - 2.3283064e-10) */
    return ((double)u + 1.0) * SHIFT32;
}
/* nrn123 streams are created from cpu launcher routine */
nrnran123_State* nrnran123_newstream(uint32_t id1, uint32_t id2) {
    return nrnran123_newstream3(id1, id2, 0);
}
/* Allocates a stream's state on the device, zeroes it, and initializes it
 * on the GPU with the three stream identifiers. */
nrnran123_State* nrnran123_newstream3(uint32_t id1, uint32_t id2, uint32_t id3) {
    nrnran123_State* s;
    hipMalloc((void**)&s, sizeof(nrnran123_State));
    /* Bug fix: the old call was hipMemset((void**)&s, ...), which passed the
     * address of the host-side pointer variable instead of the freshly
     * allocated device buffer, so the state was never actually cleared. */
    hipMemset(s, 0, sizeof(nrnran123_State));
    hipLaunchKernelGGL(( nrnran123_setup_cuda_newstream), dim3(1), dim3(1), 0, 0, s, id1, id2, id3);
    hipDeviceSynchronize();
    return s;
}
/* nrn123 streams are destroyed from cpu launcher routine */
/* Runs the bookkeeping kernel, waits for it, then frees the device state. */
void nrnran123_deletestream(nrnran123_State* s) {
    hipLaunchKernelGGL(( nrnran123_cuda_deletestream), dim3(1), dim3(1), 0, 0, s);
    hipDeviceSynchronize();
    hipFree(s);
}
| 2dfa482a568118a4af49a179c0c548edcb037d39.cu | /*
Copyright (c) 2016, Blue Brain Project
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "coreneuron/utils/randoms/nrnran123.h"
/* global data structure per process */
/* SHIFT32 maps a uint32 plus one into the open interval (0,1). */
__device__ static const double SHIFT32 = 1.0 / 4294967297.0; /* 1/(2^32 + 1) */
/* Philox key; v[0] carries the process-wide "global index". */
__device__ static philox4x32_key_t k = {{0}};
/* Number of live streams created via nrnran123_setup_cuda_newstream. */
__device__ static unsigned int instance_count_ = 0;
/* Returns the number of streams currently allocated on this device. */
__device__ size_t nrnran123_instance_count() {
    return instance_count_;
}
/* Size in bytes of one stream's device-side state. */
__device__ size_t nrnran123_state_size() {
    return sizeof(nrnran123_State);
}
/* Sets the process-wide global index (stored in the Philox key). */
__device__ void nrnran123_set_globalindex(uint32_t gix) {
    k.v[0] = gix;
}
/* if one sets the global, one should reset all the stream sequences. */
__device__ uint32_t nrnran123_get_globalindex() {
    return k.v[0];
}
/* Device-side initialization of a freshly allocated stream: seeds the
 * counter with the three stream ids and primes the first random block. */
__global__ void nrnran123_setup_cuda_newstream(nrnran123_State* s,
    uint32_t id1,
    uint32_t id2,
    uint32_t id3) {
    s->c.v[0] = 0;
    s->c.v[1] = id3;
    s->c.v[2] = id1;
    s->c.v[3] = id2;
    nrnran123_setseq(s, 0, 0);
    atomicAdd(&instance_count_, 1);
}
/* Device-side bookkeeping when a stream is destroyed. */
__global__ void nrnran123_cuda_deletestream(nrnran123_State* s) {
    atomicSub(&instance_count_, 1);
}
/* Reports the sequence counter and the index of the next cached output. */
__device__ void nrnran123_getseq(nrnran123_State* s, uint32_t* seq, unsigned char* which) {
    *seq = s->c.v[0];
    *which = s->which_;
}
/* Positions the stream at (seq, which) and regenerates the cached block.
 * An out-of-range `which` (> 3) is clamped to 0. */
__device__ void nrnran123_setseq(nrnran123_State* s, uint32_t seq, unsigned char which) {
    if (which > 3) {
        s->which_ = 0;
    } else {
        s->which_ = which;
    }
    s->c.v[0] = seq;
    s->r = philox4x32(s->c, k);
}
/* Returns the two primary stream identifiers. */
__device__ void nrnran123_getids(nrnran123_State* s, uint32_t* id1, uint32_t* id2) {
    *id1 = s->c.v[2];
    *id2 = s->c.v[3];
}
/* Returns all three stream identifiers. */
__device__ void nrnran123_getids3(nrnran123_State* s, uint32_t* id1, uint32_t* id2, uint32_t* id3) {
    *id3 = s->c.v[1];
    *id1 = s->c.v[2];
    *id2 = s->c.v[3];
}
/* Draws one uniform 32-bit integer; after all four lanes of the cached
 * Philox block are consumed, bumps the sequence counter and refills. */
__device__ uint32_t nrnran123_ipick(nrnran123_State* s) {
    uint32_t rval;
    unsigned char which = s->which_;
    rval = s->r.v[which++];
    if (which > 3) {
        which = 0;
        s->c.v[0]++;
        s->r = philox4x32(s->c, k);
    }
    s->which_ = which;
    return rval;
}
/* Uniform double in the open interval (0,1). */
__device__ double nrnran123_dblpick(nrnran123_State* s) {
    return nrnran123_uint2dbl(nrnran123_ipick(s));
}
/* Exponentially distributed deviate with mean 1. */
__device__ double nrnran123_negexp(nrnran123_State* s) {
    /* min 2.3283064e-10 to max 22.18071 */
    return -log(nrnran123_dblpick(s));
}
/* at cost of a cached value we could compute two at a time. */
/* Standard normal deviate via the Marsaglia polar method (rejection on
 * points outside the unit disk). */
__device__ double nrnran123_normal(nrnran123_State* s) {
    double w, x, y;
    double u1, u2;
    do {
        u1 = nrnran123_dblpick(s);
        u2 = nrnran123_dblpick(s);
        u1 = 2. * u1 - 1.;
        u2 = 2. * u2 - 1.;
        w = (u1 * u1) + (u2 * u2);
    } while (w > 1);
    y = sqrt((-2. * log(w)) / w);
    x = u1 * y;
    return x;
}
__device__ double nrnran123_uint2dbl(uint32_t u) {
    /* 0 to 2^32-1 transforms to double value in open (0,1) interval */
    /* min 2.3283064e-10 to max (1 - 2.3283064e-10) */
    return ((double)u + 1.0) * SHIFT32;
}
/* nrn123 streams are created from cpu launcher routine */
nrnran123_State* nrnran123_newstream(uint32_t id1, uint32_t id2) {
    return nrnran123_newstream3(id1, id2, 0);
}
/* Allocates a stream's state on the device, zeroes it, and initializes it
 * on the GPU with the three stream identifiers. */
nrnran123_State* nrnran123_newstream3(uint32_t id1, uint32_t id2, uint32_t id3) {
    nrnran123_State* s;
    cudaMalloc((void**)&s, sizeof(nrnran123_State));
    /* Bug fix: the old call was cudaMemset((void**)&s, ...), which passed the
     * address of the host-side pointer variable instead of the freshly
     * allocated device buffer, so the state was never actually cleared. */
    cudaMemset(s, 0, sizeof(nrnran123_State));
    nrnran123_setup_cuda_newstream<<<1, 1>>>(s, id1, id2, id3);
    cudaDeviceSynchronize();
    return s;
}
/* nrn123 streams are destroyed from cpu launcher routine */
/* Runs the bookkeeping kernel, waits for it, then frees the device state. */
void nrnran123_deletestream(nrnran123_State* s) {
    nrnran123_cuda_deletestream<<<1, 1>>>(s);
    cudaDeviceSynchronize();
    cudaFree(s);
}
|
d815ac1b4fb2409be1cff33727d9f29035d5f27d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "matrix_matrix_new.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Thread-block shapes to sweep: four square 2D blocks, then 1D blocks of
// 64..1024 threads in steps of 64.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// (XSIZE, YSIZE) matrix extents to benchmark.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
int *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
int n_row = 1;
int n_col = 1;
int n_comm = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
matrix_matrix_new), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,n_row,n_col,n_comm);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
matrix_matrix_new), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,n_row,n_col,n_comm);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
matrix_matrix_new), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,c,n_row,n_col,n_comm);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d815ac1b4fb2409be1cff33727d9f29035d5f27d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "matrix_matrix_new.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
// Thread-block shapes to sweep: four square 2D blocks, then 1D blocks of
// 64..1024 threads in steps of 64.
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
// (XSIZE, YSIZE) matrix extents to benchmark.
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Benchmark driver: sweeps block shapes x matrix sizes and prints the time
// for 1000 launches of matrix_matrix_new as "[usecs,(BX,BY),(X,Y)]".
// argv[1] = number of rows of matrices_ to benchmark.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Bug fix: allocate bytes for XSIZE*YSIZE ints. The old
            // XSIZE*YSIZE-byte allocation was 4x too small for int data.
            int *a = NULL;
            cudaMalloc(&a, XSIZE * YSIZE * sizeof(int));
            int *b = NULL;
            cudaMalloc(&b, XSIZE * YSIZE * sizeof(int));
            int *c = NULL;
            cudaMalloc(&c, XSIZE * YSIZE * sizeof(int));
            int n_row = 1;
            int n_col = 1;
            int n_comm = 1;
            // Round the launch extents up to multiples of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force context creation before any timing
            matrix_matrix_new<<<gridBlock, threadBlock>>>(a, b, c, n_row, n_col, n_comm);
            cudaDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                matrix_matrix_new<<<gridBlock, threadBlock>>>(a, b, c, n_row, n_col, n_comm);
            }
            // Drain warm-up work so it is not billed to the timed region.
            cudaDeviceSynchronize();
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                matrix_matrix_new<<<gridBlock, threadBlock>>>(a, b, c, n_row, n_col, n_comm);
            }
            // Bug fix: kernel launches are asynchronous — without this sync
            // the old code timed only the launch overhead, not execution.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            // Bug fix: free per-configuration buffers (previously leaked on
            // every iteration of the sweep).
            cudaFree(a);
            cudaFree(b);
            cudaFree(c);
        }
    }
}
3bcd9def6d1520c8d689e7b0e5fdcf1ffcfa1f01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2014 BVLC and contributors.
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
// Computes the LRN "scale" term for every (n, h, w) column of the input:
//   scale = 1 + alpha_over_size * sum of squares over a sliding window of
//   `size` channels (pre/post padded) around each channel.
// One thread walks all channels of one spatial location, keeping a running
// sum so each channel costs O(1).
template <typename Dtype>
__global__ void LRNFillScale(const int nthreads, const Dtype* in,
    const int num, const int channels, const int height,
    const int width, const int size, const Dtype alpha_over_size,
    Dtype* scale) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local offset
    int w = index % width;
    int h = (index / width) % height;
    int n = index / width / height;
    int offset = (n * channels * height + h) * width + w;
    int step = height * width;  // stride between consecutive channels
    in += offset;
    scale += offset;
    int head = 0;
    int pre_pad = (size - 1) / 2;
    int post_pad = size - pre_pad - 1;
    Dtype accum_scale = 0;
    // fill the scale at [n, :, h, w]
    // accumulate values
    while (head < post_pad) {
      accum_scale += in[head * step] * in[head * step];
      ++head;
    }
    // until we reach size, nothing needs to be subtracted
    while (head < size) {
      accum_scale += in[head * step] * in[head * step];
      scale[(head - post_pad) * step] = 1. + accum_scale * alpha_over_size;
      ++head;
    }
    // both add and subtract
    while (head < channels) {
      accum_scale += in[head * step] * in[head * step];
      accum_scale -= in[(head - size) * step] * in[(head - size) * step];
      scale[(head - post_pad) * step] = 1. + accum_scale * alpha_over_size;
      ++head;
    }
    // subtract only
    while (head < channels + post_pad) {
      accum_scale -= in[(head - size) * step] * in[(head - size) * step];
      scale[(head - post_pad) * step] = 1. + accum_scale * alpha_over_size;
      ++head;
    }
  }
}
// Forward dispatch: selects the LRN implementation configured in the
// layer's norm_region parameter.
template <typename Dtype>
Dtype LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  switch (this->layer_param_.lrn_param().norm_region()) {
  case LRNParameter_NormRegion_ACROSS_CHANNELS:
    return CrossChannelForward_gpu(bottom, top);
  case LRNParameter_NormRegion_WITHIN_CHANNEL:
    return WithinChannelForward(bottom, top);
  default:
    LOG(FATAL) << "Unknown normalization region.";
    return Dtype(0);
  }
}
// TODO: check if it would be faster to just put it into the previous kernel.
// Elementwise: out = in * scale^(negative_beta).
template <typename Dtype>
__global__ void LRNComputeOutput(const int nthreads, const Dtype* in,
    const Dtype* scale, const Dtype negative_beta, Dtype* out) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    out[index] = in[index] * pow(scale[index], negative_beta);
  }
}
// Across-channel LRN forward: fills scale_ with the windowed sum of squares,
// then computes top = bottom * scale^(-beta).
template <typename Dtype>
Dtype LRNLayer<Dtype>::CrossChannelForward_gpu(
    const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
  // First, compute scale
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = (*top)[0]->mutable_gpu_data();
  Dtype* scale_data = scale_.mutable_gpu_data();
  // We will launch one kernel for each pixel location, and have the kernel
  // go through all the channels.
  int n_threads = num_ * height_ * width_;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( LRNFillScale), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      n_threads, bottom_data, num_, channels_, height_, width_, size_,
      alpha_ / size_, scale_data);
  CUDA_POST_KERNEL_CHECK;
  // Second pass is elementwise over the full blob.
  n_threads = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( LRNComputeOutput), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      n_threads, bottom_data, scale_data, -beta_, top_data);
  CUDA_POST_KERNEL_CHECK;
  return Dtype(0.);
}
// Backward dispatch by normalization region.
template <typename Dtype>
void LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  switch (this->layer_param_.lrn_param().norm_region()) {
  case LRNParameter_NormRegion_ACROSS_CHANNELS:
    CrossChannelBackward_gpu(top, propagate_down, bottom);
    break;
  case LRNParameter_NormRegion_WITHIN_CHANNEL:
    WithinChannelBackward(top, propagate_down, bottom);
    break;
  default:
    LOG(FATAL) << "Unknown normalization region.";
  }
}
// LRN backward kernel: one thread per (n, h, w) column, sweeping channels
// with the same O(1) sliding-window trick as LRNFillScale. Accumulates
// accum_ratio = sum(top_diff * top_data / scale) over the window and emits
//   bottom_diff = top_diff * scale^(-beta)
//                 - cache_ratio * bottom_data * accum_ratio.
template <typename Dtype>
__global__ void LRNComputeDiff(const int nthreads, const Dtype* bottom_data,
    const Dtype* top_data, const Dtype* scale, const Dtype* top_diff,
    const int num, const int channels, const int height,
    const int width, const int size, const Dtype negative_beta,
    const Dtype cache_ratio,
    Dtype* bottom_diff) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local offset
    int w = index % width;
    int h = (index / width) % height;
    int n = index / width / height;
    int offset = (n * channels * height + h) * width + w;
    int step = height * width;  // stride between consecutive channels
    bottom_data += offset;
    top_data += offset;
    scale += offset;
    top_diff += offset;
    bottom_diff += offset;
    int head = 0;
    int pre_pad = size - (size + 1) / 2;
    int post_pad = size - pre_pad - 1;
    Dtype accum_ratio = 0;
    // accumulate values
    while (head < post_pad) {
      accum_ratio += top_diff[head * step] * top_data[head * step] /
          scale[head * step];
      ++head;
    }
    // until we reach size, nothing needs to be subtracted
    while (head < size) {
      accum_ratio += top_diff[head * step] * top_data[head * step] /
          scale[head * step];
      bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
          * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
          bottom_data[(head - post_pad) * step] * accum_ratio;
      ++head;
    }
    // both add and subtract
    while (head < channels) {
      accum_ratio += top_diff[head * step] * top_data[head * step] /
          scale[head * step];
      accum_ratio -= top_diff[(head - size) * step] *
          top_data[(head - size) * step] / scale[(head - size) * step];
      bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
          * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
          bottom_data[(head - post_pad) * step] * accum_ratio;
      ++head;
    }
    // subtract only
    while (head < channels + post_pad) {
      accum_ratio -= top_diff[(head - size) * step] *
          top_data[(head - size) * step] / scale[(head - size) * step];
      bottom_diff[(head - post_pad) * step] = top_diff[(head - post_pad) * step]
          * pow(scale[(head - post_pad) * step], negative_beta) - cache_ratio *
          bottom_data[(head - post_pad) * step] * accum_ratio;
      ++head;
    }
  }
}
// Across-channel LRN backward: single kernel over all (n, h, w) columns.
template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelBackward_gpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  int n_threads = num_ * height_ * width_;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( LRNComputeDiff), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      n_threads, (*bottom)[0]->gpu_data(), top[0]->gpu_data(),
      scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_,
      size_, -beta_, Dtype(2. * alpha_ * beta_ / size_),
      (*bottom)[0]->mutable_gpu_diff());
}
// "Reconstruct" pass: reuses the forward kernels (LRNFillScale +
// LRNComputeOutput), but applied to top[0]'s DIFF buffer, writing the
// result into bottom[0]'s diff.
template <typename Dtype>
Dtype LRNLayer<Dtype>::CrossChannelReconstruct_gpu(
    const vector<Blob<Dtype>*>& top, vector<Blob<Dtype>*>* bottom) {
  // First, compute scale
  Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* scale_data = scale_.mutable_gpu_data();
  // We will launch one kernel for each pixel location, and have the kernel
  // go through all the channels.
  int n_threads = num_ * height_ * width_;
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( LRNFillScale), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      n_threads, top_diff, num_, channels_, height_, width_, size_,
      alpha_ / size_, scale_data);
  CUDA_POST_KERNEL_CHECK;
  n_threads = top[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL(( LRNComputeOutput), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      n_threads, top_diff, scale_data, -beta_, bottom_diff);
  CUDA_POST_KERNEL_CHECK;
  return Dtype(0.);
}
// Reconstruct dispatch; only the across-channel variant is implemented.
template <typename Dtype>
void LRNLayer<Dtype>::Reconstruct_gpu(const vector<Blob<Dtype>*>& top,
    vector<Blob<Dtype>*>* bottom) {
  switch (this->layer_param_.lrn_param().norm_region()) {
  case LRNParameter_NormRegion_ACROSS_CHANNELS:
    CrossChannelReconstruct_gpu(top, bottom);
    break;
  case LRNParameter_NormRegion_WITHIN_CHANNEL:
    NOT_IMPLEMENTED;
    break;
  default:
    LOG(FATAL) << "Unknown normalization region.";
  }
}
INSTANTIATE_CLASS(LRNLayer);
} // namespace caffe
| 3bcd9def6d1520c8d689e7b0e5fdcf1ffcfa1f01.cu | // Copyright 2014 BVLC and contributors.
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void LRNFillScale(const int nthreads, const Dtype* in,
    const int num, const int channels, const int height,
    const int width, const int size, const Dtype alpha_over_size,
    Dtype* scale) {
  // For each (n, h, w) location, computes the LRN scale map
  //   scale[n, c, h, w] = 1 + alpha_over_size * sum_{c' in window(c)} in[n, c', h, w]^2
  // using a running sum over a sliding window of `size` channels, so every
  // input element is read once on entry and once on exit from the window.
  // One thread handles the full channel axis of its pixel location.
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local offset
    int w = index % width;
    int h = (index / width) % height;
    int n = index / width / height;
    int offset = (n * channels * height + h) * width + w;
    int step = height * width;  // distance between consecutive channels
    // Fix: use per-iteration local offset pointers. The original advanced
    // the parameter pointers (`in += offset; scale += offset;`), which
    // accumulates offsets across grid-stride iterations of
    // CUDA_KERNEL_LOOP — a latent bug whenever one thread processes more
    // than one index (currently masked because CAFFE_GET_BLOCKS covers
    // all indices with a single iteration).
    const Dtype* const in_off = in + offset;
    Dtype* const scale_off = scale + offset;
    int head = 0;
    int pre_pad = (size - 1) / 2;
    int post_pad = size - pre_pad - 1;
    Dtype accum_scale = 0;
    // fill the scale at [n, :, h, w]
    // accumulate values: ramp up until the window's trailing edge exists
    while (head < post_pad) {
      accum_scale += in_off[head * step] * in_off[head * step];
      ++head;
    }
    // until we reach size, nothing needs to be subtracted
    while (head < size) {
      accum_scale += in_off[head * step] * in_off[head * step];
      scale_off[(head - post_pad) * step] = 1. + accum_scale * alpha_over_size;
      ++head;
    }
    // both add and subtract: window fully inside the channel range
    while (head < channels) {
      accum_scale += in_off[head * step] * in_off[head * step];
      accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step];
      scale_off[(head - post_pad) * step] = 1. + accum_scale * alpha_over_size;
      ++head;
    }
    // subtract only: drain the window past the last channel
    while (head < channels + post_pad) {
      accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step];
      scale_off[(head - post_pad) * step] = 1. + accum_scale * alpha_over_size;
      ++head;
    }
  }
}
template <typename Dtype>
Dtype LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top) {
  // Forward dispatcher: route to the implementation matching the
  // normalization region requested in the layer parameters.
  const int region = this->layer_param_.lrn_param().norm_region();
  if (region == LRNParameter_NormRegion_ACROSS_CHANNELS) {
    return CrossChannelForward_gpu(bottom, top);
  }
  if (region == LRNParameter_NormRegion_WITHIN_CHANNEL) {
    return WithinChannelForward(bottom, top);
  }
  LOG(FATAL) << "Unknown normalization region.";
  return Dtype(0);
}
// TODO: check if it would be faster to just put it into the previous kernel.
template <typename Dtype>
__global__ void LRNComputeOutput(const int nthreads, const Dtype* in,
    const Dtype* scale, const Dtype negative_beta, Dtype* out) {
  // Element-wise normalization: out[i] = in[i] * scale[i]^negative_beta.
  CUDA_KERNEL_LOOP(i, nthreads) {
    const Dtype normalizer = pow(scale[i], negative_beta);
    out[i] = in[i] * normalizer;
  }
}
template <typename Dtype>
Dtype LRNLayer<Dtype>::CrossChannelForward_gpu(
    const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) {
  // Cross-channel LRN forward pass: build the scale map
  // (1 + (alpha/size) * sliding channel-window sum of squares), then
  // top = bottom * scale^(-beta).
  // First, compute scale
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = (*top)[0]->mutable_gpu_data();
  Dtype* scale_data = scale_.mutable_gpu_data();
  // We will launch one kernel for each pixel location, and have the kernel
  // go through all the channels.
  int n_threads = num_ * height_ * width_;
  // NOLINT_NEXT_LINE(whitespace/operators)
  LRNFillScale<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
      n_threads, bottom_data, num_, channels_, height_, width_, size_,
      alpha_ / size_, scale_data);
  CUDA_POST_KERNEL_CHECK;
  // Second pass is element-wise over the whole blob.
  n_threads = bottom[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  LRNComputeOutput<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
      n_threads, bottom_data, scale_data, -beta_, top_data);
  CUDA_POST_KERNEL_CHECK;
  // Return value is unused by callers; kept for interface symmetry.
  return Dtype(0.);
}
template <typename Dtype>
void LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
  // Backward dispatcher mirroring Forward_gpu: pick the implementation
  // that matches the configured normalization region.
  const int region = this->layer_param_.lrn_param().norm_region();
  if (region == LRNParameter_NormRegion_ACROSS_CHANNELS) {
    CrossChannelBackward_gpu(top, propagate_down, bottom);
  } else if (region == LRNParameter_NormRegion_WITHIN_CHANNEL) {
    WithinChannelBackward(top, propagate_down, bottom);
  } else {
    LOG(FATAL) << "Unknown normalization region.";
  }
}
template <typename Dtype>
__global__ void LRNComputeDiff(const int nthreads, const Dtype* bottom_data,
    const Dtype* top_data, const Dtype* scale, const Dtype* top_diff,
    const int num, const int channels, const int height,
    const int width, const int size, const Dtype negative_beta,
    const Dtype cache_ratio,
    Dtype* bottom_diff) {
  // Cross-channel LRN backward pass. For each (n, h, w) location a single
  // thread walks the channel axis maintaining a sliding-window sum of
  // top_diff * top_data / scale, and writes
  //   bottom_diff = top_diff * scale^negative_beta
  //                 - cache_ratio * bottom_data * accum_ratio
  // where cache_ratio is 2 * alpha * beta / size at the call site.
  CUDA_KERNEL_LOOP(index, nthreads) {
    // find out the local offset
    int w = index % width;
    int h = (index / width) % height;
    int n = index / width / height;
    int offset = (n * channels * height + h) * width + w;
    int step = height * width;  // distance between consecutive channels
    // Fix: use per-iteration local offset pointers. The original advanced
    // the parameter pointers (`bottom_data += offset;` etc.), which
    // accumulates offsets across grid-stride iterations of
    // CUDA_KERNEL_LOOP — a latent bug whenever one thread processes more
    // than one index (currently masked by the CAFFE_GET_BLOCKS launch).
    const Dtype* const b_data = bottom_data + offset;
    const Dtype* const t_data = top_data + offset;
    const Dtype* const s_data = scale + offset;
    const Dtype* const t_diff = top_diff + offset;
    Dtype* const b_diff = bottom_diff + offset;
    int head = 0;
    int pre_pad = size - (size + 1) / 2;
    int post_pad = size - pre_pad - 1;
    Dtype accum_ratio = 0;
    // accumulate values: ramp up the sliding window
    while (head < post_pad) {
      accum_ratio += t_diff[head * step] * t_data[head * step] /
          s_data[head * step];
      ++head;
    }
    // until we reach size, nothing needs to be subtracted
    while (head < size) {
      accum_ratio += t_diff[head * step] * t_data[head * step] /
          s_data[head * step];
      b_diff[(head - post_pad) * step] = t_diff[(head - post_pad) * step]
          * pow(s_data[(head - post_pad) * step], negative_beta) - cache_ratio *
          b_data[(head - post_pad) * step] * accum_ratio;
      ++head;
    }
    // both add and subtract: window fully inside the channel range
    while (head < channels) {
      accum_ratio += t_diff[head * step] * t_data[head * step] /
          s_data[head * step];
      accum_ratio -= t_diff[(head - size) * step] *
          t_data[(head - size) * step] / s_data[(head - size) * step];
      b_diff[(head - post_pad) * step] = t_diff[(head - post_pad) * step]
          * pow(s_data[(head - post_pad) * step], negative_beta) - cache_ratio *
          b_data[(head - post_pad) * step] * accum_ratio;
      ++head;
    }
    // subtract only: drain the window past the last channel
    while (head < channels + post_pad) {
      accum_ratio -= t_diff[(head - size) * step] *
          t_data[(head - size) * step] / s_data[(head - size) * step];
      b_diff[(head - post_pad) * step] = t_diff[(head - post_pad) * step]
          * pow(s_data[(head - post_pad) * step], negative_beta) - cache_ratio *
          b_data[(head - post_pad) * step] * accum_ratio;
      ++head;
    }
  }
}
template <typename Dtype>
void LRNLayer<Dtype>::CrossChannelBackward_gpu(
    const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
    vector<Blob<Dtype>*>* bottom) {
  // One thread per (n, h, w) location; LRNComputeDiff walks the channel
  // axis internally.  The cache_ratio argument is 2 * alpha * beta / size.
  int n_threads = num_ * height_ * width_;
  // NOLINT_NEXT_LINE(whitespace/operators)
  LRNComputeDiff<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
      n_threads, (*bottom)[0]->gpu_data(), top[0]->gpu_data(),
      scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_,
      size_, -beta_, Dtype(2. * alpha_ * beta_ / size_),
      (*bottom)[0]->mutable_gpu_diff());
}
template <typename Dtype>
Dtype LRNLayer<Dtype>::CrossChannelReconstruct_gpu(
    const vector<Blob<Dtype>*>& top, vector<Blob<Dtype>*>* bottom) {
  // Applies the forward LRN normalization to top[0]'s diff buffer and
  // stores the result in bottom[0]'s diff buffer: the scale map is computed
  // from top_diff itself, then bottom_diff = top_diff * scale^-beta.
  // NOTE(review): operates on the *diff* buffers, not data — presumably a
  // reconstruction/visualization path; confirm against the caller.
  // First, compute scale
  Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
  const Dtype* top_diff = top[0]->gpu_diff();
  Dtype* scale_data = scale_.mutable_gpu_data();
  // We will launch one kernel for each pixel location, and have the kernel
  // go through all the channels.
  int n_threads = num_ * height_ * width_;
  // NOLINT_NEXT_LINE(whitespace/operators)
  LRNFillScale<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
      n_threads, top_diff, num_, channels_, height_, width_, size_,
      alpha_ / size_, scale_data);
  CUDA_POST_KERNEL_CHECK;
  // Second pass is element-wise over every value of the blob.
  n_threads = top[0]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  LRNComputeOutput<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>(
      n_threads, top_diff, scale_data, -beta_, bottom_diff);
  CUDA_POST_KERNEL_CHECK;
  // Return value is unused by callers; kept for interface symmetry.
  return Dtype(0.);
}
template <typename Dtype>
void LRNLayer<Dtype>::Reconstruct_gpu(const vector<Blob<Dtype>*>& top,
    vector<Blob<Dtype>*>* bottom) {
  // Dispatch the reconstruction pass on the configured normalization
  // region; only the cross-channel variant is implemented.
  if (this->layer_param_.lrn_param().norm_region() ==
      LRNParameter_NormRegion_ACROSS_CHANNELS) {
    CrossChannelReconstruct_gpu(top, bottom);
  } else if (this->layer_param_.lrn_param().norm_region() ==
             LRNParameter_NormRegion_WITHIN_CHANNEL) {
    NOT_IMPLEMENTED;
  } else {
    LOG(FATAL) << "Unknown normalization region.";
  }
}
INSTANTIATE_CLASS(LRNLayer);
} // namespace caffe
|
50897d764f11e5dec85ec328fdffdd736dced4bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cusp/csr_matrix.h>
#include <cusp/coo_matrix.h>
#include <cusp/io/matrix_market.h>
#include <cusp/krylov/gmres.h>
#include <cusp/monitor.h>
// -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision:
// $Date:
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* pcr_kernel.cu
*
* @brief CUDPP kernel-level PCR tridiagonal solver
*/
/** \addtogroup cudpp_kernel
* @{
*/
/** @name Parallel cyclic reduction solver (PCR)
* @{
*/
/**
* @brief Parallel cyclic reduction solver (PCR)
*
* This kernel solves a tridiagonal linear system using the PCR algorithm.
*
* @param[out] d_x Solution vector
* @param[in] d_a Lower diagonal
* @param[in] d_b Main diagonal
* @param[in] d_c Upper diagonal
* @param[in] d_d Right hand side
*/
/**
 * Solves one tridiagonal system per thread block using parallel cyclic
 * reduction (PCR).
 *
 * System k occupies elements [k*systemSize, (k+1)*systemSize) of d_a
 * (sub-diagonal), d_b (main diagonal), d_c (super-diagonal) and d_d (rhs);
 * the solution is written to the same slice of d_x.
 *
 * Assumptions (not checked):
 *  - blockDim.x == systemSize, one thread per unknown;
 *  - systemSize is a power of two — TODO confirm; the round count comes
 *    from log2(systemSize / 2);
 *  - dynamic shared memory of at least 5 * systemSize * sizeof(T) bytes
 *    (the launch in main() passes (systemSize + 1) * 5 * sizeof(float)).
 */
template <class T>
__global__ void pcrKernel(T *d_a, T *d_b, T *d_c, T *d_d, T *d_x)
{
    int thid = threadIdx.x;  // equation index within this system
    int blid = blockIdx.x;   // which system this block solves
    int delta = 1;           // elimination distance, doubled every round
    const unsigned int systemSize = blockDim.x;
    int iteration = (int)log2(T(systemSize / 2));
    __syncthreads();
    // Shared scratch: five consecutive arrays of systemSize elements each.
    extern __shared__ char shared[];
    T* a = (T*)shared;
    T* b = (T*)&a[systemSize];//
    T* c = (T*)&b[systemSize];
    T* d = (T*)&c[systemSize];
    T* x = (T*)&d[systemSize];
    // Stage this block's system into shared memory.
    a[thid] = d_a[thid + blid * systemSize];
    b[thid] = d_b[thid + blid * systemSize];
    c[thid] = d_c[thid + blid * systemSize];
    d[thid] = d_d[thid + blid * systemSize];
    T aNew, bNew, cNew, dNew;
    __syncthreads();
    //parallel cyclic reduction
    for (int j = 0; j <iteration; j++)
    {
        int i = thid;
        if (i < delta)
        {
            // Top boundary rows: no neighbour at i - delta.
            T tmp2 = c[i] / b[i + delta];
            bNew = b[i] - a[i + delta] * tmp2;
            dNew = d[i] - d[i + delta] * tmp2;
            aNew = 0;
            cNew = -c[i + delta] * tmp2;
        }
        else
        {
            if ((systemSize - i - 1) < delta)
            {
                // Bottom boundary rows: no neighbour at i + delta.
                T tmp = a[i] / b[i - delta];
                bNew = b[i] - c[i - delta] * tmp;
                dNew = d[i] - d[i - delta] * tmp;
                aNew = -a[i - delta] * tmp;
                cNew = 0;
            }
            else
            {
                // Interior rows: eliminate both neighbours at distance delta.
                T tmp1 = a[i] / b[i - delta];
                T tmp2 = c[i] / b[i + delta];
                bNew = b[i] - c[i - delta] * tmp1 - a[i + delta] * tmp2;
                dNew = d[i] - d[i - delta] * tmp1 - d[i + delta] * tmp2;
                aNew = -a[i - delta] * tmp1;
                cNew = -c[i + delta] * tmp2;
            }
        }
        // Barrier between reading neighbours and overwriting the system.
        __syncthreads();
        b[i] = bNew;
        d[i] = dNew;
        a[i] = aNew;
        c[i] = cNew;
        delta *= 2;
        __syncthreads();
    }
    // After the reduction rounds each row is coupled only to the row delta
    // positions away; solve the resulting 2x2 systems directly.
    if (thid < delta)
    {
        int addr1 = thid;
        int addr2 = thid + delta;
        T tmp3 = b[addr2] * b[addr1] - c[addr1] * a[addr2];
        x[addr1] = (b[addr2] * d[addr1] - c[addr1] * d[addr2]) / tmp3;
        x[addr2] = (d[addr2] * b[addr1] - d[addr1] * a[addr2]) / tmp3;
    }
    __syncthreads();
    d_x[thid + blid * systemSize] = x[thid];
}
/**
 * Branch-free variant of pcrKernel: instead of special-casing boundary
 * rows, neighbour indices are clamped arithmetically and the shared arrays
 * carry one extra padding element (index systemSize) so clamped accesses
 * stay in bounds.
 *
 * NOTE(review): the padding element at index systemSize is never
 * initialized, yet clamped right-neighbour reads (b[iRight], c[iRight],
 * d[iRight]) hit it for bottom-boundary rows — the commented-out clamp to
 * systemSize - 1 suggests this changed at some point; verify that results
 * match pcrKernel on boundary rows.
 *
 * Same preconditions as pcrKernel: blockDim.x == systemSize (power of
 * two assumed — TODO confirm), dynamic shared memory of
 * (systemSize + 1) * 5 * sizeof(T) bytes.
 */
template <class T>
__global__ void pcrKernelBranchFree(T *d_a, T *d_b, T *d_c, T *d_d, T *d_x)
{
    int thid = threadIdx.x;  // equation index within this system
    int blid = blockIdx.x;   // which system this block solves
    int delta = 1;           // elimination distance, doubled every round
    const unsigned int systemSize = blockDim.x;
    int iteration = (int)log2(T(systemSize / 2));
    __syncthreads();
    // Shared scratch: five arrays of systemSize + 1 elements (padded).
    extern __shared__ char shared[];
    T* a = (T*)shared;
    T* b = (T*)&a[systemSize + 1];
    T* c = (T*)&b[systemSize + 1];
    T* d = (T*)&c[systemSize + 1];
    T* x = (T*)&d[systemSize + 1];
    // Stage this block's system into shared memory.
    a[thid] = d_a[thid + blid * systemSize];
    b[thid] = d_b[thid + blid * systemSize];
    c[thid] = d_c[thid + blid * systemSize];
    d[thid] = d_d[thid + blid * systemSize];
    T aNew, bNew, cNew, dNew;
    __syncthreads();
    //parallel cyclic reduction
    for (int j = 0; j <iteration; j++)
    {
        int i = thid;
        // Branchless clamp of the right neighbour to the padding slot.
        int iRight = i + delta;
        //iRight = iRight%systemSize;
        // if (iRight >= systemSize) iRight = systemSize - 1;
        iRight = (iRight >= systemSize) * (systemSize)+(!(iRight >= systemSize)) * iRight;
        // Branchless clamp of the left neighbour to row 0.
        int iLeft = i - delta;
        //iLeft = iLeft%systemSize;
        // if (iLeft < 0) iLeft = 0;
        iLeft = (!(iLeft < 0)) * iLeft;
        T tmp1 = a[i] / b[iLeft];
        T tmp2 = c[i] / b[iRight];
        bNew = b[i] - c[iLeft] * tmp1 - a[iRight] * tmp2;
        dNew = d[i] - d[iLeft] * tmp1 - d[iRight] * tmp2;
        aNew = -a[iLeft] * tmp1;
        cNew = -c[iRight] * tmp2;
        // Barrier between reading neighbours and overwriting the system.
        __syncthreads();
        b[i] = bNew;
        d[i] = dNew;
        a[i] = aNew;
        c[i] = cNew;
        delta *= 2;
        __syncthreads();
    }
    // Solve the remaining decoupled 2x2 systems directly.
    if (thid < delta)
    {
        int addr1 = thid;
        int addr2 = thid + delta;
        T tmp3 = b[addr2] * b[addr1] - c[addr1] * a[addr2];
        x[addr1] = (b[addr2] * d[addr1] - c[addr1] * d[addr2]) / tmp3;
        x[addr2] = (d[addr2] * b[addr1] - d[addr1] * a[addr2]) / tmp3;
    }
    __syncthreads();
    d_x[thid + blid * systemSize] = x[thid];
}
/** @} */ // end tridiagonal functions
/** @} */ // end cudpp_kernel
#include "DefaultConfig.h"
#include "CMTask.h"
#include <cusp/dia_matrix.h>
#include <cusp/io/matrix_market.h>
// Driver: generates a random chain tridiagonal system, solves it on the
// GPU with pcrKernel, and dumps matrix / rhs / solution as MatrixMarket
// files.  NOTE(review): no hip error checks after the kernel launch and no
// explicit hipDeviceSynchronize before reading X — presumably the
// cusp/thrust device-to-host copy in write_matrix_market_file
// synchronizes; confirm.
int main()
{
	RandomChainMatrixGenerator::CMatrixGen Generator = CreateDefaultChains();
	vector<RandomChainMatrixGenerator::Chain> Chains = Generator.GetChains();
	RandomChainMatrixGenerator::TriDiagonal Tridiag = Chains[0].GetChainMatrix();
	int RowsNumber = Chains[0].ChainMatrix.GetRows();
	int ColsNumber = Chains[0].ChainMatrix.GetCols();
	int ElementsNumber = Chains[0].ChainMatrix.val.size();
	// Device-side copies of the rhs and of the three diagonals.
	cusp::array1d<float, cusp::device_memory> RightVector = Chains[0].RightVector;
	cusp::array1d<float, cusp::device_memory> X(Chains[0].RightVector.size());
	cusp::array1d<float, cusp::device_memory> A = Tridiag.a;
	cusp::array1d<float, cusp::device_memory> B = Tridiag.b;
	cusp::array1d<float, cusp::device_memory> C = Tridiag.c;
	// Persist the inputs for offline inspection.
	Chains[0].ChainMatrix.WriteMatrixMarketFile("matr.txt");
	RandomChainMatrixGenerator::CMatrixGen::WriteMatrixMarkeFileVector("rv.txt", Chains[0].RightVector);
	// One block per system, one thread per unknown (pcrKernel contract).
	int numSystems = 1;
	int systemSize = Chains[0].RightVector.size();
	const unsigned int num_threads_block = systemSize;
	// setup execution parameters
	dim3 grid(numSystems, 1, 1);
	dim3 threads(num_threads_block, 1, 1);
	float* d_a = thrust::raw_pointer_cast(&A[0]);
	float* d_b = thrust::raw_pointer_cast(&B[0]);
	float* d_c = thrust::raw_pointer_cast(&C[0]);
	float* d_d = thrust::raw_pointer_cast(&RightVector[0]);
	float* d_x = thrust::raw_pointer_cast(&X[0]);
	// Dynamic shared memory: 5 padded arrays of systemSize + 1 floats
	// (also sufficient for the unpadded layout used by pcrKernel).
	hipLaunchKernelGGL(( pcrKernel<float>) , dim3(grid), dim3(threads), (systemSize + 1) * 5 * sizeof(float) , 0, d_a, d_b, d_c, d_d, d_x);
	cusp::io::write_matrix_market_file(X, "xv.txt");
	//pcrKernelBranchFree<<< grid, threads,(systemSize+1)*5*sizeof(T)>>>(d_a, d_b, d_c, d_d, d_x);
	//CMTask Task1;
	//Task1.Chain.val = thrust::raw_pointer_cast(&ChainCSRMatrix.values[0]);
	//Task1.Chain.row_ptr = thrust::raw_pointer_cast(&ChainCSRMatrix.row_offsets[0]);
	//Task1.Chain.col_idx = thrust::raw_pointer_cast(&ChainCSRMatrix.column_indices[0]);
	//Task1.links_num = 1;
	//Task1.RightVector = thrust::raw_pointer_cast(&RightVector[0]);
	//ker <<<1, 5>>>(Task1);
	//pcrKernelBranchFree
	return 0;
} | 50897d764f11e5dec85ec328fdffdd736dced4bc.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cusp/csr_matrix.h>
#include <cusp/coo_matrix.h>
#include <cusp/io/matrix_market.h>
#include <cusp/krylov/gmres.h>
#include <cusp/monitor.h>
// -------------------------------------------------------------
// cuDPP -- CUDA Data Parallel Primitives library
// -------------------------------------------------------------
// $Revision:
// $Date:
// -------------------------------------------------------------
// This source code is distributed under the terms of license.txt
// in the root directory of this source distribution.
// -------------------------------------------------------------
/**
* @file
* pcr_kernel.cu
*
* @brief CUDPP kernel-level PCR tridiagonal solver
*/
/** \addtogroup cudpp_kernel
* @{
*/
/** @name Parallel cyclic reduction solver (PCR)
* @{
*/
/**
* @brief Parallel cyclic reduction solver (PCR)
*
* This kernel solves a tridiagonal linear system using the PCR algorithm.
*
* @param[out] d_x Solution vector
* @param[in] d_a Lower diagonal
* @param[in] d_b Main diagonal
* @param[in] d_c Upper diagonal
* @param[in] d_d Right hand side
*/
/**
 * Solves one tridiagonal system per thread block using parallel cyclic
 * reduction (PCR).
 *
 * System k occupies elements [k*systemSize, (k+1)*systemSize) of d_a
 * (sub-diagonal), d_b (main diagonal), d_c (super-diagonal) and d_d (rhs);
 * the solution is written to the same slice of d_x.
 *
 * Assumptions (not checked):
 *  - blockDim.x == systemSize, one thread per unknown;
 *  - systemSize is a power of two — TODO confirm; the round count comes
 *    from log2(systemSize / 2);
 *  - dynamic shared memory of at least 5 * systemSize * sizeof(T) bytes
 *    (the launch in main() passes (systemSize + 1) * 5 * sizeof(float)).
 */
template <class T>
__global__ void pcrKernel(T *d_a, T *d_b, T *d_c, T *d_d, T *d_x)
{
    int thid = threadIdx.x;  // equation index within this system
    int blid = blockIdx.x;   // which system this block solves
    int delta = 1;           // elimination distance, doubled every round
    const unsigned int systemSize = blockDim.x;
    int iteration = (int)log2(T(systemSize / 2));
    __syncthreads();
    // Shared scratch: five consecutive arrays of systemSize elements each.
    extern __shared__ char shared[];
    T* a = (T*)shared;
    T* b = (T*)&a[systemSize];//
    T* c = (T*)&b[systemSize];
    T* d = (T*)&c[systemSize];
    T* x = (T*)&d[systemSize];
    // Stage this block's system into shared memory.
    a[thid] = d_a[thid + blid * systemSize];
    b[thid] = d_b[thid + blid * systemSize];
    c[thid] = d_c[thid + blid * systemSize];
    d[thid] = d_d[thid + blid * systemSize];
    T aNew, bNew, cNew, dNew;
    __syncthreads();
    //parallel cyclic reduction
    for (int j = 0; j <iteration; j++)
    {
        int i = thid;
        if (i < delta)
        {
            // Top boundary rows: no neighbour at i - delta.
            T tmp2 = c[i] / b[i + delta];
            bNew = b[i] - a[i + delta] * tmp2;
            dNew = d[i] - d[i + delta] * tmp2;
            aNew = 0;
            cNew = -c[i + delta] * tmp2;
        }
        else
        {
            if ((systemSize - i - 1) < delta)
            {
                // Bottom boundary rows: no neighbour at i + delta.
                T tmp = a[i] / b[i - delta];
                bNew = b[i] - c[i - delta] * tmp;
                dNew = d[i] - d[i - delta] * tmp;
                aNew = -a[i - delta] * tmp;
                cNew = 0;
            }
            else
            {
                // Interior rows: eliminate both neighbours at distance delta.
                T tmp1 = a[i] / b[i - delta];
                T tmp2 = c[i] / b[i + delta];
                bNew = b[i] - c[i - delta] * tmp1 - a[i + delta] * tmp2;
                dNew = d[i] - d[i - delta] * tmp1 - d[i + delta] * tmp2;
                aNew = -a[i - delta] * tmp1;
                cNew = -c[i + delta] * tmp2;
            }
        }
        // Barrier between reading neighbours and overwriting the system.
        __syncthreads();
        b[i] = bNew;
        d[i] = dNew;
        a[i] = aNew;
        c[i] = cNew;
        delta *= 2;
        __syncthreads();
    }
    // After the reduction rounds each row is coupled only to the row delta
    // positions away; solve the resulting 2x2 systems directly.
    if (thid < delta)
    {
        int addr1 = thid;
        int addr2 = thid + delta;
        T tmp3 = b[addr2] * b[addr1] - c[addr1] * a[addr2];
        x[addr1] = (b[addr2] * d[addr1] - c[addr1] * d[addr2]) / tmp3;
        x[addr2] = (d[addr2] * b[addr1] - d[addr1] * a[addr2]) / tmp3;
    }
    __syncthreads();
    d_x[thid + blid * systemSize] = x[thid];
}
/**
 * Branch-free variant of pcrKernel: instead of special-casing boundary
 * rows, neighbour indices are clamped arithmetically and the shared arrays
 * carry one extra padding element (index systemSize) so clamped accesses
 * stay in bounds.
 *
 * NOTE(review): the padding element at index systemSize is never
 * initialized, yet clamped right-neighbour reads (b[iRight], c[iRight],
 * d[iRight]) hit it for bottom-boundary rows — the commented-out clamp to
 * systemSize - 1 suggests this changed at some point; verify that results
 * match pcrKernel on boundary rows.
 *
 * Same preconditions as pcrKernel: blockDim.x == systemSize (power of
 * two assumed — TODO confirm), dynamic shared memory of
 * (systemSize + 1) * 5 * sizeof(T) bytes.
 */
template <class T>
__global__ void pcrKernelBranchFree(T *d_a, T *d_b, T *d_c, T *d_d, T *d_x)
{
    int thid = threadIdx.x;  // equation index within this system
    int blid = blockIdx.x;   // which system this block solves
    int delta = 1;           // elimination distance, doubled every round
    const unsigned int systemSize = blockDim.x;
    int iteration = (int)log2(T(systemSize / 2));
    __syncthreads();
    // Shared scratch: five arrays of systemSize + 1 elements (padded).
    extern __shared__ char shared[];
    T* a = (T*)shared;
    T* b = (T*)&a[systemSize + 1];
    T* c = (T*)&b[systemSize + 1];
    T* d = (T*)&c[systemSize + 1];
    T* x = (T*)&d[systemSize + 1];
    // Stage this block's system into shared memory.
    a[thid] = d_a[thid + blid * systemSize];
    b[thid] = d_b[thid + blid * systemSize];
    c[thid] = d_c[thid + blid * systemSize];
    d[thid] = d_d[thid + blid * systemSize];
    T aNew, bNew, cNew, dNew;
    __syncthreads();
    //parallel cyclic reduction
    for (int j = 0; j <iteration; j++)
    {
        int i = thid;
        // Branchless clamp of the right neighbour to the padding slot.
        int iRight = i + delta;
        //iRight = iRight%systemSize;
        // if (iRight >= systemSize) iRight = systemSize - 1;
        iRight = (iRight >= systemSize) * (systemSize)+(!(iRight >= systemSize)) * iRight;
        // Branchless clamp of the left neighbour to row 0.
        int iLeft = i - delta;
        //iLeft = iLeft%systemSize;
        // if (iLeft < 0) iLeft = 0;
        iLeft = (!(iLeft < 0)) * iLeft;
        T tmp1 = a[i] / b[iLeft];
        T tmp2 = c[i] / b[iRight];
        bNew = b[i] - c[iLeft] * tmp1 - a[iRight] * tmp2;
        dNew = d[i] - d[iLeft] * tmp1 - d[iRight] * tmp2;
        aNew = -a[iLeft] * tmp1;
        cNew = -c[iRight] * tmp2;
        // Barrier between reading neighbours and overwriting the system.
        __syncthreads();
        b[i] = bNew;
        d[i] = dNew;
        a[i] = aNew;
        c[i] = cNew;
        delta *= 2;
        __syncthreads();
    }
    // Solve the remaining decoupled 2x2 systems directly.
    if (thid < delta)
    {
        int addr1 = thid;
        int addr2 = thid + delta;
        T tmp3 = b[addr2] * b[addr1] - c[addr1] * a[addr2];
        x[addr1] = (b[addr2] * d[addr1] - c[addr1] * d[addr2]) / tmp3;
        x[addr2] = (d[addr2] * b[addr1] - d[addr1] * a[addr2]) / tmp3;
    }
    __syncthreads();
    d_x[thid + blid * systemSize] = x[thid];
}
/** @} */ // end tridiagonal functions
/** @} */ // end cudpp_kernel
#include "DefaultConfig.h"
#include "CMTask.h"
#include <cusp/dia_matrix.h>
#include <cusp/io/matrix_market.h>
// Driver: generates a random chain tridiagonal system, solves it on the
// GPU with pcrKernel, and dumps matrix / rhs / solution as MatrixMarket
// files.  NOTE(review): no cuda error checks after the kernel launch and
// no explicit cudaDeviceSynchronize before reading X — presumably the
// cusp/thrust device-to-host copy in write_matrix_market_file
// synchronizes; confirm.
int main()
{
	RandomChainMatrixGenerator::CMatrixGen Generator = CreateDefaultChains();
	vector<RandomChainMatrixGenerator::Chain> Chains = Generator.GetChains();
	RandomChainMatrixGenerator::TriDiagonal Tridiag = Chains[0].GetChainMatrix();
	int RowsNumber = Chains[0].ChainMatrix.GetRows();
	int ColsNumber = Chains[0].ChainMatrix.GetCols();
	int ElementsNumber = Chains[0].ChainMatrix.val.size();
	// Device-side copies of the rhs and of the three diagonals.
	cusp::array1d<float, cusp::device_memory> RightVector = Chains[0].RightVector;
	cusp::array1d<float, cusp::device_memory> X(Chains[0].RightVector.size());
	cusp::array1d<float, cusp::device_memory> A = Tridiag.a;
	cusp::array1d<float, cusp::device_memory> B = Tridiag.b;
	cusp::array1d<float, cusp::device_memory> C = Tridiag.c;
	// Persist the inputs for offline inspection.
	Chains[0].ChainMatrix.WriteMatrixMarketFile("matr.txt");
	RandomChainMatrixGenerator::CMatrixGen::WriteMatrixMarkeFileVector("rv.txt", Chains[0].RightVector);
	// One block per system, one thread per unknown (pcrKernel contract).
	int numSystems = 1;
	int systemSize = Chains[0].RightVector.size();
	const unsigned int num_threads_block = systemSize;
	// setup execution parameters
	dim3 grid(numSystems, 1, 1);
	dim3 threads(num_threads_block, 1, 1);
	float* d_a = thrust::raw_pointer_cast(&A[0]);
	float* d_b = thrust::raw_pointer_cast(&B[0]);
	float* d_c = thrust::raw_pointer_cast(&C[0]);
	float* d_d = thrust::raw_pointer_cast(&RightVector[0]);
	float* d_x = thrust::raw_pointer_cast(&X[0]);
	// Dynamic shared memory: 5 padded arrays of systemSize + 1 floats
	// (also sufficient for the unpadded layout used by pcrKernel).
	pcrKernel<float> <<< grid, threads, (systemSize + 1) * 5 * sizeof(float) >>>(d_a, d_b, d_c, d_d, d_x);
	cusp::io::write_matrix_market_file(X, "xv.txt");
	//pcrKernelBranchFree<<< grid, threads,(systemSize+1)*5*sizeof(T)>>>(d_a, d_b, d_c, d_d, d_x);
	//CMTask Task1;
	//Task1.Chain.val = thrust::raw_pointer_cast(&ChainCSRMatrix.values[0]);
	//Task1.Chain.row_ptr = thrust::raw_pointer_cast(&ChainCSRMatrix.row_offsets[0]);
	//Task1.Chain.col_idx = thrust::raw_pointer_cast(&ChainCSRMatrix.column_indices[0]);
	//Task1.links_num = 1;
	//Task1.RightVector = thrust::raw_pointer_cast(&RightVector[0]);
	//ker <<<1, 5>>>(Task1);
	//pcrKernelBranchFree
	return 0;
}
bf59d889ea4dd2c9522435bee759adb032701a70.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Compute_weightx_weighty2_norm0_Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: sweeps matrix sizes and block shapes,
// timing 1000 launches of Compute_weightx_weighty2_norm0_Kernel per
// configuration (after 1 warm-up + 10 untimed launches) and printing
// [microseconds, (blockX, blockY), (XSIZE, YSIZE)] per line.
int main(int argc, char **argv) {
hipSetDevice(0);
// argv[1] selects how many rows of matrices_ to sweep.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): these allocations pass XSIZE*YSIZE BYTES, not elements —
// for float data this is 4x too small; probably should be
// XSIZE*YSIZE*sizeof(float).  Confirm against the kernel's access
// pattern (nPixels is passed as 1 below, which may keep accesses tiny).
float *weightx = NULL;
hipMalloc(&weightx, XSIZE*YSIZE);
float *weighty = NULL;
hipMalloc(&weighty, XSIZE*YSIZE);
const float *absIx = NULL;
hipMalloc(&absIx, XSIZE*YSIZE);
const float *absIy = NULL;
hipMalloc(&absIy, XSIZE*YSIZE);
int nPixels = 1;
float norm_for_smooth_term = XSIZE*YSIZE;
float eps = 1;
// Round the problem size up to a multiple of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// hipFree(0) forces context creation; first launch is an untimed warm-up.
hipFree(0);hipLaunchKernelGGL((
Compute_weightx_weighty2_norm0_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, weightx,weighty,absIx,absIy,nPixels,norm_for_smooth_term,eps);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
Compute_weightx_weighty2_norm0_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, weightx,weighty,absIx,absIy,nPixels,norm_for_smooth_term,eps);
}
// Timed region: 1000 back-to-back launches.  NOTE(review): no
// hipDeviceSynchronize before `end`, so this measures mostly enqueue
// time unless the stream back-pressures; confirm intent.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
Compute_weightx_weighty2_norm0_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, weightx,weighty,absIx,absIy,nPixels,norm_for_smooth_term,eps);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | bf59d889ea4dd2c9522435bee759adb032701a70.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Compute_weightx_weighty2_norm0_Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated benchmark harness: sweeps matrix sizes and block shapes,
// timing 1000 launches of Compute_weightx_weighty2_norm0_Kernel per
// configuration (after 1 warm-up + 10 untimed launches) and printing
// [microseconds, (blockX, blockY), (XSIZE, YSIZE)] per line.
int main(int argc, char **argv) {
cudaSetDevice(0);
// argv[1] selects how many rows of matrices_ to sweep.
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
// NOTE(review): these allocations pass XSIZE*YSIZE BYTES, not elements —
// for float data this is 4x too small; probably should be
// XSIZE*YSIZE*sizeof(float).  Confirm against the kernel's access
// pattern (nPixels is passed as 1 below, which may keep accesses tiny).
float *weightx = NULL;
cudaMalloc(&weightx, XSIZE*YSIZE);
float *weighty = NULL;
cudaMalloc(&weighty, XSIZE*YSIZE);
const float *absIx = NULL;
cudaMalloc(&absIx, XSIZE*YSIZE);
const float *absIy = NULL;
cudaMalloc(&absIy, XSIZE*YSIZE);
int nPixels = 1;
float norm_for_smooth_term = XSIZE*YSIZE;
float eps = 1;
// Round the problem size up to a multiple of the block shape.
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
// cudaFree(0) forces context creation; first launch is an untimed warm-up.
cudaFree(0);
Compute_weightx_weighty2_norm0_Kernel<<<gridBlock,threadBlock>>>(weightx,weighty,absIx,absIy,nPixels,norm_for_smooth_term,eps);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Compute_weightx_weighty2_norm0_Kernel<<<gridBlock,threadBlock>>>(weightx,weighty,absIx,absIy,nPixels,norm_for_smooth_term,eps);
}
// Timed region: 1000 back-to-back launches.  NOTE(review): no
// cudaDeviceSynchronize before `end`, so this measures mostly enqueue
// time unless the stream back-pressures; confirm intent.
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Compute_weightx_weighty2_norm0_Kernel<<<gridBlock,threadBlock>>>(weightx,weighty,absIx,absIy,nPixels,norm_for_smooth_term,eps);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
4a63f7ac8d5abca9173f7faeb5c949c257a6578f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "partitions.cuh"
#include "fill.cuh"
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
namespace NKernel {
    /*
     * Fills parts[b].Size for every partition given the ascending sorted
     * bin ids of `size` documents.  Each thread inspects one boundary
     * between consecutive sorted bins; when the bin id changes, all bins
     * that end there get Size = boundary - parts[b].Offset.  Empty bins
     * (including the tail past the last sorted bin) get Size = 0.
     * Assumes parts[b].Offset was filled first — see the launch order in
     * UpdatePartitionDimensions.
     */
    __global__ void UpdatePartitionSizes(TDataPartition* parts, ui32 partCount,
                                         const int* sortedBins, ui32 size) {
        ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
        while (i < size) {
            int bin0 = sortedBins[i];
            int bin1 = i ? sortedBins[i - 1] : 0;
            if (bin0 != bin1) {
                // Bins bin1..bin0-1 all end at position i.
                int b = bin1;
                while (b < bin0) {
                    parts[b].Size = i - parts[b].Offset;
                    b++;
                }
            }
            if ((i + 1) == size) {
                // Last element: close the final bin and zero the empty tail.
                parts[bin0].Size = size - parts[bin0].Offset;
                int b = bin0 + 1;
                while (b < partCount) {
                    parts[b].Size = 0;
                    b++;
                }
            }
            i += blockDim.x * gridDim.x;
        }
    }
__global__ void ComputeSizes(ui32* beginOffsets, ui32* endOffsets, ui32 count, float* dst) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < count) {
dst[i] = static_cast<float>(endOffsets[i] - beginOffsets[i]);
}
}
    // Writer policy for UpdatePartitionOffsets: stores each bin's begin
    // offset directly into the TDataPartition descriptor array.
    struct TPartitionOffsetWriter {
        using TStorageType = TDataPartition;
        TDataPartition* Parts;
        __device__ TPartitionOffsetWriter(TDataPartition* parts)
            : Parts(parts) {
        }
        // Record that partition `bin` begins at position `offset`.
        __device__ void Write(ui32 bin, ui32 offset) {
            Parts[bin].Offset = offset;
        }
    };
    // Writer policy for UpdatePartitionOffsets: stores each bin's begin
    // offset into a plain ui32 array instead of partition descriptors.
    struct TVecOffsetWriter {
        using TStorageType = ui32;
        ui32* BinOffsets;
        __device__ TVecOffsetWriter(ui32* offsets)
            : BinOffsets(offsets) {
        }
        // Record that bin `bin` begins at position `offset`.
        __device__ void Write(ui32 bin, ui32 offset) {
            BinOffsets[bin] = offset;
        }
    };
template <class TWriter, bool DONT_WRITE_EMPTY_SUFFIX>
__global__ void UpdatePartitionOffsets(typename TWriter::TStorageType* parts, ui32 partCount,
const int* sortedBins, ui32 size) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
int lastBin = DONT_WRITE_EMPTY_SUFFIX ? LdgWithFallback(sortedBins + size - 1, 0) : 1 << 31;
TWriter writer(parts);
while (i < size) {
int bin0 = sortedBins[i];
int bin1 = i ? sortedBins[i - 1] : -1;
if (bin0 != bin1) {
int b = bin0;
while (b > bin1) {
writer.Write(b, i);
b--;
}
}
if (i == (size - 1)) {
int b = bin0 + 1;
while (b < min(lastBin, partCount)) {
writer.Write(b, size);
b++;
}
}
i += blockDim.x * gridDim.x;
}
}
__global__ void ZeroPartitions(TDataPartition* __restrict parts, ui32 partCount)
{
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < partCount) {
parts[i].Size = 0;
parts[i].Offset = 0;
i += blockDim.x * gridDim.x;
}
}
    /*
     * Recomputes Offset and Size of every partition descriptor from the
     * sorted bin ids of `size` documents.  With size == 0 all partCount
     * partitions are reset to {Offset = 0, Size = 0} instead.
     */
    void UpdatePartitionDimensions(TDataPartition* parts, ui32 partCount,
                                   const ui32* sortedBins, ui32 size,
                                   TCudaStream stream)
    {
        const ui32 blockSize = 256;
        const ui32 numBlocks = min((size + blockSize - 1) / blockSize, (ui32)TArchProps::MaxBlockCount());
        if (numBlocks)
        {
            // Order matters: UpdatePartitionSizes reads parts[b].Offset,
            // which the first kernel fills (same stream, so sequenced).
            UpdatePartitionOffsets<TPartitionOffsetWriter, false> << < numBlocks, blockSize, 0, stream >> > (parts, partCount, (int*)sortedBins, size);
            UpdatePartitionSizes << < numBlocks, blockSize, 0, stream >> > (parts, partCount, (int*)sortedBins, size);
        } else {
            const ui32 numBlocksClear = (partCount + blockSize - 1) / blockSize;
            hipLaunchKernelGGL(( ZeroPartitions), dim3(numBlocksClear), dim3(blockSize), 0, stream, parts, partCount);
        }
    }
__global__ void ComputeSegmentSizesImpl(const ui32* beginOffsets, const ui32* endOffsets, ui32 count, float* dst) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < count) {
dst[i] = static_cast<float>(endOffsets[i] - beginOffsets[i]);
}
}
void ComputeSegmentSizes(const ui32* offsets, ui32 size,
float* dst, TCudaStream stream) {
size -= 1;
const ui32* begin = offsets;
const ui32* end = offsets + 1;
const ui32 blockSize = 256;
const ui32 numBlocks = (size + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( ComputeSegmentSizesImpl) , dim3(numBlocks), dim3(blockSize), 0, stream , begin, end, size, dst);
}
    /*
     * Host wrapper: fills partOffsets[b] with the begin offset of every
     * bin in the sorted sequence.  When every document has its own bin
     * (partCount == size) the buffer is pre-filled with `size` and the
     * kernel skips writing the empty suffix; with an empty input the
     * whole buffer is zeroed instead.
     */
    void UpdatePartitionOffsets(ui32* partOffsets, ui32 partCount,
                                const ui32* sortedBins, ui32 size, TCudaStream stream)
    {
        const ui32 blockSize = 256;
        const ui32 numBlocks = min((size + blockSize - 1) / blockSize, (ui32)TArchProps::MaxBlockCount());
        //partOffsets are copyMapping of bins, usually with empty tail
        bool skipSuffixBins = false;
        if (numBlocks)
        {
            if (partCount == size)
            {
                // Pre-fill with `size` so untouched suffix bins read as
                // "begin at end of data".
                FillBuffer(partOffsets, size, size, stream);
                skipSuffixBins = true;
            }
            if (skipSuffixBins)
            {
                UpdatePartitionOffsets<TVecOffsetWriter, true> << < numBlocks, blockSize, 0, stream >> >
                        (partOffsets, partCount, (int*) sortedBins, size);
            } else {
                UpdatePartitionOffsets<TVecOffsetWriter, false> << < numBlocks, blockSize, 0, stream >> > (partOffsets, partCount, (int*) sortedBins, size);
            }
        } else {
            // No data: every bin begins at offset 0.
            FillBuffer(partOffsets, static_cast<ui32>(0), partCount, stream);
        }
    }
}
| 4a63f7ac8d5abca9173f7faeb5c949c257a6578f.cu | #include "partitions.cuh"
#include "fill.cuh"
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
namespace NKernel {
    /*
     * Fills parts[b].Size for every partition given the ascending sorted
     * bin ids of `size` documents.  Each thread inspects one boundary
     * between consecutive sorted bins; when the bin id changes, all bins
     * that end there get Size = boundary - parts[b].Offset.  Empty bins
     * (including the tail past the last sorted bin) get Size = 0.
     * Assumes parts[b].Offset was filled first — see the launch order in
     * UpdatePartitionDimensions.
     */
    __global__ void UpdatePartitionSizes(TDataPartition* parts, ui32 partCount,
                                         const int* sortedBins, ui32 size) {
        ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
        while (i < size) {
            int bin0 = sortedBins[i];
            int bin1 = i ? sortedBins[i - 1] : 0;
            if (bin0 != bin1) {
                // Bins bin1..bin0-1 all end at position i.
                int b = bin1;
                while (b < bin0) {
                    parts[b].Size = i - parts[b].Offset;
                    b++;
                }
            }
            if ((i + 1) == size) {
                // Last element: close the final bin and zero the empty tail.
                parts[bin0].Size = size - parts[bin0].Offset;
                int b = bin0 + 1;
                while (b < partCount) {
                    parts[b].Size = 0;
                    b++;
                }
            }
            i += blockDim.x * gridDim.x;
        }
    }
__global__ void ComputeSizes(ui32* beginOffsets, ui32* endOffsets, ui32 count, float* dst) {
ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < count) {
dst[i] = static_cast<float>(endOffsets[i] - beginOffsets[i]);
}
}
    // Writer policy for UpdatePartitionOffsets: stores each bin's begin
    // offset directly into the TDataPartition descriptor array.
    struct TPartitionOffsetWriter {
        using TStorageType = TDataPartition;
        TDataPartition* Parts;
        __device__ TPartitionOffsetWriter(TDataPartition* parts)
            : Parts(parts) {
        }
        // Record that partition `bin` begins at position `offset`.
        __device__ void Write(ui32 bin, ui32 offset) {
            Parts[bin].Offset = offset;
        }
    };
    // Writer policy for UpdatePartitionOffsets: stores each bin's begin
    // offset into a plain ui32 array instead of partition descriptors.
    struct TVecOffsetWriter {
        using TStorageType = ui32;
        ui32* BinOffsets;
        __device__ TVecOffsetWriter(ui32* offsets)
            : BinOffsets(offsets) {
        }
        // Record that bin `bin` begins at position `offset`.
        __device__ void Write(ui32 bin, ui32 offset) {
            BinOffsets[bin] = offset;
        }
    };
// Writes the begin-offset of every bin through the TWriter policy.
// At each position i where the bin id changes (position 0 compares against a
// -1 sentinel), every bin in (bin1, bin0] starts at i -- empty bins receive
// the offset of the next occupied one. The thread that sees the last element
// also assigns `size` to every bin after the last occupied one, unless
// DONT_WRITE_EMPTY_SUFFIX is set, in which case that tail is left for the
// caller to pre-fill (see the host-side UpdatePartitionOffsets overload).
// NOTE(review): `1 << 31` overflows int (UB in C++) to produce a sentinel that
// must lose the min() comparison; this relies on CUDA's min(int, unsigned)
// overload converting it to a large unsigned value -- fragile, consider a
// named ui32 sentinel. Also reads sortedBins[size - 1] when
// DONT_WRITE_EMPTY_SUFFIX, so size > 0 is assumed -- confirm at call sites.
template <class TWriter, bool DONT_WRITE_EMPTY_SUFFIX>
__global__ void UpdatePartitionOffsets(typename TWriter::TStorageType* parts, ui32 partCount,
                                       const int* sortedBins, ui32 size) {
    ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
    int lastBin = DONT_WRITE_EMPTY_SUFFIX ? LdgWithFallback(sortedBins + size - 1, 0) : 1 << 31;
    TWriter writer(parts);
    while (i < size) {
        int bin0 = sortedBins[i];
        int bin1 = i ? sortedBins[i - 1] : -1;
        if (bin0 != bin1) {
            // i is the first element of bin0; all bins in (bin1, bin0] begin here.
            int b = bin0;
            while (b > bin1) {
                writer.Write(b, i);
                b--;
            }
        }
        if (i == (size - 1)) {
            // Close the empty suffix: bins past the last occupied bin begin at `size`.
            int b = bin0 + 1;
            while (b < min(lastBin, partCount)) {
                writer.Write(b, size);
                b++;
            }
        }
        i += blockDim.x * gridDim.x;
    }
}
// Resets every TDataPartition to the empty state (Offset = 0, Size = 0).
// Grid-stride loop, so any launch configuration is correct.
__global__ void ZeroPartitions(TDataPartition* __restrict parts, ui32 partCount)
{
    const ui32 stride = blockDim.x * gridDim.x;
    for (ui32 p = blockIdx.x * blockDim.x + threadIdx.x; p < partCount; p += stride) {
        parts[p].Offset = 0;
        parts[p].Size = 0;
    }
}
// Host entry point: recomputes both Offset and Size of every partition from a
// sorted bin assignment; with empty input, every partition is simply zeroed.
// All work is enqueued asynchronously on `stream`.
void UpdatePartitionDimensions(TDataPartition* parts, ui32 partCount,
                               const ui32* sortedBins, ui32 size,
                               TCudaStream stream)
{
    const ui32 blockSize = 256;
    // Kernels use grid-stride loops, so the grid is capped at the arch limit.
    const ui32 numBlocks = min((size + blockSize - 1) / blockSize, (ui32)TArchProps::MaxBlockCount());
    if (numBlocks)
    {
        // Order matters: UpdatePartitionSizes reads parts[b].Offset, which the
        // offsets kernel must have written first (same stream => ordered).
        UpdatePartitionOffsets<TPartitionOffsetWriter, false> << < numBlocks, blockSize, 0, stream >> > (parts, partCount, (int*)sortedBins, size);
        UpdatePartitionSizes << < numBlocks, blockSize, 0, stream >> > (parts, partCount, (int*)sortedBins, size);
    } else {
        // size == 0: no elements at all -- every partition becomes empty.
        // NOTE(review): partCount == 0 would launch a zero-block grid here,
        // which is a CUDA launch error; confirm callers guarantee partCount > 0.
        const ui32 numBlocksClear = (partCount + blockSize - 1) / blockSize;
        ZeroPartitions<<<numBlocksClear, blockSize, 0, stream>>>(parts, partCount);
    }
}
// One thread per segment: segment i spans [beginOffsets[i], endOffsets[i]);
// its length is written to dst[i] as float.
__global__ void ComputeSegmentSizesImpl(const ui32* beginOffsets, const ui32* endOffsets, ui32 count, float* dst) {
    ui32 i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < count) {
        dst[i] = static_cast<float>(endOffsets[i] - beginOffsets[i]);
    }
}
// Computes the length of every segment described by `offsets` (`size` offset
// entries describe size - 1 segments) and stores it as float:
//   dst[i] = offsets[i + 1] - offsets[i].
// Work is enqueued asynchronously on `stream`.
void ComputeSegmentSizes(const ui32* offsets, ui32 size,
                         float* dst, TCudaStream stream) {
    // Robustness fix: fewer than two offsets means there are no segments.
    // Previously size == 0 underflowed the ui32 decrement below (huge grid,
    // out-of-bounds reads) and size == 1 launched a zero-block grid, which is
    // an invalid-configuration launch error.
    if (size <= 1) {
        return;
    }
    size -= 1;
    const ui32* begin = offsets;
    const ui32* end = offsets + 1;
    const ui32 blockSize = 256;
    const ui32 numBlocks = (size + blockSize - 1) / blockSize;
    ComputeSegmentSizesImpl <<< numBlocks, blockSize, 0, stream >>> (begin, end, size, dst);
}
// Host entry point: fills partOffsets[b] with the begin-offset of bin b,
// derived from the sorted bin ids. Asynchronous on `stream`.
void UpdatePartitionOffsets(ui32* partOffsets, ui32 partCount,
                            const ui32* sortedBins, ui32 size, TCudaStream stream)
{
    const ui32 blockSize = 256;
    // Grid-stride kernel, so the grid is capped at the architecture limit.
    const ui32 numBlocks = min((size + blockSize - 1) / blockSize, (ui32)TArchProps::MaxBlockCount());
    //partOffsets are copyMapping of bins, usually with empty tail
    bool skipSuffixBins = false;
    if (numBlocks)
    {
        if (partCount == size)
        {
            // Pre-fill every offset with `size` and let the kernel overwrite
            // only the occupied bins; the kernel then skips the empty suffix
            // (DONT_WRITE_EMPTY_SUFFIX = true below).
            FillBuffer(partOffsets, size, size, stream);
            skipSuffixBins = true;
        }
        if (skipSuffixBins)
        {
            UpdatePartitionOffsets<TVecOffsetWriter, true> << < numBlocks, blockSize, 0, stream >> >
                    (partOffsets, partCount, (int*) sortedBins, size);
        } else {
            UpdatePartitionOffsets<TVecOffsetWriter, false> << < numBlocks, blockSize, 0, stream >> > (partOffsets, partCount, (int*) sortedBins, size);
        }
    } else {
        // Empty input: every bin is empty, all offsets are zero.
        FillBuffer(partOffsets, static_cast<ui32>(0), partCount, stream);
    }
}
}
|
bbe4472933cc950f9b986d141b6cc1947cf99816.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Reference code implementing the box blur filter.
Build and execute as follows:
make clean && make
./blur_filter size
Author: Naga Kandasamy
Date created: May 3, 2019
Date modified: February 15, 2021
Student name(s): Kevin Connell, Casey Adams
Date modified: 2/22/2021
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
/* #define DEBUG */
/* Include the kernel code */
#include "blur_filter_kernel.cu"
extern "C" void compute_gold(const image_t, image_t);
void compute_on_device(const image_t, image_t);
int check_results(const float *, const float *, int, float);
void print_image(const image_t);
/* Driver: builds a random size x size image, blurs it on the CPU (reference)
 * and on the GPU, and compares the two results element-wise.
 * Usage: ./blur_filter size */
int main(int argc, char **argv)
{
    if (argc < 2) {
        fprintf(stderr, "Usage: %s size\n", argv[0]);
        fprintf(stderr, "size: Height of the image. The program assumes size x size image.\n");
        exit(EXIT_FAILURE);
    }
    /* Allocate memory for the input and output images */
    int size = atoi(argv[1]);
    fprintf(stderr, "Creating %d x %d images\n", size, size);
    image_t in, out_gold, out_gpu;
    in.size = out_gold.size = out_gpu.size = size;
    in.element = (float *)malloc(sizeof(float) * size * size);
    out_gold.element = (float *)malloc(sizeof(float) * size * size);
    out_gpu.element = (float *)malloc(sizeof(float) * size * size);
    if ((in.element == NULL) || (out_gold.element == NULL) || (out_gpu.element == NULL)) {
        perror("Malloc");
        exit(EXIT_FAILURE);
    }
    /* Populate our image with random values between [-0.5 +0.5] */
    srand(time(NULL));
    int i;
    for (i = 0; i < size * size; i++)
        in.element[i] = rand()/(float)RAND_MAX - 0.5;
    /* Calculate the blur on the CPU. The result is stored in out_gold. */
    fprintf(stderr, "Calculating blur on the CPU\n");
    struct timeval start, stop;
    gettimeofday(&start, NULL);
    compute_gold(in, out_gold);
    gettimeofday(&stop, NULL);
    printf("Execution Time: %f\n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float)1000000));
#ifdef DEBUG
    print_image(in);
    print_image(out_gold);
#endif
    /* Calculate the blur on the GPU. The result is stored in out_gpu. */
    fprintf(stderr, "Calculating blur on the GPU\n");
    compute_on_device(in, out_gpu);
    /* Check CPU and GPU results for correctness (per-element tolerance eps). */
    fprintf(stderr, "Checking CPU and GPU results\n");
    int num_elements = out_gold.size * out_gold.size;
    float eps = 1e-6; /* Do not change */
    int check;
    check = check_results(out_gold.element, out_gpu.element, num_elements, eps);
    if (check == 0)
        fprintf(stderr, "TEST PASSED\n");
    else
        fprintf(stderr, "TEST FAILED\n");
    /* Free data structures on the host */
    free((void *)in.element);
    free((void *)out_gold.element);
    free((void *)out_gpu.element);
    exit(EXIT_SUCCESS);
}
/* FIXME: Complete this function to calculate the blur on the GPU */
/* Calculate the blur on the GPU: copy `in` to the device, launch the blur
 * kernel over a 2D grid of (up to) 32x32 thread blocks, time the kernel, and
 * copy the result back into `out`. */
void compute_on_device(const image_t in, image_t out)
{
    int thread_size = min(in.size, 32); /* up to 1024 threads per block */
    image_t in_OD = in;  /* OD = on_device; shallow copies keep .size */
    image_t out_OD = out;
    hipMalloc((void**)&in_OD.element, in.size * in.size * sizeof(float));
    in_OD.size = in.size;
    hipMemcpy(in_OD.element, in.element, in.size * in.size * sizeof(float), hipMemcpyHostToDevice);
    hipMalloc((void**)&out_OD.element, out.size * out.size * sizeof(float));
    dim3 thread_blocks(thread_size, thread_size, 1);
    /* Bug fix: round the grid UP so image sizes that are not a multiple of the
     * block edge are still fully covered -- the previous truncating division
     * dropped the last partial row/column of tiles. The kernel is expected to
     * bounds-check threads falling outside the image (TODO confirm in
     * blur_filter_kernel.cu). */
    dim3 grid((out.size + thread_blocks.x - 1) / thread_blocks.x,
              (out.size + thread_blocks.y - 1) / thread_blocks.y, 1);
    struct timeval start, stop;
    gettimeofday(&start, NULL);
    hipLaunchKernelGGL(( blur_filter_kernel), dim3(grid), dim3(thread_blocks), 0, 0, in_OD.element, out_OD.element, out_OD.size);
    /* Surface launch-configuration errors, then wait so the elapsed time below
     * measures actual device work. */
    hipError_t err = hipGetLastError();
    if (err != hipSuccess)
        fprintf(stderr, "Kernel launch failed: %s\n", hipGetErrorString(err));
    hipDeviceSynchronize();
    gettimeofday(&stop, NULL);
    printf("Execution Time: %f\n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float)1000000));
    hipMemcpy(out.element, out_OD.element, out.size * out.size * sizeof(float), hipMemcpyDeviceToHost);
    hipFree(in_OD.element);
    hipFree(out_OD.element);
}
/* Check correctness of results */
/* Compare the CPU reference (pix1) against the GPU result (pix2) element-wise.
 * Returns 0 on success, -1 as soon as any element exceeds the tolerance.
 * Bug fix: uses relative error when the reference value is non-zero and falls
 * back to absolute error otherwise -- the original divided by pix1[i]
 * unconditionally, producing inf/NaN whenever a reference pixel was zero. */
int check_results(const float *pix1, const float *pix2, int num_elements, float eps)
{
    int i;
    for (i = 0; i < num_elements; i++) {
        float diff = fabsf(pix1[i] - pix2[i]);
        float ref = fabsf(pix1[i]);
        float err = (ref > 0.0f) ? diff / ref : diff;
        if (err > eps)
            return -1;
    }
    return 0;
}
/* Print out the image contents */
/* Dump the square image to stdout row by row (4 decimal places per pixel),
 * followed by a blank line. */
void print_image(const image_t img)
{
    for (int row = 0; row < img.size; row++) {
        for (int col = 0; col < img.size; col++)
            printf("%0.4f ", img.element[row * img.size + col]);
        printf("\n");
    }
    printf("\n");
}
| bbe4472933cc950f9b986d141b6cc1947cf99816.cu | /* Reference code implementing the box blur filter.
Build and execute as follows:
make clean && make
./blur_filter size
Author: Naga Kandasamy
Date created: May 3, 2019
Date modified: February 15, 2021
Student name(s): Kevin Connell, Casey Adams
Date modified: 2/22/2021
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
/* #define DEBUG */
/* Include the kernel code */
#include "blur_filter_kernel.cu"
extern "C" void compute_gold(const image_t, image_t);
void compute_on_device(const image_t, image_t);
int check_results(const float *, const float *, int, float);
void print_image(const image_t);
/* Driver: builds a random size x size image, blurs it on the CPU (reference)
 * and on the GPU, and compares the two results element-wise.
 * Usage: ./blur_filter size */
int main(int argc, char **argv)
{
    if (argc < 2) {
        fprintf(stderr, "Usage: %s size\n", argv[0]);
        fprintf(stderr, "size: Height of the image. The program assumes size x size image.\n");
        exit(EXIT_FAILURE);
    }
    /* Allocate memory for the input and output images */
    int size = atoi(argv[1]);
    fprintf(stderr, "Creating %d x %d images\n", size, size);
    image_t in, out_gold, out_gpu;
    in.size = out_gold.size = out_gpu.size = size;
    in.element = (float *)malloc(sizeof(float) * size * size);
    out_gold.element = (float *)malloc(sizeof(float) * size * size);
    out_gpu.element = (float *)malloc(sizeof(float) * size * size);
    if ((in.element == NULL) || (out_gold.element == NULL) || (out_gpu.element == NULL)) {
        perror("Malloc");
        exit(EXIT_FAILURE);
    }
    /* Populate our image with random values between [-0.5 +0.5] */
    srand(time(NULL));
    int i;
    for (i = 0; i < size * size; i++)
        in.element[i] = rand()/(float)RAND_MAX - 0.5;
    /* Calculate the blur on the CPU. The result is stored in out_gold. */
    fprintf(stderr, "Calculating blur on the CPU\n");
    struct timeval start, stop;
    gettimeofday(&start, NULL);
    compute_gold(in, out_gold);
    gettimeofday(&stop, NULL);
    printf("Execution Time: %f\n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float)1000000));
#ifdef DEBUG
    print_image(in);
    print_image(out_gold);
#endif
    /* Calculate the blur on the GPU. The result is stored in out_gpu. */
    fprintf(stderr, "Calculating blur on the GPU\n");
    compute_on_device(in, out_gpu);
    /* Check CPU and GPU results for correctness (per-element tolerance eps). */
    fprintf(stderr, "Checking CPU and GPU results\n");
    int num_elements = out_gold.size * out_gold.size;
    float eps = 1e-6; /* Do not change */
    int check;
    check = check_results(out_gold.element, out_gpu.element, num_elements, eps);
    if (check == 0)
        fprintf(stderr, "TEST PASSED\n");
    else
        fprintf(stderr, "TEST FAILED\n");
    /* Free data structures on the host */
    free((void *)in.element);
    free((void *)out_gold.element);
    free((void *)out_gpu.element);
    exit(EXIT_SUCCESS);
}
/* FIXME: Complete this function to calculate the blur on the GPU */
/* Calculate the blur on the GPU: copy `in` to the device, launch the blur
 * kernel over a 2D grid of (up to) 32x32 thread blocks, time the kernel, and
 * copy the result back into `out`. */
void compute_on_device(const image_t in, image_t out)
{
    int thread_size = min(in.size, 32); /* up to 1024 threads per block */
    image_t in_OD = in;  /* OD = on_device; shallow copies keep .size */
    image_t out_OD = out;
    cudaMalloc((void**)&in_OD.element, in.size * in.size * sizeof(float));
    in_OD.size = in.size;
    cudaMemcpy(in_OD.element, in.element, in.size * in.size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&out_OD.element, out.size * out.size * sizeof(float));
    dim3 thread_blocks(thread_size, thread_size, 1);
    /* Bug fix: round the grid UP so image sizes that are not a multiple of the
     * block edge are still fully covered -- the previous truncating division
     * dropped the last partial row/column of tiles. The kernel is expected to
     * bounds-check threads falling outside the image (TODO confirm in
     * blur_filter_kernel.cu). */
    dim3 grid((out.size + thread_blocks.x - 1) / thread_blocks.x,
              (out.size + thread_blocks.y - 1) / thread_blocks.y, 1);
    struct timeval start, stop;
    gettimeofday(&start, NULL);
    blur_filter_kernel<<<grid, thread_blocks>>>(in_OD.element, out_OD.element, out_OD.size);
    /* Surface launch-configuration errors, then wait so the elapsed time below
     * measures actual device work. (cudaThreadSynchronize is deprecated;
     * cudaDeviceSynchronize is its replacement.) */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
    gettimeofday(&stop, NULL);
    printf("Execution Time: %f\n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float)1000000));
    cudaMemcpy(out.element, out_OD.element, out.size * out.size * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(in_OD.element);
    cudaFree(out_OD.element);
}
/* Check correctness of results */
/* Compare the CPU reference (pix1) against the GPU result (pix2) element-wise.
 * Returns 0 on success, -1 as soon as any element exceeds the tolerance.
 * Bug fix: uses relative error when the reference value is non-zero and falls
 * back to absolute error otherwise -- the original divided by pix1[i]
 * unconditionally, producing inf/NaN whenever a reference pixel was zero. */
int check_results(const float *pix1, const float *pix2, int num_elements, float eps)
{
    int i;
    for (i = 0; i < num_elements; i++) {
        float diff = fabsf(pix1[i] - pix2[i]);
        float ref = fabsf(pix1[i]);
        float err = (ref > 0.0f) ? diff / ref : diff;
        if (err > eps)
            return -1;
    }
    return 0;
}
/* Print out the image contents */
/* Dump the square image to stdout row by row (4 decimal places per pixel),
 * followed by a blank line. */
void print_image(const image_t img)
{
    for (int row = 0; row < img.size; row++) {
        for (int col = 0; col < img.size; col++)
            printf("%0.4f ", img.element[row * img.size + col]);
        printf("\n");
    }
    printf("\n");
}
|
b3752b5c67a292a405d6629d83720fa99b6d07c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "helper.hpp"
#define BLOCK_SIZE 512
__global__ void s2g_gpu_scatter_kernel(uint32_t *in, uint32_t *out, int len) {
//@@ INSERT KERNEL CODE HERE
int tx = threadIdx.x, bx = blockIdx.x;
int input_index = bx * BLOCK_SIZE + tx;
if(input_index < len){
uint32_t intermediate = outInvariant(in[input_index]);
for(int output_index = 0; output_index < len; output_index++){
atomicAdd(&(out[output_index]), outDependent(intermediate, input_index, output_index));
}
}
}
static void s2g_cpu_scatter(uint32_t *in, uint32_t *out, int len) {
for (int inIdx = 0; inIdx < len; ++inIdx){
uint32_t intermediate = outInvariant(in[inIdx]);
for (int outIdx = 0; outIdx < len; ++outIdx) {
out[outIdx] += outDependent(intermediate, inIdx, outIdx);
}
}
}
static void s2g_gpu_scatter(uint32_t *in, uint32_t *out, int len) {
//@@ INSERT CODE HERE
//*in deviceInput
//*out deviceOutput
dim3 DimGrid(ceil((len * 1.0)/BLOCK_SIZE),1,1);
dim3 DimBlock(BLOCK_SIZE,1,1);
hipLaunchKernelGGL(( s2g_gpu_scatter_kernel), dim3(DimGrid), dim3(DimBlock), 0, 0, in, out, len);
}
static int eval(int inputLength) {
uint32_t *deviceInput = nullptr;
uint32_t *deviceOutput= nullptr;
const std::string conf_info =
std::string("scatter[len:") + std::to_string(inputLength) + "]";
INFO("Running " << conf_info);
auto hostInput = generate_input(inputLength);
const size_t byteCount = inputLength * sizeof(uint32_t);
timer_start("Allocating GPU memory.");
THROW_IF_ERROR(hipMalloc((void **)&deviceInput, byteCount));
THROW_IF_ERROR(hipMalloc((void **)&deviceOutput, byteCount));
timer_stop();
timer_start("Copying input memory to the GPU.");
THROW_IF_ERROR(hipMemcpy(deviceInput, hostInput.data(), byteCount,
hipMemcpyHostToDevice));
THROW_IF_ERROR(hipMemset(deviceOutput, 0, byteCount));
timer_stop();
//////////////////////////////////////////
// GPU Scatter Computation
//////////////////////////////////////////
timer_start( "Performing GPU Scatter computation");
s2g_gpu_scatter(deviceInput, deviceOutput, inputLength);
timer_stop();
std::vector<uint32_t> hostOutput(inputLength);
timer_start( "Copying output memory to the CPU");
THROW_IF_ERROR(hipMemcpy(hostOutput.data(), deviceOutput, byteCount,
hipMemcpyDeviceToHost));
timer_stop();
auto expected = compute_output(hostInput, inputLength);
verify(expected, hostOutput);
hipFree(deviceInput);
hipFree(deviceOutput);
return 0;
}
TEST_CASE("Scatter", "[scatter]") {
SECTION("[inputSize:1024]") {
eval(1024);
}
SECTION("[inputSize:2048]") {
eval(2048);
}
SECTION("[inputSize:2047]") {
eval(2047);
}
SECTION("[inputSize:2049]") {
eval(2049);
}
SECTION("[inputSize:9101]") {
eval(9101);
}
SECTION("[inputSize:9910]") {
eval(9910);
}
SECTION("[inputSize:8192]") {
eval(8192);
}
SECTION("[inputSize:8193]") {
eval(8193);
}
SECTION("[inputSize:8191]") {
eval(8191);
}
SECTION("[inputSize:16191]") {
eval(16191);
}
}
| b3752b5c67a292a405d6629d83720fa99b6d07c2.cu | #include "helper.hpp"
#define BLOCK_SIZE 512
// Scatter kernel: each thread owns one input element and adds its contribution
// to EVERY output element via atomicAdd. That is O(len) atomics per thread
// (O(len^2) total) with heavy contention per output slot -- this is the
// deliberately naive scatter formulation the exercise compares against gather.
// `in`/`out` are device pointers; `out` must be zero-initialized by the caller.
__global__ void s2g_gpu_scatter_kernel(uint32_t *in, uint32_t *out, int len) {
    int tx = threadIdx.x, bx = blockIdx.x;
    int input_index = bx * BLOCK_SIZE + tx;
    if (input_index < len) {  // guard the partial last block
        // Hoist the part of the contribution that depends only on the input.
        uint32_t intermediate = outInvariant(in[input_index]);
        for (int output_index = 0; output_index < len; output_index++) {
            atomicAdd(&(out[output_index]), outDependent(intermediate, input_index, output_index));
        }
    }
}
// Sequential CPU reference for the scatter computation: every input element i
// contributes outDependent(outInvariant(in[i]), i, j) to every output slot j.
static void s2g_cpu_scatter(uint32_t *in, uint32_t *out, int len) {
    for (int src = 0; src < len; src++) {
        const uint32_t invariant = outInvariant(in[src]);
        for (int dst = 0; dst < len; dst++)
            out[dst] += outDependent(invariant, src, dst);
    }
}
// Host-side launcher: one thread per input element, BLOCK_SIZE threads per
// block, grid rounded up to cover `len` (the kernel guards the partial block).
// `in`/`out` are device pointers; `out` must already be zeroed (see eval()).
static void s2g_gpu_scatter(uint32_t *in, uint32_t *out, int len) {
    dim3 DimGrid(ceil((len * 1.0)/BLOCK_SIZE),1,1);
    dim3 DimBlock(BLOCK_SIZE,1,1);
    s2g_gpu_scatter_kernel<<<DimGrid, DimBlock>>>(in, out, len);
}
// Runs one scatter configuration end to end:
//   generate input -> H2D copy -> GPU scatter -> D2H copy -> compare with the
//   CPU reference from compute_output(). Always returns 0; failures surface
//   through THROW_IF_ERROR and the assertions inside verify().
static int eval(int inputLength) {
    uint32_t *deviceInput = nullptr;
    uint32_t *deviceOutput= nullptr;
    const std::string conf_info =
        std::string("scatter[len:") + std::to_string(inputLength) + "]";
    INFO("Running " << conf_info);
    auto hostInput = generate_input(inputLength);
    const size_t byteCount = inputLength * sizeof(uint32_t);
    timer_start("Allocating GPU memory.");
    THROW_IF_ERROR(cudaMalloc((void **)&deviceInput, byteCount));
    THROW_IF_ERROR(cudaMalloc((void **)&deviceOutput, byteCount));
    timer_stop();
    timer_start("Copying input memory to the GPU.");
    THROW_IF_ERROR(cudaMemcpy(deviceInput, hostInput.data(), byteCount,
                              cudaMemcpyHostToDevice));
    // Output must start at zero: the kernel accumulates with atomicAdd.
    THROW_IF_ERROR(cudaMemset(deviceOutput, 0, byteCount));
    timer_stop();
    //////////////////////////////////////////
    // GPU Scatter Computation
    //////////////////////////////////////////
    timer_start( "Performing GPU Scatter computation");
    timer_stop();
    std::vector<uint32_t> hostOutput(inputLength);
    timer_start( "Copying output memory to the CPU");
    // Blocking copy: also synchronizes with (and surfaces errors from) the kernel.
    THROW_IF_ERROR(cudaMemcpy(hostOutput.data(), deviceOutput, byteCount,
                              cudaMemcpyDeviceToHost));
    timer_stop();
    auto expected = compute_output(hostInput, inputLength);
    verify(expected, hostOutput);
    cudaFree(deviceInput);
    cudaFree(deviceOutput);
    return 0;
}
TEST_CASE("Scatter", "[scatter]") {
SECTION("[inputSize:1024]") {
eval(1024);
}
SECTION("[inputSize:2048]") {
eval(2048);
}
SECTION("[inputSize:2047]") {
eval(2047);
}
SECTION("[inputSize:2049]") {
eval(2049);
}
SECTION("[inputSize:9101]") {
eval(9101);
}
SECTION("[inputSize:9910]") {
eval(9910);
}
SECTION("[inputSize:8192]") {
eval(8192);
}
SECTION("[inputSize:8193]") {
eval(8193);
}
SECTION("[inputSize:8191]") {
eval(8191);
}
SECTION("[inputSize:16191]") {
eval(16191);
}
}
|
ec9e88f810a33479e51342dbfc7c3eafd1b7eac7.hip | // !!! This is a file automatically generated by hipify!!!
/**********************************************************************
* DESCRIPTION:
* Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <hip/hip_runtime.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
void check_param(void);
void init_line(void);
void __global__ update (void);
void printfinal (void);
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS+2], /* values at time t */
oldval[MAXPOINTS+2], /* values at time (t-dt) */
newval[MAXPOINTS+2]; /* values at time (t+dt) */
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
/* Validates the global tpoints/nsteps, prompting interactively on stdin until
 * both fall inside their legal ranges ([MINPOINTS, MAXPOINTS] and
 * [1, MAXSTEPS] respectively).
 * NOTE(review): scanf("%s", tchar) into a 20-byte buffer has no field-width
 * limit -- a longer token overflows tchar; consider "%19s". */
void check_param(void)
{
    char tchar[20];
    /* check number of points, number of iterations */
    while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
        printf("Enter number of points along vibrating string [%d-%d]: "
               ,MINPOINTS, MAXPOINTS);
        scanf("%s", tchar);
        tpoints = atoi(tchar);
        if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
            printf("Invalid. Please enter value between %d and %d\n",
                   MINPOINTS, MAXPOINTS);
    }
    while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
        printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
        scanf("%s", tchar);
        nsteps = atoi(tchar);
        if ((nsteps < 1) || (nsteps > MAXSTEPS))
            printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
    }
    printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
/* Initialize points on line: sample one period of a sine across the string
 * (points 1..tpoints) and copy it into the history array oldval[].
 * Note: main() has this call commented out -- the GPU kernel regenerates the
 * same initial values per thread, so this CPU version is effectively unused. */
void init_line(void)
{
    int i, j;
    float x, fac, k, tmp;
    /* Calculate initial values based on sine curve */
    fac = 2.0 * PI;
    k = 0.0;
    tmp = tpoints - 1;
    for (j = 1; j <= tpoints; j++) {
        x = k/tmp;
        values[j] = sin (fac * x);
        k = k + 1.0;
    }
    /* Initialize old values array */
    for (i = 1; i <= tpoints; i++)
        oldval[i] = values[i];
}
/**********************************************************************
* Calculate new values using wave equation
*********************************************************************/
/* CPU-side single-point update of the wave equation, kept from the serial
 * version. Unused on the GPU path -- the update() kernel inlines the same
 * arithmetic with sqtau = 0.09 (= (c*dtime/dx)^2) hard-coded. */
void do_math(int i)
{
    float dtime, c, dx, tau, sqtau;
    dtime = 0.3;
    c = 1.0;
    dx = 1.0;
    tau = (c * dtime / dx);
    sqtau = tau * tau;
    /* Two-step leapfrog update; in this formulation only the -2*values[i]
       term of the spatial stencil is applied. */
    newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0)*values[i]);
}
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
/* One thread per string point, addressed 1-based (threadID in [1, tpoints]).
 * Instead of sharing arrays across time steps, each thread regenerates its
 * initial sine value locally and iterates the wave equation nsteps times
 * entirely in registers, then writes only its final value to values_gpu.
 * 0.09 is sqtau = (c*dtime/dx)^2 with dtime=0.3, c=1, dx=1 (see do_math).
 * NOTE(review): thread 0 also passes the threadID <= tpoints guard and writes
 * values_gpu[0] from a garbage x = -1/tmp; harmless since index 0 is never
 * printed, but the guard could be tightened to 1 <= threadID. */
__global__ void update(float *values_gpu,int tpoints,int nsteps)
{
    int threadID;
    threadID=blockIdx.x*blockDim.x+threadIdx.x;
    if(threadID<=tpoints){
        float values_t;   /* value at time t */
        float new_val;    /* value at time t+dt */
        float old_val;    /* value at time t-dt */
        float x,fac,tmp;
        fac=2.0*PI;
        tmp=tpoints-1;
        x=(float)(threadID-1)/tmp;
        values_t=sin(fac*x);
        old_val=values_t;
        for(int i=1;i<=nsteps;i++){
            /* Boundary points are clamped to zero every step. */
            if((threadID==1)||(threadID==tpoints))
                new_val=0.0;
            else
                new_val=(2.0*values_t)-old_val+(0.09*(-2.0*values_t));
            old_val=values_t;
            values_t=new_val;
        }
        values_gpu[threadID]=values_t;
    }
}
/**********************************************************************
* Print final results
*********************************************************************/
/* Print the final displacement of each of the tpoints string points,
 * ten values per line. */
void printfinal()
{
    for (int idx = 1; idx <= tpoints; idx++) {
        printf("%6.4f ", values[idx]);
        if (idx % 10 == 0)
            printf("\n");
    }
}
/**********************************************************************
* Main program
*********************************************************************/
/* Host driver: parse tpoints/nsteps, pick a launch configuration from device
 * properties, run the update kernel once, copy back and print the result. */
int main(int argc, char *argv[])
{
    /* Robustness fix: the original read argv[1]/argv[2] unconditionally.
     * check_param() re-prompts interactively when values are missing/invalid. */
    if (argc >= 3) {
        sscanf(argv[1], "%d", &tpoints);
        sscanf(argv[2], "%d", &nsteps);
    }
    float *values_gpu;
    int size;
    check_param();
    printf("Initializing points on the line...\n");
    //init_line();  /* not needed: update() regenerates initial values on device */
    printf("Updating all points for all time steps...\n");
    int count;
    int maxThreadsPerBlock = 256; /* safe default if no device property is read */
    hipGetDeviceCount(&count);
    int i;
    for (i = 0; i < count; i++) {
        hipDeviceProp_t prop;
        if (hipGetDeviceProperties(&prop, i) == hipSuccess) {
            maxThreadsPerBlock = prop.maxThreadsPerBlock;
        }
    }
    /* Bug fix: the loop exits with i == count, one past the last valid device
     * ordinal, so hipSetDevice(i) selected a nonexistent device. Use the last
     * device actually probed. */
    hipSetDevice(count > 0 ? count - 1 : 0);
    size = (1 + tpoints) * sizeof(float);
    hipMalloc((void**)&values_gpu, size);
    /* Bug fix: the kernel uses thread ids 1..tpoints (tpoints + 1 ids counting
     * the unused 0); the old truncating tpoints/maxThreadsPerBlock grid
     * launched too few blocks, leaving the tail of the string uncomputed and
     * launching zero blocks when tpoints < maxThreadsPerBlock. Round up. */
    int numBlocks = (tpoints + maxThreadsPerBlock) / maxThreadsPerBlock;
    hipLaunchKernelGGL(( update), dim3(numBlocks), dim3(maxThreadsPerBlock), 0, 0, values_gpu, tpoints, nsteps);
    hipMemcpy(values, values_gpu, size, hipMemcpyDeviceToHost);
    hipFree(values_gpu); /* was leaked */
    printf("Printing final results...\n");
    printfinal();
    printf("\nDone.\n\n");
    return 0;
}
| ec9e88f810a33479e51342dbfc7c3eafd1b7eac7.cu | /**********************************************************************
* DESCRIPTION:
* Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
void check_param(void);
void init_line(void);
void __global__ update (void);
void printfinal (void);
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS+2], /* values at time t */
oldval[MAXPOINTS+2], /* values at time (t-dt) */
newval[MAXPOINTS+2]; /* values at time (t+dt) */
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
void check_param(void)
{
char tchar[20];
/* check number of points, number of iterations */
while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
printf("Enter number of points along vibrating string [%d-%d]: "
,MINPOINTS, MAXPOINTS);
scanf("%s", tchar);
tpoints = atoi(tchar);
if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
printf("Invalid. Please enter value between %d and %d\n",
MINPOINTS, MAXPOINTS);
}
while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
scanf("%s", tchar);
nsteps = atoi(tchar);
if ((nsteps < 1) || (nsteps > MAXSTEPS))
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
void init_line(void)
{
int i, j;
float x, fac, k, tmp;
/* Calculate initial values based on sine curve */
fac = 2.0 * PI;
k = 0.0;
tmp = tpoints - 1;
for (j = 1; j <= tpoints; j++) {
x = k/tmp;
values[j] = sin (fac * x);
k = k + 1.0;
}
/* Initialize old values array */
for (i = 1; i <= tpoints; i++)
oldval[i] = values[i];
}
/**********************************************************************
* Calculate new values using wave equation
*********************************************************************/
void do_math(int i)
{
float dtime, c, dx, tau, sqtau;
dtime = 0.3;
c = 1.0;
dx = 1.0;
tau = (c * dtime / dx);
sqtau = tau * tau;
newval[i] = (2.0 * values[i]) - oldval[i] + (sqtau * (-2.0)*values[i]);
}
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
/* One thread per string point, addressed 1-based (threadID in [1, tpoints]).
 * Instead of sharing arrays across time steps, each thread regenerates its
 * initial sine value locally and iterates the wave equation nsteps times
 * entirely in registers, then writes only its final value to values_gpu.
 * 0.09 is sqtau = (c*dtime/dx)^2 with dtime=0.3, c=1, dx=1 (see do_math).
 * NOTE(review): thread 0 also passes the threadID <= tpoints guard and writes
 * values_gpu[0] from a garbage x = -1/tmp; harmless since index 0 is never
 * printed, but the guard could be tightened to 1 <= threadID. */
__global__ void update(float *values_gpu,int tpoints,int nsteps)
{
    int threadID;
    threadID=blockIdx.x*blockDim.x+threadIdx.x;
    if(threadID<=tpoints){
        float values_t;   /* value at time t */
        float new_val;    /* value at time t+dt */
        float old_val;    /* value at time t-dt */
        float x,fac,tmp;
        fac=2.0*PI;
        tmp=tpoints-1;
        x=(float)(threadID-1)/tmp;
        values_t=sin(fac*x);
        old_val=values_t;
        for(int i=1;i<=nsteps;i++){
            /* Boundary points are clamped to zero every step. */
            if((threadID==1)||(threadID==tpoints))
                new_val=0.0;
            else
                new_val=(2.0*values_t)-old_val+(0.09*(-2.0*values_t));
            old_val=values_t;
            values_t=new_val;
        }
        values_gpu[threadID]=values_t;
    }
}
/**********************************************************************
* Print final results
*********************************************************************/
void printfinal()
{
int i;
for (i = 1; i <= tpoints; i++) {
printf("%6.4f ", values[i]);
if (i%10 == 0)
printf("\n");
}
}
/**********************************************************************
* Main program
*********************************************************************/
/* Host driver: parse tpoints/nsteps, pick a launch configuration from device
 * properties, run the update kernel once, copy back and print the result. */
int main(int argc, char *argv[])
{
    /* Robustness fix: the original read argv[1]/argv[2] unconditionally.
     * check_param() re-prompts interactively when values are missing/invalid. */
    if (argc >= 3) {
        sscanf(argv[1], "%d", &tpoints);
        sscanf(argv[2], "%d", &nsteps);
    }
    float *values_gpu;
    int size;
    check_param();
    printf("Initializing points on the line...\n");
    //init_line();  /* not needed: update() regenerates initial values on device */
    printf("Updating all points for all time steps...\n");
    int count;
    int maxThreadsPerBlock = 256; /* safe default if no device property is read */
    cudaGetDeviceCount(&count);
    int i;
    for (i = 0; i < count; i++) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
            maxThreadsPerBlock = prop.maxThreadsPerBlock;
        }
    }
    /* Bug fix: the loop exits with i == count, one past the last valid device
     * ordinal, so cudaSetDevice(i) selected a nonexistent device. Use the last
     * device actually probed. */
    cudaSetDevice(count > 0 ? count - 1 : 0);
    size = (1 + tpoints) * sizeof(float);
    cudaMalloc((void**)&values_gpu, size);
    /* Bug fix: the kernel uses thread ids 1..tpoints (tpoints + 1 ids counting
     * the unused 0); the old truncating tpoints/maxThreadsPerBlock grid
     * launched too few blocks, leaving the tail of the string uncomputed and
     * launching zero blocks when tpoints < maxThreadsPerBlock. Round up. */
    int numBlocks = (tpoints + maxThreadsPerBlock) / maxThreadsPerBlock;
    update<<<numBlocks, maxThreadsPerBlock>>>(values_gpu, tpoints, nsteps);
    cudaMemcpy(values, values_gpu, size, cudaMemcpyDeviceToHost);
    cudaFree(values_gpu); /* was leaked */
    printf("Printing final results...\n");
    printfinal();
    printf("\nDone.\n\n");
    return 0;
}
|
44b4ffc12e5ad33c22a4eea9ee7e411ff50a4cfb.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/gather.hpp>
#include <groupby/sort/group_single_pass_reduction_util.cuh>
#include <thrust/transform.h>
namespace cudf {
namespace groupby {
namespace detail {
// Group-wise ARGMAX: for each of the `num_groups` groups, returns the row
// index -- in the ORIGINAL (pre-sort) order of `values` -- of the group's
// maximum element. Rows whose index slot holds ARGMAX_SENTINEL gather as null.
// Precondition (established by the sort-based groupby pipeline): `values` is
// sorted by key, group_labels[i] is the group of sorted row i, and
// key_sort_order maps sorted positions back to original row indices.
std::unique_ptr<column> group_argmax(column_view const& values,
                                     size_type num_groups,
                                     rmm::device_vector<size_type> const& group_labels,
                                     column_view const& key_sort_order,
                                     rmm::mr::device_memory_resource* mr,
                                     hipStream_t stream)
{
    // Per-group index of the maximum within the *sorted* order; this is an
    // intermediate result, hence the default resource rather than `mr`.
    auto indices = type_dispatcher(values.type(),
                                   reduce_functor<aggregation::ARGMAX>{},
                                   values,
                                   num_groups,
                                   group_labels,
                                   rmm::mr::get_default_resource(),
                                   stream);
    // The functor returns the index of maximum in the sorted values.
    // We need the index of maximum in the original unsorted values.
    // So use indices to gather the sort order used to sort `values`.
    // Gather map cannot be null so we make a view with the mask removed.
    // The values in data buffer of indices corresponding to null values was
    // initialized to ARGMAX_SENTINEL which is an out of bounds index value (-1)
    // and causes the gathered value to be null.
    column_view null_removed_indices(
        data_type(type_to_id<size_type>()),
        indices->size(),
        static_cast<void const*>(indices->view().template data<size_type>()));
    auto result_table =
        cudf::detail::gather(table_view({key_sort_order}),
                             null_removed_indices,
                             indices->nullable() ? cudf::detail::out_of_bounds_policy::IGNORE
                                                 : cudf::detail::out_of_bounds_policy::NULLIFY,
                             cudf::detail::negative_index_policy::NOT_ALLOWED,
                             mr,
                             stream);
    // The gathered table has a single column: the per-group argmax result.
    return std::move(result_table->release()[0]);
}
} // namespace detail
} // namespace groupby
} // namespace cudf
| 44b4ffc12e5ad33c22a4eea9ee7e411ff50a4cfb.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/detail/gather.hpp>
#include <groupby/sort/group_single_pass_reduction_util.cuh>
#include <thrust/transform.h>
namespace cudf {
namespace groupby {
namespace detail {
// Group-wise ARGMAX: for each of the `num_groups` groups, returns the row
// index -- in the ORIGINAL (pre-sort) order of `values` -- of the group's
// maximum element. Rows whose index slot holds ARGMAX_SENTINEL gather as null.
// Precondition (established by the sort-based groupby pipeline): `values` is
// sorted by key, group_labels[i] is the group of sorted row i, and
// key_sort_order maps sorted positions back to original row indices.
std::unique_ptr<column> group_argmax(column_view const& values,
                                     size_type num_groups,
                                     rmm::device_vector<size_type> const& group_labels,
                                     column_view const& key_sort_order,
                                     rmm::mr::device_memory_resource* mr,
                                     cudaStream_t stream)
{
    // Per-group index of the maximum within the *sorted* order; this is an
    // intermediate result, hence the default resource rather than `mr`.
    auto indices = type_dispatcher(values.type(),
                                   reduce_functor<aggregation::ARGMAX>{},
                                   values,
                                   num_groups,
                                   group_labels,
                                   rmm::mr::get_default_resource(),
                                   stream);
    // The functor returns the index of maximum in the sorted values.
    // We need the index of maximum in the original unsorted values.
    // So use indices to gather the sort order used to sort `values`.
    // Gather map cannot be null so we make a view with the mask removed.
    // The values in data buffer of indices corresponding to null values was
    // initialized to ARGMAX_SENTINEL which is an out of bounds index value (-1)
    // and causes the gathered value to be null.
    column_view null_removed_indices(
        data_type(type_to_id<size_type>()),
        indices->size(),
        static_cast<void const*>(indices->view().template data<size_type>()));
    auto result_table =
        cudf::detail::gather(table_view({key_sort_order}),
                             null_removed_indices,
                             indices->nullable() ? cudf::detail::out_of_bounds_policy::IGNORE
                                                 : cudf::detail::out_of_bounds_policy::NULLIFY,
                             cudf::detail::negative_index_policy::NOT_ALLOWED,
                             mr,
                             stream);
    // The gathered table has a single column: the per-group argmax result.
    return std::move(result_table->release()[0]);
}
} // namespace detail
} // namespace groupby
} // namespace cudf
|
8bde3e00b8bd0fd07a1fa15266c6ee447c2021de.hip | // !!! This is a file automatically generated by hipify!!!
// Using CUDA device to calculate pi
#include <stdio.h>
#include <hip/hip_runtime.h>
extern "C" double getTime(void);
#define NBIN 1000000000 // Number of bins
// Kernel that executes on the CUDA device
// Midpoint-rule integration of 4/(1+x^2) over [0,1] (which equals pi).
// Grid-stride loop: thread `idx` accumulates bins idx, idx + T, idx + 2T, ...
// (T = nthreads*nblocks) into its private slot sum[idx]; the host later adds
// the partial sums and multiplies by `step`.
// Requires sum[] to be zero-initialized by the caller (see the hipMemset in main).
__global__ void cal_pi(double *sum, int nbin, double step, int nthreads, int nblocks) {
    int i;
    double x;
    int idx = blockIdx.x*blockDim.x+threadIdx.x; // Sequential thread index across the blocks
    for (i=idx; i< nbin; i+=nthreads*nblocks) {
        x = (i+0.5)*step;  // bin midpoint
        sum[idx] += 4.0/(1.0+x*x);
    }
}
// Main routine that executes on the host
// Main routine that executes on the host: sweeps block/thread configurations,
// computing pi on the GPU for each and printing the result plus wall time.
int main(void) {
    const int SIZE_BLOCK = 8;
    const int SIZE_THREAD = 10;
    int NUM_BLOCK_ARRAY[SIZE_BLOCK] = {60, 120, 180, 240, 300, 360, 420, 600};
    int NUM_THREAD_ARRAY[SIZE_THREAD] = {16, 32, 48, 64, 80, 96, 112, 128, 144, 160};
    for (int i = 0; i < SIZE_BLOCK; i++) {
        for (int j = 0; j < SIZE_THREAD; j++) {
            int NUM_BLOCK = NUM_BLOCK_ARRAY[i];  // Number of thread blocks
            int NUM_THREAD = NUM_THREAD_ARRAY[j];  // Number of threads per block
            dim3 dimGrid(NUM_BLOCK,1,1);  // Grid dimensions
            dim3 dimBlock(NUM_THREAD,1,1);  // Block dimensions
            double *sumHost, *sumDev;  // Pointer to host & device arrays
            double pi = 0;
            int tid;
            double step = 1.0/NBIN;  // Step size
            // One partial sum per launched thread.
            size_t size = NUM_BLOCK*NUM_THREAD*sizeof(double); //Array memory size
            sumHost = (double *)malloc(size);  // Allocate array on host
            hipMalloc((void **) &sumDev, size);  // Allocate array on device
            double start = getTime();
            // Initialize array in device to 0 (cal_pi accumulates with +=)
            hipMemset(sumDev, 0, size);
            // Do calculation on device
            hipLaunchKernelGGL(( cal_pi) , dim3(dimGrid), dim3(dimBlock), 0, 0, sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK); // call CUDA kernel
            // Retrieve result from device and store it in host array.
            // hipMemcpy blocks until the kernel has finished, so the timing
            // below covers the full computation.
            hipMemcpy(sumHost, sumDev, size, hipMemcpyDeviceToHost);
            // Reduce the per-thread partial sums on the host.
            for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++)
                pi += sumHost[tid];
            pi *= step;  // scale by bin width to finish the midpoint rule
            // Print results
            double delta = getTime() - start;
            printf("%d %d\n", NUM_BLOCK, NUM_THREAD);
            printf("PI = %.16g computed in %.4g seconds\n", pi, delta);
            // Cleanup (per-configuration, so each iteration starts fresh)
            free(sumHost);
            hipFree(sumDev);
        }
    }
    return 0;
}
| 8bde3e00b8bd0fd07a1fa15266c6ee447c2021de.cu | // Using CUDA device to calculate pi
#include <stdio.h>
#include <cuda.h>
extern "C" double getTime(void);
#define NBIN 1000000000 // Number of bins
// Kernel that executes on the CUDA device
// Midpoint-rule integration of 4/(1+x^2) over [0,1]; thread `idx` handles
// bins idx, idx+T, idx+2T, ... where T = nthreads*nblocks, accumulating its
// partial sum into sum[idx].  Requires the launch to match nblocks x nthreads
// and sum[] to be zero-initialized by the caller.
__global__ void cal_pi(double *sum, int nbin, double step, int nthreads, int nblocks) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;  // flat thread id
    const int total = nthreads * nblocks;                   // grid-wide stride
    for (int bin = idx; bin < nbin; bin += total) {
        const double mid = (bin + 0.5) * step;              // bin midpoint
        sum[idx] += 4.0 / (1.0 + mid * mid);
    }
}
// Main routine that executes on the host
// Main routine that executes on the host: sweeps block/thread configurations,
// computing pi on the GPU for each and printing the result plus wall time.
int main(void) {
    const int SIZE_BLOCK = 8;
    const int SIZE_THREAD = 10;
    int NUM_BLOCK_ARRAY[SIZE_BLOCK] = {60, 120, 180, 240, 300, 360, 420, 600};
    int NUM_THREAD_ARRAY[SIZE_THREAD] = {16, 32, 48, 64, 80, 96, 112, 128, 144, 160};
    for (int i = 0; i < SIZE_BLOCK; i++) {
        for (int j = 0; j < SIZE_THREAD; j++) {
            int NUM_BLOCK = NUM_BLOCK_ARRAY[i];  // Number of thread blocks
            int NUM_THREAD = NUM_THREAD_ARRAY[j];  // Number of threads per block
            dim3 dimGrid(NUM_BLOCK,1,1);  // Grid dimensions
            dim3 dimBlock(NUM_THREAD,1,1);  // Block dimensions
            double *sumHost, *sumDev;  // Pointer to host & device arrays
            double pi = 0;
            int tid;
            double step = 1.0/NBIN;  // Step size
            // One partial sum per launched thread.
            size_t size = NUM_BLOCK*NUM_THREAD*sizeof(double); //Array memory size
            sumHost = (double *)malloc(size);  // Allocate array on host
            cudaMalloc((void **) &sumDev, size);  // Allocate array on device
            double start = getTime();
            // Initialize array in device to 0 (cal_pi accumulates with +=)
            cudaMemset(sumDev, 0, size);
            // Do calculation on device
            cal_pi <<<dimGrid, dimBlock>>> (sumDev, NBIN, step, NUM_THREAD, NUM_BLOCK); // call CUDA kernel
            // Retrieve result from device and store it in host array.
            // cudaMemcpy blocks until the kernel has finished, so the timing
            // below covers the full computation.
            cudaMemcpy(sumHost, sumDev, size, cudaMemcpyDeviceToHost);
            // Reduce the per-thread partial sums on the host.
            for(tid=0; tid<NUM_THREAD*NUM_BLOCK; tid++)
                pi += sumHost[tid];
            pi *= step;  // scale by bin width to finish the midpoint rule
            // Print results
            double delta = getTime() - start;
            printf("%d %d\n", NUM_BLOCK, NUM_THREAD);
            printf("PI = %.16g computed in %.4g seconds\n", pi, delta);
            // Cleanup (per-configuration, so each iteration starts fresh)
            free(sumHost);
            cudaFree(sumDev);
        }
    }
    return 0;
}
|
ee2658f50b609ac65923d48c5d4429528b4d557f.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include "util.hpp"
// Target function for the Newton solve: f(x) = e^{cos x} - 2.
// __host__ __device__ so the CPU reference and the GPU kernel share one definition.
__host__ __device__
double f(double x) {
    return exp(cos(x))-2;
};
// Analytic derivative of f: f'(x) = -sin(x) * e^{cos x}.
__host__ __device__
double fp(double x) {
    return -sin(x) * exp(cos(x));
};
// implements newton solve for
// f(x) = 0
// where
// f(x) = exp(cos(x)) - 2
// CPU reference: applies 5 Newton iterations  x <- x - f(x)/f'(x)  to each of
// the n entries of x, in place.  The iteration count is fixed; convergence is
// verified by the caller via |f(x)| after the fact.
void newton_host(int n, double *x) {
    for(int i=0; i<n; ++i) {
        auto x0 = x[i];
        for(int iter=0; iter<5; ++iter) {
            x0 -= f(x0)/fp(x0);
        }
        x[i] = x0;
    }
}
// TODO : implement newton_device() kernel that performs the work in newton_host
// in parallel on the GPU
// GPU version of newton_host: one thread per array element, each running the
// same fixed 5 Newton iterations in place.  The guard handles grids that
// overshoot n (n need not be a multiple of the block size).
__global__
void newton_device(int n, double *x) {
    auto i = threadIdx.x + blockIdx.x * blockDim.x;
    if(i >= n)
        return;
    auto x0 = x[i];
    for(int iter=0; iter<5; ++iter) {
        x0 -= f(x0)/fp(x0);
    }
    x[i] = x0;
}
// Driver: solves f(x)=0 for 2^pow starting points on the GPU, timing the
// host->device copy, the kernel, and the device->host copy separately, then
// verifies every result satisfies |f(x)| <= 1e-10.
int main(int argc, char** argv) {
    // Problem size is 2^pow, with pow taken from argv[1] (default 20).
    size_t pow = read_arg(argc, argv, 1, 20);
    size_t n = 1 << pow;
    auto size_in_bytes = n * sizeof(double);
    std::cout << "memory copy overlap test of length n = " << n
              << " : " << size_in_bytes/1e9 << "MB\n";
    // Driver-API style context initialization (hipified from cuInit).
    hipInit(0);
    // malloc_device/malloc_host come from util.hpp; xh is initialized to 1.5,
    // the common starting guess for every Newton solve.
    double* xd = malloc_device<double>(n);
    double* xh = malloc_host<double>(n, 1.5);
    double* x = malloc_host<double>(n);
    // compute kernel launch configuration (ceil-div so the last partial
    // block is covered; the kernel guards against i >= n)
    auto block_dim = 128;
    auto grid_dim = (n+block_dim-1)/block_dim;
    auto time_h2d = -get_time();
    copy_to_device(xh, xd, n);
    time_h2d += get_time();
    hipDeviceSynchronize();
    auto time_kernel = -get_time();
    // Launch the Newton kernel over all n elements.
    hipLaunchKernelGGL(( newton_device), dim3(grid_dim), dim3(block_dim), 0, 0, n, xd);
    // Synchronize so time_kernel measures execution, not just the enqueue.
    hipDeviceSynchronize();
    time_kernel += get_time();
    auto time_d2h = -get_time();
    copy_to_host(xd, x, n);
    time_d2h += get_time();
    std::cout << "-------\ntimings\n-------\n";
    std::cout << "H2D    : " << time_h2d << " s\n";
    std::cout << "D2H    : " << time_d2h << " s\n";
    std::cout << "kernel : " << time_kernel << " s\n";
    // check for errors: every solved root must satisfy |f(x)| <= 1e-10
    auto errors = 0;
    for(auto i=0; i<n; ++i) {
        if(::fabs(f(x[i]))>1e-10) {
            errors++;
        }
    }
    if(errors>0) std::cout << "\n============ FAILED with " << errors << " errors\n";
    else         std::cout << "\n============ PASSED\n";
    // NOTE(review): xh/x come from malloc_host (util.hpp) — confirm plain
    // free() is the matching deallocator for that helper.
    hipFree(xd);
    free(xh);
    free(x);
    return 0;
}
| ee2658f50b609ac65923d48c5d4429528b4d557f.cu | #include <iostream>
#include <cuda.h>
#include "util.hpp"
// Target function for the Newton solve: f(x) = e^{cos x} - 2.
// Compiled for both host (CPU reference) and device (kernel).
__host__ __device__
double f(double x) {
    double c = cos(x);
    return exp(c) - 2.0;
}
// Analytic derivative of f: f'(x) = -sin(x) * e^{cos x}.
// Compiled for both host and device.
__host__ __device__
double fp(double x) {
    return -(sin(x) * exp(cos(x)));
}
// implements newton solve for
// f(x) = 0
// where
// f(x) = exp(cos(x)) - 2
// CPU reference: runs five Newton iterations  x <- x - f(x)/f'(x)  on every
// entry of the n-element array x, updating it in place.
void newton_host(int n, double *x) {
    for (double *p = x; p != x + n; ++p) {
        double guess = *p;
        int iter = 5;
        while (iter-- > 0) {
            guess -= f(guess) / fp(guess);
        }
        *p = guess;
    }
}
// TODO : implement newton_device() kernel that performs the work in newton_host
// in parallel on the GPU
// GPU version of newton_host: one thread per array element, each running the
// same fixed 5 Newton iterations in place.  The guard handles grids that
// overshoot n (n need not be a multiple of the block size).
__global__
void newton_device(int n, double *x) {
    auto i = threadIdx.x + blockIdx.x * blockDim.x;
    if(i >= n)
        return;
    auto x0 = x[i];
    for(int iter=0; iter<5; ++iter) {
        x0 -= f(x0)/fp(x0);
    }
    x[i] = x0;
}
// Driver: solves f(x)=0 for 2^pow starting points on the GPU, timing the
// host->device copy, the kernel, and the device->host copy separately, then
// verifies every result satisfies |f(x)| <= 1e-10.
int main(int argc, char** argv) {
    // Problem size is 2^pow, with pow taken from argv[1] (default 20).
    size_t pow = read_arg(argc, argv, 1, 20);
    size_t n = 1 << pow;
    auto size_in_bytes = n * sizeof(double);
    std::cout << "memory copy overlap test of length n = " << n
              << " : " << size_in_bytes/1e9 << "MB\n";
    // Driver-API context initialization.
    cuInit(0);
    // malloc_device/malloc_host come from util.hpp; xh is initialized to 1.5,
    // the common starting guess for every Newton solve.
    double* xd = malloc_device<double>(n);
    double* xh = malloc_host<double>(n, 1.5);
    double* x = malloc_host<double>(n);
    // compute kernel launch configuration (ceil-div so the last partial
    // block is covered; the kernel guards against i >= n)
    auto block_dim = 128;
    auto grid_dim = (n+block_dim-1)/block_dim;
    auto time_h2d = -get_time();
    copy_to_device(xh, xd, n);
    time_h2d += get_time();
    cudaDeviceSynchronize();
    auto time_kernel = -get_time();
    // Launch the Newton kernel over all n elements.
    newton_device<<<grid_dim, block_dim>>>(n, xd);
    // Synchronize so time_kernel measures execution, not just the enqueue.
    cudaDeviceSynchronize();
    time_kernel += get_time();
    auto time_d2h = -get_time();
    copy_to_host(xd, x, n);
    time_d2h += get_time();
    std::cout << "-------\ntimings\n-------\n";
    std::cout << "H2D    : " << time_h2d << " s\n";
    std::cout << "D2H    : " << time_d2h << " s\n";
    std::cout << "kernel : " << time_kernel << " s\n";
    // check for errors: every solved root must satisfy |f(x)| <= 1e-10
    auto errors = 0;
    for(auto i=0; i<n; ++i) {
        if(std::fabs(f(x[i]))>1e-10) {
            errors++;
        }
    }
    if(errors>0) std::cout << "\n============ FAILED with " << errors << " errors\n";
    else         std::cout << "\n============ PASSED\n";
    // NOTE(review): xh/x come from malloc_host (util.hpp) — confirm plain
    // free() is the matching deallocator for that helper.
    cudaFree(xd);
    free(xh);
    free(x);
    return 0;
}
|
72362adfc35591ab4a9a08a40bc07169c93ffafe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cutil_inline.h>
#include "histogram_common.h"
////////////////////////////////////////////////////////////////////////////////
// GPU-specific common definitions
////////////////////////////////////////////////////////////////////////////////
//Data type used for input data fetches
typedef uint4 data_t;
//May change on future hardware, so better parametrize the code
#define SHARED_MEMORY_BANKS 16
////////////////////////////////////////////////////////////////////////////////
// Main computation pass: compute gridDim.x partial histograms
////////////////////////////////////////////////////////////////////////////////
//Count a byte into shared-memory storage
// Count one 6-bit value into this thread's private shared-memory histogram.
// s_ThreadBase points at the thread's slot inside s_Hist; per-thread bin
// counters are strided by HISTOGRAM64_THREADBLOCK_SIZE (see histogram64Kernel).
inline __device__ void addByte(uchar *s_ThreadBase, uint data){
    s_ThreadBase[UMUL(data, HISTOGRAM64_THREADBLOCK_SIZE)]++;
}
//Count four bytes of a word
// Count all four bytes packed in a 32-bit word.
// Only the higher 6 bits of each byte matter, as this is a 64-bin histogram;
// the shifts extract bits [7:2] of bytes 0..3 respectively.
inline __device__ void addWord(uchar *s_ThreadBase, uint data){
    addByte(s_ThreadBase, (data >>  2) & 0x3FU);
    addByte(s_ThreadBase, (data >> 10) & 0x3FU);
    addByte(s_ThreadBase, (data >> 18) & 0x3FU);
    addByte(s_ThreadBase, (data >> 26) & 0x3FU);
}
// Computes one partial 64-bin histogram per block into d_PartialHistograms.
// Each thread keeps a private byte-sized histogram in shared memory (so a
// single thread must not see more than 255 occurrences of any bin — the host
// wrapper sizes the grid accordingly), then the first 64 threads reduce the
// per-thread histograms into per-block bins.
__global__ void histogram64Kernel(uint *d_PartialHistograms, data_t *d_Data, uint dataCount){
    //Encode thread index in order to avoid bank conflicts in s_Hist[] access:
    //each group of SHARED_MEMORY_BANKS threads accesses consecutive shared memory banks
    //and the same bytes [0..3] within the banks
    //Because of this permutation block size should be a multiple of 4 * SHARED_MEMORY_BANKS
    const uint threadPos =
        ( (threadIdx.x & ~(SHARED_MEMORY_BANKS * 4 - 1)) << 0 ) |
        ( (threadIdx.x & (SHARED_MEMORY_BANKS     - 1)) << 2 ) |
        ( (threadIdx.x & (SHARED_MEMORY_BANKS * 3    )) >> 4 );
    //Per-thread histogram storage (one uchar column per thread)
    __shared__ uchar s_Hist[HISTOGRAM64_THREADBLOCK_SIZE * HISTOGRAM64_BIN_COUNT];
    uchar *s_ThreadBase = s_Hist + threadPos;
    //Initialize shared memory (writing 32-bit words)
    #pragma unroll
    for(uint i = 0; i < (HISTOGRAM64_BIN_COUNT / 4); i++)
        ((uint *)s_Hist)[threadIdx.x + i * HISTOGRAM64_THREADBLOCK_SIZE] = 0;
    //Read data from global memory and submit to the shared-memory histogram
    //Since histogram counters are byte-sized, every single thread can't do more than 255 submission
    __syncthreads();
    // Grid-stride loop over uint4 words; each word contributes 16 counted bytes.
    for(uint pos = UMAD(blockIdx.x, blockDim.x, threadIdx.x); pos < dataCount; pos += UMUL(blockDim.x, gridDim.x)){
        data_t data = d_Data[pos];
        addWord(s_ThreadBase, data.x);
        addWord(s_ThreadBase, data.y);
        addWord(s_ThreadBase, data.z);
        addWord(s_ThreadBase, data.w);
    }
    //Accumulate per-thread histograms into per-block and write to global memory
    __syncthreads();
    if(threadIdx.x < HISTOGRAM64_BIN_COUNT){
        uchar *s_HistBase = s_Hist + UMUL(threadIdx.x, HISTOGRAM64_THREADBLOCK_SIZE);
        uint sum = 0;
        // Start at a lane-dependent offset (and wrap) so the 64 reducing
        // threads do not all hit the same shared-memory bank at once.
        uint pos = 4 * (threadIdx.x & (SHARED_MEMORY_BANKS - 1));
        #pragma unroll
        for(uint i = 0; i < (HISTOGRAM64_THREADBLOCK_SIZE / 4); i++){
            sum +=
                s_HistBase[pos + 0] +
                s_HistBase[pos + 1] +
                s_HistBase[pos + 2] +
                s_HistBase[pos + 3];
            pos = (pos + 4) & (HISTOGRAM64_THREADBLOCK_SIZE - 1);
        }
        d_PartialHistograms[blockIdx.x * HISTOGRAM64_BIN_COUNT + threadIdx.x] = sum;
    }
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram64() output
// Run one threadblock per bin; each threadbock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram64
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
#define MERGE_THREADBLOCK_SIZE 256
// Merges histogram64Kernel() output: one block per bin; each block sums that
// bin across all histogramCount partial histograms, then tree-reduces the
// per-thread partial sums in shared memory.  Launch with gridDim.x ==
// HISTOGRAM64_BIN_COUNT and blockDim.x == MERGE_THREADBLOCK_SIZE.
__global__ void mergeHistogram64Kernel(
    uint *d_Histogram,
    uint *d_PartialHistograms,
    uint histogramCount
){
    __shared__ uint data[MERGE_THREADBLOCK_SIZE];
    // Strided pass: each thread accumulates every MERGE_THREADBLOCK_SIZE-th
    // partial histogram's value of this block's bin (blockIdx.x).
    uint sum = 0;
    for(uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
        sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM64_BIN_COUNT];
    data[threadIdx.x] = sum;
    // Standard shared-memory tree reduction; barrier before each halving.
    for(uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1){
        __syncthreads();
        if(threadIdx.x < stride)
            data[threadIdx.x] += data[threadIdx.x + stride];
    }
    if(threadIdx.x == 0)
        d_Histogram[blockIdx.x] = data[0];
}
////////////////////////////////////////////////////////////////////////////////
// CPU interface to GPU histogram calculator
////////////////////////////////////////////////////////////////////////////////
//histogram64kernel() intermediate results buffer
//MAX_PARTIAL_HISTOGRAM64_COUNT == 32768 and HISTOGRAM64_THREADBLOCK_SIZE == 64
//amounts to max. 480MB of input data
static const uint MAX_PARTIAL_HISTOGRAM64_COUNT = 32768;
static uint *d_PartialHistograms;
//Internal memory allocation
// Internal memory allocation: reserves the worst-case partial-histogram
// buffer once, up front.  The assert enforces the block-size constraint the
// bank-conflict permutation in histogram64Kernel relies on.
extern "C" void initHistogram64(void){
    assert( HISTOGRAM64_THREADBLOCK_SIZE % (4 * SHARED_MEMORY_BANKS) == 0 );
    cutilSafeCall( hipMalloc((void **)&d_PartialHistograms, MAX_PARTIAL_HISTOGRAM64_COUNT * HISTOGRAM64_BIN_COUNT * sizeof(uint)) );
}
//Internal memory deallocation
// Internal memory deallocation: releases the buffer from initHistogram64().
extern "C" void closeHistogram64(void){
    cutilSafeCall( hipFree(d_PartialHistograms) );
}
//Round a / b to nearest higher integer value
//Round a / b up to the nearest integer (ceiling division; assumes b > 0).
//Written without a + b - 1 so it cannot overflow for large a.
inline uint iDivUp(uint a, uint b){
    uint q = a / b;
    if (a % b != 0) ++q;
    return q;
}
//Snap a to nearest lower multiple of b
//Snap a down to the nearest multiple of b (assumes b > 0).
inline uint iSnapDown(uint a, uint b){
    return (a / b) * b;
}
// Host entry point: builds a 64-bin histogram of byteCount bytes at d_Data.
// Two passes: per-block partial histograms, then a merge kernel.  byteCount
// must be a multiple of sizeof(data_t) (16), and small enough that the grid
// fits MAX_PARTIAL_HISTOGRAM64_COUNT blocks.
extern "C" void histogram64(
    uint *d_Histogram,
    void *d_Data,
    uint byteCount
){
    // Size the grid so each thread processes at most iSnapDown(255, 16) = 240
    // bytes — the byte-sized shared counters in the kernel cap a single
    // thread at 255 increments per bin.
    const uint histogramCount = iDivUp(byteCount, HISTOGRAM64_THREADBLOCK_SIZE * iSnapDown(255, sizeof(data_t)));
    assert( byteCount % sizeof(data_t) == 0 );
    assert( histogramCount <= MAX_PARTIAL_HISTOGRAM64_COUNT );
    hipLaunchKernelGGL(( histogram64Kernel), dim3(histogramCount), dim3(HISTOGRAM64_THREADBLOCK_SIZE), 0, 0,
        d_PartialHistograms,
        (data_t *)d_Data,
        byteCount / sizeof(data_t)
    );
    cutilCheckMsg("histogram64Kernel() execution failed\n");
    // One block per bin sums that bin across all partial histograms.
    hipLaunchKernelGGL(( mergeHistogram64Kernel), dim3(HISTOGRAM64_BIN_COUNT), dim3(MERGE_THREADBLOCK_SIZE), 0, 0,
        d_Histogram,
        d_PartialHistograms,
        histogramCount
    );
    cutilCheckMsg("mergeHistogram64() execution failed\n");
}
| 72362adfc35591ab4a9a08a40bc07169c93ffafe.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cutil_inline.h>
#include "histogram_common.h"
////////////////////////////////////////////////////////////////////////////////
// GPU-specific common definitions
////////////////////////////////////////////////////////////////////////////////
//Data type used for input data fetches
typedef uint4 data_t;
//May change on future hardware, so better parametrize the code
#define SHARED_MEMORY_BANKS 16
////////////////////////////////////////////////////////////////////////////////
// Main computation pass: compute gridDim.x partial histograms
////////////////////////////////////////////////////////////////////////////////
//Count a byte into shared-memory storage
// Count one 6-bit value into this thread's private shared-memory histogram.
// s_ThreadBase points at the thread's slot inside s_Hist; per-thread bin
// counters are strided by HISTOGRAM64_THREADBLOCK_SIZE (see histogram64Kernel).
inline __device__ void addByte(uchar *s_ThreadBase, uint data){
    s_ThreadBase[UMUL(data, HISTOGRAM64_THREADBLOCK_SIZE)]++;
}
//Count four bytes of a word
// Count all four bytes packed in a 32-bit word.
// Only the higher 6 bits of each byte matter, as this is a 64-bin histogram;
// the shifts extract bits [7:2] of bytes 0..3 respectively.
inline __device__ void addWord(uchar *s_ThreadBase, uint data){
    addByte(s_ThreadBase, (data >>  2) & 0x3FU);
    addByte(s_ThreadBase, (data >> 10) & 0x3FU);
    addByte(s_ThreadBase, (data >> 18) & 0x3FU);
    addByte(s_ThreadBase, (data >> 26) & 0x3FU);
}
// Computes one partial 64-bin histogram per block into d_PartialHistograms.
// Each thread keeps a private byte-sized histogram in shared memory (so a
// single thread must not see more than 255 occurrences of any bin — the host
// wrapper sizes the grid accordingly), then the first 64 threads reduce the
// per-thread histograms into per-block bins.
__global__ void histogram64Kernel(uint *d_PartialHistograms, data_t *d_Data, uint dataCount){
    //Encode thread index in order to avoid bank conflicts in s_Hist[] access:
    //each group of SHARED_MEMORY_BANKS threads accesses consecutive shared memory banks
    //and the same bytes [0..3] within the banks
    //Because of this permutation block size should be a multiple of 4 * SHARED_MEMORY_BANKS
    const uint threadPos =
        ( (threadIdx.x & ~(SHARED_MEMORY_BANKS * 4 - 1)) << 0 ) |
        ( (threadIdx.x & (SHARED_MEMORY_BANKS     - 1)) << 2 ) |
        ( (threadIdx.x & (SHARED_MEMORY_BANKS * 3    )) >> 4 );
    //Per-thread histogram storage (one uchar column per thread)
    __shared__ uchar s_Hist[HISTOGRAM64_THREADBLOCK_SIZE * HISTOGRAM64_BIN_COUNT];
    uchar *s_ThreadBase = s_Hist + threadPos;
    //Initialize shared memory (writing 32-bit words)
    #pragma unroll
    for(uint i = 0; i < (HISTOGRAM64_BIN_COUNT / 4); i++)
        ((uint *)s_Hist)[threadIdx.x + i * HISTOGRAM64_THREADBLOCK_SIZE] = 0;
    //Read data from global memory and submit to the shared-memory histogram
    //Since histogram counters are byte-sized, every single thread can't do more than 255 submission
    __syncthreads();
    // Grid-stride loop over uint4 words; each word contributes 16 counted bytes.
    for(uint pos = UMAD(blockIdx.x, blockDim.x, threadIdx.x); pos < dataCount; pos += UMUL(blockDim.x, gridDim.x)){
        data_t data = d_Data[pos];
        addWord(s_ThreadBase, data.x);
        addWord(s_ThreadBase, data.y);
        addWord(s_ThreadBase, data.z);
        addWord(s_ThreadBase, data.w);
    }
    //Accumulate per-thread histograms into per-block and write to global memory
    __syncthreads();
    if(threadIdx.x < HISTOGRAM64_BIN_COUNT){
        uchar *s_HistBase = s_Hist + UMUL(threadIdx.x, HISTOGRAM64_THREADBLOCK_SIZE);
        uint sum = 0;
        // Start at a lane-dependent offset (and wrap) so the 64 reducing
        // threads do not all hit the same shared-memory bank at once.
        uint pos = 4 * (threadIdx.x & (SHARED_MEMORY_BANKS - 1));
        #pragma unroll
        for(uint i = 0; i < (HISTOGRAM64_THREADBLOCK_SIZE / 4); i++){
            sum +=
                s_HistBase[pos + 0] +
                s_HistBase[pos + 1] +
                s_HistBase[pos + 2] +
                s_HistBase[pos + 3];
            pos = (pos + 4) & (HISTOGRAM64_THREADBLOCK_SIZE - 1);
        }
        d_PartialHistograms[blockIdx.x * HISTOGRAM64_BIN_COUNT + threadIdx.x] = sum;
    }
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram64() output
// Run one threadblock per bin; each threadbock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram64
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
#define MERGE_THREADBLOCK_SIZE 256
// Merges histogram64Kernel() output: one block per bin; each block sums that
// bin across all histogramCount partial histograms, then tree-reduces the
// per-thread partial sums in shared memory.  Launch with gridDim.x ==
// HISTOGRAM64_BIN_COUNT and blockDim.x == MERGE_THREADBLOCK_SIZE.
__global__ void mergeHistogram64Kernel(
    uint *d_Histogram,
    uint *d_PartialHistograms,
    uint histogramCount
){
    __shared__ uint data[MERGE_THREADBLOCK_SIZE];
    // Strided pass: each thread accumulates every MERGE_THREADBLOCK_SIZE-th
    // partial histogram's value of this block's bin (blockIdx.x).
    uint sum = 0;
    for(uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
        sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM64_BIN_COUNT];
    data[threadIdx.x] = sum;
    // Standard shared-memory tree reduction; barrier before each halving.
    for(uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1){
        __syncthreads();
        if(threadIdx.x < stride)
            data[threadIdx.x] += data[threadIdx.x + stride];
    }
    if(threadIdx.x == 0)
        d_Histogram[blockIdx.x] = data[0];
}
////////////////////////////////////////////////////////////////////////////////
// CPU interface to GPU histogram calculator
////////////////////////////////////////////////////////////////////////////////
//histogram64kernel() intermediate results buffer
//MAX_PARTIAL_HISTOGRAM64_COUNT == 32768 and HISTOGRAM64_THREADBLOCK_SIZE == 64
//amounts to max. 480MB of input data
static const uint MAX_PARTIAL_HISTOGRAM64_COUNT = 32768;
static uint *d_PartialHistograms;
//Internal memory allocation
// Internal memory allocation: reserves the worst-case partial-histogram
// buffer once, up front.  The assert enforces the block-size constraint the
// bank-conflict permutation in histogram64Kernel relies on.
extern "C" void initHistogram64(void){
    assert( HISTOGRAM64_THREADBLOCK_SIZE % (4 * SHARED_MEMORY_BANKS) == 0 );
    cutilSafeCall( cudaMalloc((void **)&d_PartialHistograms, MAX_PARTIAL_HISTOGRAM64_COUNT * HISTOGRAM64_BIN_COUNT * sizeof(uint)) );
}
//Internal memory deallocation
// Internal memory deallocation: releases the buffer from initHistogram64().
extern "C" void closeHistogram64(void){
    cutilSafeCall( cudaFree(d_PartialHistograms) );
}
//Round a / b to nearest higher integer value
//Round a / b up to the nearest integer (ceiling division; assumes b > 0).
//Written without a + b - 1 so it cannot overflow for large a.
inline uint iDivUp(uint a, uint b){
    uint q = a / b;
    if (a % b != 0) ++q;
    return q;
}
//Snap a to nearest lower multiple of b
//Snap a down to the nearest multiple of b (assumes b > 0).
inline uint iSnapDown(uint a, uint b){
    return (a / b) * b;
}
// Host entry point: builds a 64-bin histogram of byteCount bytes at d_Data.
// Two passes: per-block partial histograms, then a merge kernel.  byteCount
// must be a multiple of sizeof(data_t) (16), and small enough that the grid
// fits MAX_PARTIAL_HISTOGRAM64_COUNT blocks.
extern "C" void histogram64(
    uint *d_Histogram,
    void *d_Data,
    uint byteCount
){
    // Size the grid so each thread processes at most iSnapDown(255, 16) = 240
    // bytes — the byte-sized shared counters in the kernel cap a single
    // thread at 255 increments per bin.
    const uint histogramCount = iDivUp(byteCount, HISTOGRAM64_THREADBLOCK_SIZE * iSnapDown(255, sizeof(data_t)));
    assert( byteCount % sizeof(data_t) == 0 );
    assert( histogramCount <= MAX_PARTIAL_HISTOGRAM64_COUNT );
    histogram64Kernel<<<histogramCount, HISTOGRAM64_THREADBLOCK_SIZE>>>(
        d_PartialHistograms,
        (data_t *)d_Data,
        byteCount / sizeof(data_t)
    );
    cutilCheckMsg("histogram64Kernel() execution failed\n");
    // One block per bin sums that bin across all partial histograms.
    mergeHistogram64Kernel<<<HISTOGRAM64_BIN_COUNT, MERGE_THREADBLOCK_SIZE>>>(
        d_Histogram,
        d_PartialHistograms,
        histogramCount
    );
    cutilCheckMsg("mergeHistogram64() execution failed\n");
}
|
8c5a9b767f3a4883873cbd2dd667a305e851508f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "takeLog.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated micro-benchmark harness: sweeps the predefined (block,
// matrix) size combinations and times 1000 launches of takeLog for each.
// argv[1] selects how many rows of matrices_ to benchmark.
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            // NOTE(review): these allocations pass XSIZE*YSIZE *bytes* — if the
            // kernel treats the buffers as XSIZE*YSIZE floats this is 4x too
            // small (missing sizeof(float)); also neither buffer is ever freed.
            float *input = NULL;
            hipMalloc(&input, XSIZE*YSIZE);
            float *env = NULL;
            hipMalloc(&env, XSIZE*YSIZE);
            int nhalf = 1;
            // Round the matrix up to a whole number of blocks per dimension.
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            // First launch forces context creation before any timing.
            hipFree(0);hipLaunchKernelGGL((
            takeLog), dim3(gridBlock),dim3(threadBlock), 0, 0, input,env,nhalf);
            hipDeviceSynchronize();
            // Warm-up launches (excluded from the measurement).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
            takeLog), dim3(gridBlock),dim3(threadBlock), 0, 0, input,env,nhalf);
            }
            // NOTE(review): launches are asynchronous and there is no
            // synchronize before `end`, so this mostly measures enqueue time.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
            takeLog), dim3(gridBlock),dim3(threadBlock), 0, 0, input,env,nhalf);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
}} | 8c5a9b767f3a4883873cbd2dd667a305e851508f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "takeLog.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Micro-benchmark harness: sweeps the predefined (block, matrix) size
// combinations, timing 1000 launches of takeLog for each configuration.
// argv[1] selects how many rows of matrices_ to benchmark.
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            // BUG FIX: allocate room for XSIZE*YSIZE *floats*.  The original
            // passed XSIZE*YSIZE bytes (sizeof(float) missing), so the kernel
            // could access memory past the end of both allocations.
            float *input = NULL;
            cudaMalloc(&input, XSIZE*YSIZE*sizeof(float));
            float *env = NULL;
            cudaMalloc(&env, XSIZE*YSIZE*sizeof(float));
            int nhalf = 1;
            // Round the matrix up to a whole number of blocks per dimension.
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0)
            {
                iXSIZE++;
            }
            while(iYSIZE%BLOCKY!=0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // forces context creation before any timing
            takeLog<<<gridBlock,threadBlock>>>(input,env,nhalf);
            cudaDeviceSynchronize();
            // Warm-up launches (excluded from the measurement).
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                takeLog<<<gridBlock,threadBlock>>>(input,env,nhalf);
            }
            // NOTE(review): launches are asynchronous; without a synchronize
            // before `end` this mostly measures enqueue time — confirm intent.
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                takeLog<<<gridBlock,threadBlock>>>(input,env,nhalf);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // BUG FIX: release the per-configuration buffers; the original
            // leaked both allocations on every iteration of the sweep.
            cudaFree(input);
            cudaFree(env);
        }
    }
    return 0;
}
c1b31e30280e19307be99959fb44ef9d46b8735d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _HISTOGRAM_KERNEL_H_
#define _HISTOGRAM_KERNEL_H_
// Shared-memory histogram: each block accumulates into a block-private copy
// of the bins, then merges it into the global histogram with one atomic per
// bin — far less global atomic contention than the naive kernel.
// Precondition: histogram_size <= HISTOGRAM_SIZE (capacity of the shared array).
__global__ void histogram_kernel_fast(int *input_data, int *histogram, int num_elements, int histogram_size)
{
    __shared__ unsigned int s[HISTOGRAM_SIZE];

    // BUG FIX: initialize the shared bins with a strided loop instead of a
    // single guarded store.  The original only zeroed bins [0, blockDim.x),
    // so any bins beyond the block size were used uninitialized.
    for (unsigned int bin = threadIdx.x; bin < (unsigned int)histogram_size; bin += blockDim.x)
        s[bin] = 0;
    __syncthreads();

    // Grid-stride loop: each input value increments its shared bin.
    unsigned int offset = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int stride = blockDim.x * gridDim.x;
    while (offset < num_elements) {
        atomicAdd(&s[input_data[offset]], 1);
        offset += stride;
    }
    __syncthreads();

    // BUG FIX (same issue as above): merge ALL bins into the global
    // histogram, not just the first blockDim.x of them.
    for (unsigned int bin = threadIdx.x; bin < (unsigned int)histogram_size; bin += blockDim.x)
        atomicAdd(&histogram[bin], s[bin]);
}
// Naive histogram: every thread adds straight into the global bins, so all
// threads contend on the same global atomic counters (hence "slow").
__global__ void histogram_kernel_slow(int *input_data, int *histogram, int num_elements, int histogram_size)
{
    unsigned int stride = blockDim.x * gridDim.x;
    // Grid-stride loop over the input values.
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
         pos < num_elements;
         pos += stride) {
        atomicAdd(&histogram[input_data[pos]], 1);
    }
}
#endif // #ifndef _HISTOGRAM_KERNEL_H
| c1b31e30280e19307be99959fb44ef9d46b8735d.cu | #ifndef _HISTOGRAM_KERNEL_H_
#define _HISTOGRAM_KERNEL_H_
// Shared-memory histogram: each block accumulates into a block-private copy
// of the bins, then merges it into the global histogram with one atomic per
// bin — far less global atomic contention than the naive kernel.
// Precondition: histogram_size <= HISTOGRAM_SIZE (capacity of the shared array).
__global__ void histogram_kernel_fast(int *input_data, int *histogram, int num_elements, int histogram_size)
{
    __shared__ unsigned int s[HISTOGRAM_SIZE];

    // BUG FIX: initialize the shared bins with a strided loop instead of a
    // single guarded store.  The original only zeroed bins [0, blockDim.x),
    // so any bins beyond the block size were used uninitialized.
    for (unsigned int bin = threadIdx.x; bin < (unsigned int)histogram_size; bin += blockDim.x)
        s[bin] = 0;
    __syncthreads();

    // Grid-stride loop: each input value increments its shared bin.
    unsigned int offset = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int stride = blockDim.x * gridDim.x;
    while (offset < num_elements) {
        atomicAdd(&s[input_data[offset]], 1);
        offset += stride;
    }
    __syncthreads();

    // BUG FIX (same issue as above): merge ALL bins into the global
    // histogram, not just the first blockDim.x of them.
    for (unsigned int bin = threadIdx.x; bin < (unsigned int)histogram_size; bin += blockDim.x)
        atomicAdd(&histogram[bin], s[bin]);
}
// Naive histogram: every thread adds straight into the global bins, so all
// threads contend on the same global atomic counters (hence "slow").
__global__ void histogram_kernel_slow(int *input_data, int *histogram, int num_elements, int histogram_size)
{
    unsigned int stride = blockDim.x * gridDim.x;
    // Grid-stride loop over the input values.
    for (unsigned int pos = blockIdx.x * blockDim.x + threadIdx.x;
         pos < num_elements;
         pos += stride) {
        atomicAdd(&histogram[input_data[pos]], 1);
    }
}
#endif // #ifndef _HISTOGRAM_KERNEL_H
|
148fadcca205f582c21a445f672dc1c9ea79fb1a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define block_size 512
// Computes c[i] = (a[i-1] - a[i+1]) * b[i] + a[i] * constant with circular
// neighbors at the vector ends, staging each block's slice of `a` (plus a
// one-element halo on each side) in shared memory.
__global__ void calculation( int *a,
                             int *b,
                             int *c,
                             int constant,
                             int vector_size ) {
    int tid = (blockIdx.x*blockDim.x) + threadIdx.x;  // this thread handles the data at its thread id
    __shared__ int sharedDataA[block_size+2];  // slice of `a` plus left/right halo cells
    int curr_b;
    // Populate border: thread 0 loads the left halo (wrapping), the last
    // thread of the block loads the right halo (wrapping).
    // NOTE(review): when vector_size is not a multiple of block_size, the
    // last (partial) block's right halo comes from the third branch, which
    // only fires when the final element does not sit at threadIdx.x == 0 or
    // block_size-1 — confirm all partial-block shapes are covered.
    if (threadIdx.x == 0){
        sharedDataA[0] = a[tid>0?tid-1:(vector_size-1)];
    } else if (threadIdx.x == block_size - 1){
        sharedDataA[block_size + 1] = a[tid<(vector_size-1)?tid+1:0];
    } else if (tid == vector_size - 1){
        sharedDataA[threadIdx.x + 2] = a[0];
    }
    // How can we avoid these ifs??? Tip: Padding
    if (tid < vector_size){
        // Populate shared data for A (shifted by 1 to leave room for the halo)
        sharedDataA[threadIdx.x+1] = a[tid];
        // Bring data from B (no need for shared; each element is read once)
        curr_b = b[tid];
    }
    // Barrier: every thread's slice and the halos must be in shared memory
    // before any thread reads its neighbors.
    __syncthreads();
    // Perform calculation
    if (tid < vector_size){
        int output_c = (sharedDataA[threadIdx.x]-sharedDataA[threadIdx.x+2])*curr_b; //Use neighbors from shared data
        output_c += sharedDataA[threadIdx.x+1]*constant;
        // Write result
        c[tid] = output_c;
    }
}
// Computes c[i] = (a[i-1] - a[i+1]) * b[i] + a[i] * constant (circular
// neighbors) on both CPU and GPU, times each path, and verifies they agree.
int main( int argc, char* argv[] ) {
    // Parse Input arguments
    // Check the number of arguments (we only receive command + vector size)
    if (argc != 2) {
        // Tell the user how to run the program
        printf ("Usage: %s vector_size\n", argv[0]);
        return 1;
    }

    // Set GPU Variables based on input arguments
    int vector_size = atoi(argv[1]);
    int grid_size = ((vector_size-1)/block_size) + 1;   // ceil(vector_size / block_size)

    // Set device that we will use for our cuda code
    hipSetDevice(0);

    // Time Variables (CUDA/HIP events used as timers for both paths)
    hipEvent_t start_cpu, start_gpu;
    hipEvent_t stop_cpu, stop_gpu;
    hipEventCreate (&start_cpu);
    hipEventCreate (&start_gpu);
    hipEventCreate (&stop_cpu);
    hipEventCreate (&stop_gpu);
    float time;

    // Input arrays; c is computed twice (CPU reference vs GPU result)
    int *a = new int [vector_size];
    int *b = new int [vector_size];
    int *c_cpu = new int [vector_size];
    int *c_gpu = new int [vector_size];
    int constant = 4;

    // Pointers in GPU memory
    int *dev_a;
    int *dev_b;
    int *dev_c;

    // fill the arrays 'a' and 'b' on the CPU
    printf("Filling up input arrays with random values between 1 and 10.\n");
    for (int i = 0; i < vector_size; i++) {
        a[i] = rand()%10;
        b[i] = rand()%10;
    }

    //
    // CPU Calculation
    //////////////////
    printf("Running sequential job.\n");
    hipEventRecord(start_cpu,0);
    // Calculate C in the CPU
    for (int i = 0; i < vector_size; i++) {
        // Read in inputs (neighbors wrap around at the vector ends)
        int prev_a = a[i>0?i-1:(vector_size-1)];
        int curr_a = a[i];
        int post_a = a[i<(vector_size-1)?i+1:0];
        int curr_b = b[i];
        // Do computation
        int output_c = (prev_a-post_a)*curr_b + curr_a*constant;
        // Write result
        c_cpu[i] = output_c;
    }
    hipEventRecord(stop_cpu,0);
    hipEventSynchronize(stop_cpu);
    hipEventElapsedTime(&time, start_cpu, stop_cpu);
    printf("\tSequential Job Time: %.2f ms\n", time);

    //
    // GPU Calculation
    //////////////////
    printf("Running parallel job.\n");
    hipEventRecord(start_gpu,0);
    // allocate the memory on the GPU
    hipMalloc( (void**)&dev_a, vector_size * sizeof(int) );
    hipMalloc( (void**)&dev_b, vector_size * sizeof(int) );
    hipMalloc( (void**)&dev_c, vector_size * sizeof(int) );
    // set arrays to 0
    hipMemset(dev_a, 0, vector_size * sizeof(int));
    hipMemset(dev_b, 0, vector_size * sizeof(int));
    hipMemset(dev_c, 0, vector_size * sizeof(int));
    // copy the arrays 'a' and 'b' to the GPU
    hipMemcpy( dev_a, a, vector_size * sizeof(int),
               hipMemcpyHostToDevice );
    hipMemcpy( dev_b, b, vector_size * sizeof(int),
               hipMemcpyHostToDevice );
    // run kernel
    hipLaunchKernelGGL(( calculation), dim3(grid_size),dim3(block_size), 0, 0, dev_a,
                        dev_b,
                        dev_c,
                        constant,
                        vector_size );
    // copy the array 'c' back from the GPU to the CPU
    // (hipMemcpy blocks, so the kernel has finished before stop_gpu fires)
    hipMemcpy( c_gpu, dev_c, vector_size * sizeof(int),
               hipMemcpyDeviceToHost );
    hipEventRecord(stop_gpu,0);
    hipEventSynchronize(stop_gpu);
    hipEventElapsedTime(&time, start_gpu, stop_gpu);
    printf("\tParallel Job Time: %.2f ms\n", time);

    // compare the results (stop at the first mismatch)
    int error = 0;
    for (int i = 0; i < vector_size; i++) {
        if (c_cpu[i] != c_gpu[i]){
            error = 1;
            printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
        }
        if (error) break;
    }
    if (error == 0){
        printf ("Correct result. No errors were found.\n");
    }

    // free the memory allocated on the GPU
    hipFree( dev_a );
    hipFree( dev_b );
    hipFree( dev_c );

    // free cuda events
    hipEventDestroy (start_cpu);
    hipEventDestroy (start_gpu);
    hipEventDestroy (stop_cpu);
    hipEventDestroy (stop_gpu);

    // BUG FIX: these buffers were allocated with new[], so they must be
    // released with delete[]; calling free() on them is undefined behavior.
    delete[] a;
    delete[] b;
    delete[] c_cpu;
    delete[] c_gpu;
    return 0;
}
| 148fadcca205f582c21a445f672dc1c9ea79fb1a.cu | #include <stdio.h>
#include <stdlib.h>
#define block_size 512
// 1D circular stencil: c[i] = (a[i-1] - a[i+1]) * b[i] + a[i] * constant,
// with wrap-around neighbor indexing on 'a'.  Each block stages its slice
// of 'a' in shared memory with a one-element halo on each side.
// Launch: 1D grid, blockDim.x == block_size.
__global__ void calculation( int *a,
                             int *b,
                             int *c,
                             int constant,
                             int vector_size ) {
    int tid = (blockIdx.x*blockDim.x) + threadIdx.x; // global element index
    // block_size interior cells plus one halo cell on each side
    __shared__ int sharedDataA[block_size+2];
    int curr_b;
    // Populate halo cells.
    if (threadIdx.x == 0){
        // left halo: predecessor of the block's first element (wraps)
        sharedDataA[0] = a[tid>0?tid-1:(vector_size-1)];
    } else if (threadIdx.x == block_size - 1){
        // right halo: successor of the block's last element (wraps)
        sharedDataA[block_size + 1] = a[tid<(vector_size-1)?tid+1:0];
    }
    // Bug fix: this used to be an 'else if' on the chain above.  When
    // vector_size % block_size == 1 the final element is handled by
    // threadIdx.x == 0, which took the first branch and skipped this one,
    // leaving its right-neighbor slot (sharedDataA[2]) uninitialized.
    if (tid == vector_size - 1 && threadIdx.x != block_size - 1){
        sharedDataA[threadIdx.x + 2] = a[0]; // wrap-around right neighbor
    }
    if (tid < vector_size){
        // Stage this thread's own element of A.
        sharedDataA[threadIdx.x+1] = a[tid];
        // B is read exactly once per thread, so no need for shared memory.
        curr_b = b[tid];
    }
    // All interior and halo writes must be visible before anyone reads.
    __syncthreads();
    if (tid < vector_size){
        int output_c = (sharedDataA[threadIdx.x]-sharedDataA[threadIdx.x+2])*curr_b; // neighbors from shared data
        output_c += sharedDataA[threadIdx.x+1]*constant;
        // Write result
        c[tid] = output_c;
    }
}
// Host driver: fills two random vectors, computes
//   c[i] = (a[i-1] - a[i+1]) * b[i] + a[i] * constant   (circular neighbors)
// sequentially on the CPU and in parallel on the GPU, times both runs with
// CUDA events, and verifies the two results agree element-by-element.
// Usage: <program> vector_size
int main( int argc, char* argv[] ) {
	// Parse input arguments (command + vector size only).
	if (argc != 2) {
		// Tell the user how to run the program
		printf ("Usage: %s vector_size\n", argv[0]);
		return 1;
	}
	// Derive the launch configuration from the requested vector size.
	int vector_size = atoi(argv[1]);
	int grid_size = ((vector_size-1)/block_size) + 1; // ceil-division
	// Set device that we will use for our cuda code
	cudaSetDevice(0);
	// Timing events
	cudaEvent_t start_cpu, start_gpu;
	cudaEvent_t stop_cpu, stop_gpu;
	cudaEventCreate (&start_cpu);
	cudaEventCreate (&start_gpu);
	cudaEventCreate (&stop_cpu);
	cudaEventCreate (&stop_gpu);
	float time;
	// Input/output arrays on the host
	int *a = new int [vector_size];
	int *b = new int [vector_size];
	int *c_cpu = new int [vector_size];
	int *c_gpu = new int [vector_size];
	int constant = 4;
	// Pointers in GPU memory
	int *dev_a;
	int *dev_b;
	int *dev_c;
	// Fill the arrays 'a' and 'b' on the CPU.
	printf("Filling up input arrays with random values between 1 and 10.\n");
	for (int i = 0; i < vector_size; i++) {
		a[i] = rand()%10;
		b[i] = rand()%10;
	}
	//
	// CPU Calculation
	//////////////////
	printf("Running sequential job.\n");
	cudaEventRecord(start_cpu,0);
	// Calculate C in the CPU
	for (int i = 0; i < vector_size; i++) {
		// Read in inputs (circular neighbors)
		int prev_a = a[i>0?i-1:(vector_size-1)];
		int curr_a = a[i];
		int post_a = a[i<(vector_size-1)?i+1:0];
		int curr_b = b[i];
		// Do computation
		int output_c = (prev_a-post_a)*curr_b + curr_a*constant;
		// Write result
		c_cpu[i] = output_c;
	}
	cudaEventRecord(stop_cpu,0);
	cudaEventSynchronize(stop_cpu);
	cudaEventElapsedTime(&time, start_cpu, stop_cpu);
	printf("\tSequential Job Time: %.2f ms\n", time);
	//
	// GPU Calculation (timing deliberately includes allocation + transfers)
	//////////////////
	printf("Running parallel job.\n");
	cudaEventRecord(start_gpu,0);
	// allocate the memory on the GPU
	cudaMalloc( (void**)&dev_a, vector_size * sizeof(int) );
	cudaMalloc( (void**)&dev_b, vector_size * sizeof(int) );
	cudaMalloc( (void**)&dev_c, vector_size * sizeof(int) );
	// set arrays to 0
	cudaMemset(dev_a, 0, vector_size * sizeof(int));
	cudaMemset(dev_b, 0, vector_size * sizeof(int));
	cudaMemset(dev_c, 0, vector_size * sizeof(int));
	// copy the arrays 'a' and 'b' to the GPU
	cudaMemcpy( dev_a, a, vector_size * sizeof(int),
				cudaMemcpyHostToDevice );
	cudaMemcpy( dev_b, b, vector_size * sizeof(int),
				cudaMemcpyHostToDevice );
	// run kernel
	calculation<<<grid_size,block_size>>>( dev_a,
										   dev_b,
										   dev_c,
										   constant,
										   vector_size );
	// Bug fix: check for launch-configuration errors instead of silently
	// comparing the CPU result against stale device data.
	cudaError_t launch_err = cudaGetLastError();
	if (launch_err != cudaSuccess) {
		printf("Kernel launch failed: %s\n", cudaGetErrorString(launch_err));
	}
	// copy the array 'c' back from the GPU to the CPU (synchronizes)
	cudaMemcpy( c_gpu, dev_c, vector_size * sizeof(int),
				cudaMemcpyDeviceToHost );
	cudaEventRecord(stop_gpu,0);
	cudaEventSynchronize(stop_gpu);
	cudaEventElapsedTime(&time, start_gpu, stop_gpu);
	printf("\tParallel Job Time: %.2f ms\n", time);
	// compare the results; report the first mismatch only
	int error = 0;
	for (int i = 0; i < vector_size; i++) {
		if (c_cpu[i] != c_gpu[i]){
			error = 1;
			printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
		}
		if (error) break;
	}
	if (error == 0){
		printf ("Correct result. No errors were found.\n");
	}
	// free the memory allocated on the GPU
	cudaFree( dev_a );
	cudaFree( dev_b );
	cudaFree( dev_c );
	// free cuda events
	cudaEventDestroy (start_cpu);
	cudaEventDestroy (start_gpu);
	cudaEventDestroy (stop_cpu);
	cudaEventDestroy (stop_gpu);
	// Bug fix: these buffers were allocated with new[], so they must be
	// released with delete[]; calling free() on new[] memory is undefined
	// behavior.
	delete [] a;
	delete [] b;
	delete [] c_cpu;
	delete [] c_gpu;
	return 0;
}
|
43b5b1022b588cce5014b1ead7073a4c3775cada.hip | // !!! This is a file automatically generated by hipify!!!
//Calculo de la FFT 3D usando "hipfftPlanMany"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <hipfft.h>
#define RENGLONES 2
#define COLUMNAS 3
#define PROFUNDIDAD 4
// 3D data / batched FFT demo using "hipfftPlanMany": fills a
// RENGLONES x COLUMNAS x PROFUNDIDAD complex array with a ramp, transposes
// each depth slice so the transform runs down contiguous rows, executes a
// batched rank-1 C2C FFT on the GPU, transposes the result back and prints it.
int main()
{
	int i,j,k;
	// Length / embedding descriptors for cufft-style PlanMany; with rank 1
	// only the first entry of each is consumed by the plan (per cuFFT docs).
	int n[3] = {RENGLONES,COLUMNAS,PROFUNDIDAD};
	int inembed[3] = {RENGLONES,COLUMNAS,PROFUNDIDAD};
	int onembed[3] = {RENGLONES,COLUMNAS,PROFUNDIDAD};
	cuFloatComplex *h_xn;        // host input x[n]
	cuFloatComplex *h_xn_trans;  // host input, transposed per depth slice
	cuFloatComplex *h_Xk;        // host output X[k]
	cuFloatComplex *h_Xk_trans;  // host output, transposed per depth slice
	hipfftComplex *in,*out;      // device input/output buffers
	// Allocate h_xn on the host
	h_xn = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD);
	// Allocate h_xn_trans on the host
	h_xn_trans = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD);
	// Allocate h_Xk on the host
	h_Xk = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD);
	// Allocate h_Xk_trans on the host
	h_Xk_trans = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD);
	// Fill x[n] with a simple 1..N ramp (imaginary part zero)
	for(k=0;k<PROFUNDIDAD;k++)
	{
		for(i=0;i<RENGLONES;i++)
		{
			for(j=0;j<COLUMNAS;j++)
			{
				//h_xn[i] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21));
				h_xn[(k*RENGLONES*COLUMNAS)+(i*COLUMNAS)+j] = make_cuFloatComplex((float)(((k*RENGLONES*COLUMNAS)+(i*COLUMNAS)+j) + 1),(float)(0.0));
			}
		}
	}
	// Print the input values x[n]
	printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
	for(k=0;k<PROFUNDIDAD;k++)
	{
		for(i=0;i<RENGLONES;i++)
		{
			for(j=0;j<COLUMNAS;j++)
			{
				printf(" (%f) + (%f) ",cuCrealf(h_xn[(k*RENGLONES*COLUMNAS)+(i*COLUMNAS)+j]),cuCimagf(h_xn[(k*RENGLONES*COLUMNAS)+(i*COLUMNAS)+j]));
			}
			printf("\n");
		}
		printf("\n\n");
	}
	// Transpose each depth slice of "h_xn" (rows <-> columns)
	for(k=0;k<PROFUNDIDAD;k++)
	{
		for(i=0;i<COLUMNAS;i++)
		{
			for(j=0;j<RENGLONES;j++)
			{
				h_xn_trans[(k*RENGLONES*COLUMNAS)+(i*RENGLONES)+j] = make_cuFloatComplex(cuCrealf(h_xn[(k*RENGLONES*COLUMNAS)+(j*COLUMNAS)+i]),cuCimagf(h_xn[(k*RENGLONES*COLUMNAS)+(j*COLUMNAS)+i]));
			}
		}
	}
	/*
	// Print the transposed input values x[n] (debug, disabled)
	printf("\n---ELEMENTOS DE ENTRADA x[n] (Matriz transpuesta)---\n\n");
	for(k=0;k<PROFUNDIDAD;k++)
	{
		for(i=0;i<COLUMNAS;i++)
		{
			for(j=0;j<RENGLONES;j++)
			{
				printf(" (%f) + (%f) ",cuCrealf(h_xn_trans[(k*RENGLONES*COLUMNAS)+(i*RENGLONES)+j]),cuCimagf(h_xn_trans[(k*RENGLONES*COLUMNAS)+(i*RENGLONES)+j]));
			}
			printf("\n");
		}
		printf("\n\n");
	}
	*/
	// Allocate "in" on the device
	hipMalloc((void**)&in,sizeof(hipfftComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD);
	// Allocate "out" on the device
	hipMalloc((void**)&out,sizeof(hipfftComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD);
	// Copy h_xn_trans >>> in
	hipMemcpy(in,h_xn_trans,sizeof(cuFloatComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD,hipMemcpyHostToDevice);
	// CUFFT plan: COLUMNAS*PROFUNDIDAD batched transforms of length
	// RENGLONES, unit stride, consecutive batches RENGLONES apart.
	hipfftHandle plan;
	hipfftPlanMany(&plan,1,n,inembed,1,RENGLONES,onembed,1,RENGLONES,HIPFFT_C2C,COLUMNAS*PROFUNDIDAD);
	// Execute the forward FFT
	hipfftExecC2C(plan,in,out,HIPFFT_FORWARD);
	// Copy out >>> h_Xk_trans
	hipMemcpy(h_Xk_trans,out,sizeof(hipfftComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD,hipMemcpyDeviceToHost);
	/*
	// Print the output values X[k] (transposed layout, debug, disabled)
	printf("\n---ELEMENTOS DE SALIDA X[k]---\n\n");
	for(k=0;k<PROFUNDIDAD;k++)
	{
		for(i=0;i<COLUMNAS;i++)
		{
			for(j=0;j<RENGLONES;j++)
			{
				printf(" (%f) + (%f) ",cuCrealf(h_Xk_trans[(k*RENGLONES*COLUMNAS)+(i*RENGLONES)+j]),cuCimagf(h_Xk_trans[(k*RENGLONES*COLUMNAS)+(i*RENGLONES)+j]));
			}
			printf("\n");
		}
		printf("\n\n");
	}
	*/
	// Transpose each depth slice of "h_Xk_trans" back to row-major order
	for(k=0;k<PROFUNDIDAD;k++)
	{
		for(i=0;i<COLUMNAS;i++)
		{
			for(j=0;j<RENGLONES;j++)
			{
				h_Xk[(k*RENGLONES*COLUMNAS)+(j*COLUMNAS)+i] = make_cuFloatComplex(cuCrealf(h_Xk_trans[(k*RENGLONES*COLUMNAS)+(i*RENGLONES)+j]),cuCimagf(h_Xk_trans[(k*RENGLONES*COLUMNAS)+(i*RENGLONES)+j]));
			}
		}
	}
	// Print the output values X[k]
	printf("\n---ELEMENTOS DE SALIDA X[k]---\n\n");
	for(k=0;k<PROFUNDIDAD;k++)
	{
		for(i=0;i<RENGLONES;i++)
		{
			for(j=0;j<COLUMNAS;j++)
			{
				printf(" (%f) + (%f) ",cuCrealf(h_Xk[(k*RENGLONES*COLUMNAS)+(i*COLUMNAS)+j]),cuCimagf(h_Xk[(k*RENGLONES*COLUMNAS)+(i*COLUMNAS)+j]));
			}
			printf("\n");
		}
		printf("\n\n");
	}
	// Destroy the plan
	hipfftDestroy(plan);
	// Release host and device memory
	free(h_xn);
	free(h_Xk);
	free(h_xn_trans);
	free(h_Xk_trans);
	hipFree(in);
	hipFree(out);
}
| 43b5b1022b588cce5014b1ead7073a4c3775cada.cu | //Calculo de la FFT 3D usando "cufftPlanMany"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cufft.h>
#define RENGLONES 2
#define COLUMNAS 3
#define PROFUNDIDAD 4
int main()
{
int i,j,k;
int n[3] = {RENGLONES,COLUMNAS,PROFUNDIDAD};
int inembed[3] = {RENGLONES,COLUMNAS,PROFUNDIDAD};
int onembed[3] = {RENGLONES,COLUMNAS,PROFUNDIDAD};
cuFloatComplex *h_xn;
cuFloatComplex *h_xn_trans;
cuFloatComplex *h_Xk;
cuFloatComplex *h_Xk_trans;
cufftComplex *in,*out;
//Se reserva memoria para h_xn en el host
h_xn = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD);
//Se reserva memoria para h_xn_trans en el host
h_xn_trans = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD);
//Se reserva memoria para h_Xk en el host
h_Xk = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD);
//Se reserva memoria para h_Xk_trans en el host
h_Xk_trans = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD);
//Se dan valores a x[n]
for(k=0;k<PROFUNDIDAD;k++)
{
for(i=0;i<RENGLONES;i++)
{
for(j=0;j<COLUMNAS;j++)
{
//h_xn[i] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%21));
h_xn[(k*RENGLONES*COLUMNAS)+(i*COLUMNAS)+j] = make_cuFloatComplex((float)(((k*RENGLONES*COLUMNAS)+(i*COLUMNAS)+j) + 1),(float)(0.0));
}
}
}
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<PROFUNDIDAD;k++)
{
for(i=0;i<RENGLONES;i++)
{
for(j=0;j<COLUMNAS;j++)
{
printf(" (%f) + (%f) ",cuCrealf(h_xn[(k*RENGLONES*COLUMNAS)+(i*COLUMNAS)+j]),cuCimagf(h_xn[(k*RENGLONES*COLUMNAS)+(i*COLUMNAS)+j]));
}
printf("\n");
}
printf("\n\n");
}
//Se saca la transpuesta del arreglo tridimensional "h_xn"
for(k=0;k<PROFUNDIDAD;k++)
{
for(i=0;i<COLUMNAS;i++)
{
for(j=0;j<RENGLONES;j++)
{
h_xn_trans[(k*RENGLONES*COLUMNAS)+(i*RENGLONES)+j] = make_cuFloatComplex(cuCrealf(h_xn[(k*RENGLONES*COLUMNAS)+(j*COLUMNAS)+i]),cuCimagf(h_xn[(k*RENGLONES*COLUMNAS)+(j*COLUMNAS)+i]));
}
}
}
/*
//Se imprimen los valores de entrada x[n] (matriz transpuesta)
printf("\n---ELEMENTOS DE ENTRADA x[n] (Matriz transpuesta)---\n\n");
for(k=0;k<PROFUNDIDAD;k++)
{
for(i=0;i<COLUMNAS;i++)
{
for(j=0;j<RENGLONES;j++)
{
printf(" (%f) + (%f) ",cuCrealf(h_xn_trans[(k*RENGLONES*COLUMNAS)+(i*RENGLONES)+j]),cuCimagf(h_xn_trans[(k*RENGLONES*COLUMNAS)+(i*RENGLONES)+j]));
}
printf("\n");
}
printf("\n\n");
}
*/
//Se reserva memoria para "in" en el device
cudaMalloc((void**)&in,sizeof(cufftComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD);
//Se reserva memoria para "out" en el device
cudaMalloc((void**)&out,sizeof(cufftComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD);
//Se copian los datos de h_xn_trans >>> in
cudaMemcpy(in,h_xn_trans,sizeof(cuFloatComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD,cudaMemcpyHostToDevice);
//CUFFT plan
cufftHandle plan;
cufftPlanMany(&plan,1,n,inembed,1,RENGLONES,onembed,1,RENGLONES,CUFFT_C2C,COLUMNAS*PROFUNDIDAD);
//Ejecucion de la fft
cufftExecC2C(plan,in,out,CUFFT_FORWARD);
//Se copian los datos de out >>> h_Xk
cudaMemcpy(h_Xk_trans,out,sizeof(cufftComplex)*RENGLONES*COLUMNAS*PROFUNDIDAD,cudaMemcpyDeviceToHost);
/*
//Se imprimen los valores de salida X[k] (Matriz transpuesta h_Xk_trans)
printf("\n---ELEMENTOS DE SALIDA X[k]---\n\n");
for(k=0;k<PROFUNDIDAD;k++)
{
for(i=0;i<COLUMNAS;i++)
{
for(j=0;j<RENGLONES;j++)
{
printf(" (%f) + (%f) ",cuCrealf(h_Xk_trans[(k*RENGLONES*COLUMNAS)+(i*RENGLONES)+j]),cuCimagf(h_Xk_trans[(k*RENGLONES*COLUMNAS)+(i*RENGLONES)+j]));
}
printf("\n");
}
printf("\n\n");
}
*/
//Se saca la transpuesta del arreglo tridimensional "h_Xk_trans"
for(k=0;k<PROFUNDIDAD;k++)
{
for(i=0;i<COLUMNAS;i++)
{
for(j=0;j<RENGLONES;j++)
{
h_Xk[(k*RENGLONES*COLUMNAS)+(j*COLUMNAS)+i] = make_cuFloatComplex(cuCrealf(h_Xk_trans[(k*RENGLONES*COLUMNAS)+(i*RENGLONES)+j]),cuCimagf(h_Xk_trans[(k*RENGLONES*COLUMNAS)+(i*RENGLONES)+j]));
}
}
}
//Se imprimen los valores de salida X[k]
printf("\n---ELEMENTOS DE SALIDA X[k]---\n\n");
for(k=0;k<PROFUNDIDAD;k++)
{
for(i=0;i<RENGLONES;i++)
{
for(j=0;j<COLUMNAS;j++)
{
printf(" (%f) + (%f) ",cuCrealf(h_Xk[(k*RENGLONES*COLUMNAS)+(i*COLUMNAS)+j]),cuCimagf(h_Xk[(k*RENGLONES*COLUMNAS)+(i*COLUMNAS)+j]));
}
printf("\n");
}
printf("\n\n");
}
//Se destruye el plan
cufftDestroy(plan);
//Se liberan memorias
free(h_xn);
free(h_Xk);
free(h_xn_trans);
free(h_Xk_trans);
cudaFree(in);
cudaFree(out);
}
|
d049b208048e51e52a8628831234adf198a84e00.hip | // !!! This is a file automatically generated by hipify!!!
// CIS565 CUDA Rasterizer: A simple rasterization pipeline for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <thrust/random.h>
#include "rasterizeKernels.h"
#include "rasterizeTools.h"
#if TORCH_HIP_VERSION >= 5000
#include <helper_math.h>
#else
#include <cutil_math.h>
#endif
glm::vec3* framebuffer;
fragment* depthbuffer;
int* lock;
ray* light;
int lightNum = 4;
cudaMat4* transform;
float* device_vbo;
float* device_cbo;
int* device_ibo;
float* device_nbo;
triangle* primitives;
void checkCUDAError(const char *msg) {
hipError_t err = hipGetLastError();
if( hipSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
//Handy dandy little hashing function that provides seeds for random number generation
// Wang-style integer mixer: scrambles a 32-bit seed through six add/xor/shift
// rounds so consecutive inputs yield well-distributed outputs (used to seed
// per-thread random number generation).
__host__ __device__ unsigned int hash(unsigned int a){
  unsigned int x = a;
  x = (x + 0x7ed55d16) + (x << 12);
  x = (x ^ 0xc761c23c) ^ (x >> 19);
  x = (x + 0x165667b1) + (x << 5);
  x = (x + 0xd3a2646c) ^ (x << 9);
  x = (x + 0xfd7046c5) + (x << 3);
  x = (x ^ 0xb55a4f09) ^ (x >> 16);
  return x;
}
//Writes a given fragment to a fragment buffer at a given location
// Stores 'frag' at pixel (x, y) of the row-major fragment buffer;
// coordinates at or beyond the resolution are silently ignored.
__host__ __device__ void writeToDepthbuffer(int x, int y, fragment frag, fragment* depthbuffer, glm::vec2 resolution){
  if (x >= resolution.x || y >= resolution.y) {
    return;
  }
  int index = (y*resolution.x) + x;
  depthbuffer[index] = frag;
}
//Reads a fragment from a given location in a fragment buffer
// Fetches the fragment at pixel (x, y) of the row-major fragment buffer.
// Out-of-range coordinates return a default-constructed (uninitialized)
// fragment, matching the original contract.
__host__ __device__ fragment getFromDepthbuffer(int x, int y, fragment* depthbuffer, glm::vec2 resolution){
  if (x >= resolution.x || y >= resolution.y) {
    fragment blank;
    return blank;
  }
  int index = (y*resolution.x) + x;
  return depthbuffer[index];
}
//Writes a given pixel to a pixel buffer at a given location
// Stores 'value' at pixel (x, y) of the row-major pixel buffer;
// coordinates at or beyond the resolution are silently ignored.
__host__ __device__ void writeToFramebuffer(int x, int y, glm::vec3 value, glm::vec3* framebuffer, glm::vec2 resolution){
  if (x >= resolution.x || y >= resolution.y) {
    return;
  }
  int index = (y*resolution.x) + x;
  framebuffer[index] = value;
}
//Reads a pixel from a pixel buffer at a given location
// Fetches the pixel at (x, y) of the row-major pixel buffer; out-of-range
// coordinates yield black.
__host__ __device__ glm::vec3 getFromFramebuffer(int x, int y, glm::vec3* framebuffer, glm::vec2 resolution){
  if (x >= resolution.x || y >= resolution.y) {
    return glm::vec3(0,0,0);
  }
  int index = (y*resolution.x) + x;
  return framebuffer[index];
}
//Kernel that clears a given pixel buffer with a given color
// Kernel: fills every pixel of 'image' with 'color'.
// Launch: 2D grid/blocks covering resolution.x by resolution.y.
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image, glm::vec3 color){
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    int index = x + (y * resolution.x);
    // Bug fix: the guard used <=, so threads at x == resolution.x or
    // y == resolution.y wrote one element past each row / past the buffer.
    if(x<resolution.x && y<resolution.y){
      image[index] = color;
    }
}
//Kernel that clears a given fragment buffer with a given fragment
// Kernel: resets every entry of the fragment buffer to 'frag', stamping each
// entry's position with its own pixel coordinates.
// Launch: 2D grid/blocks covering resolution.x by resolution.y.
__global__ void clearDepthBuffer(glm::vec2 resolution, fragment* buffer, fragment frag){
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    int index = x + (y * resolution.x);
    // Bug fix: the guard used <=, letting edge threads write past the buffer.
    if(x<resolution.x && y<resolution.y){
      fragment f = frag;
      f.position.x = x;
      f.position.y = y;
      buffer[index] = f;
    }
}
//Kernel that writes the image to the OpenGL PBO directly.
// Kernel: converts the float framebuffer (RGB, nominally [0,1]) to 8-bit,
// clamping each channel at 255, and writes it into the OpenGL pixel buffer
// object.  One thread per textel.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Bug fix: the guard used <=, letting edge threads read/write past the
  // framebuffer and the PBO.
  if(x<resolution.x && y<resolution.y){
      glm::vec3 color;
      color.x = image[index].x*255.0;
      color.y = image[index].y*255.0;
      color.z = image[index].z*255.0;
      // Clamp each channel to the 8-bit range.
      if(color.x>255){
        color.x = 255;
      }
      if(color.y>255){
        color.y = 255;
      }
      if(color.z>255){
        color.z = 255;
      }
      // Each thread writes one pixel location in the texture (textel)
      PBOpos[index].w = 0;
      PBOpos[index].x = color.x;
      PBOpos[index].y = color.y;
      PBOpos[index].z = color.z;
  }
}
//TODO: Implement a vertex shader
// Vertex shader: transforms each vertex position and normal in-place by
// 'transform'.  One thread per vertex; vbo/nbo hold packed xyz triples.
__global__ void vertexShadeKernel(float* vbo, int vbosize, float* nbo, int nbosize, const cudaMat4 transform){
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if(index < vbosize/3){
    // Positions are points: homogeneous w = 1 so translation applies.
    glm::vec3 newVertex = multiplyMV(transform, glm::vec4(vbo[3 * index], vbo[3 * index + 1], vbo[3 * index + 2], 1.0f));
    // Bug fix: normals are directions and must be transformed with w = 0;
    // using w = 1 added the matrix's translation column to every normal.
    // NOTE(review): for non-uniform scale the inverse-transpose matrix
    // would be required -- assumed rigid/uniform transform here, confirm.
    glm::vec3 normal = glm::normalize(multiplyMV(transform, glm::vec4(nbo[3 * index], nbo[3 * index + 1], nbo[3 * index + 2], 0.0f)));
    vbo[3 * index] = newVertex.x;
    vbo[3 * index + 1] = newVertex.y;
    vbo[3 * index + 2] = newVertex.z;
    nbo[3 * index] = normal.x;
    nbo[3 * index + 1] = normal.y;
    nbo[3 * index + 2] = normal.z;
  }
}
//TODO: Implement primitive assembly
// Primitive assembly: gathers each triangle's three vertex positions,
// normals and colors out of the flat buffers into one 'triangle' struct.
// One thread per triangle (ibosize/3 primitives); ibo holds vertex indices,
// vbo/nbo hold packed xyz triples, cbo holds exactly three RGB colors that
// are reused for every triangle.
__global__ void primitiveAssemblyKernel(float* vbo, int vbosize, float* nbo, int nbosize, float* cbo, int cbosize, int* ibo, int ibosize, triangle* primitives){
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  int primitivesCount = ibosize/3;
  if(index < primitivesCount){
    // The actual indices of vertices are stored in the index buffer object.
    const int* vertexIndex = &ibo[3 * index];
    primitives[index].p0 = glm::vec3(vbo[3 * vertexIndex[0]], vbo[3 * vertexIndex[0] +1], vbo[3 * vertexIndex[0] + 2]);
    primitives[index].p1 = glm::vec3(vbo[3 * vertexIndex[1]], vbo[3 * vertexIndex[1] +1], vbo[3 * vertexIndex[1] + 2]);
    primitives[index].p2 = glm::vec3(vbo[3 * vertexIndex[2]], vbo[3 * vertexIndex[2] +1], vbo[3 * vertexIndex[2] + 2]);
    // Load normals of vertices.
    primitives[index].n0 = glm::vec3(nbo[3 * vertexIndex[0]], nbo[3 * vertexIndex[0] +1], nbo[3 * vertexIndex[0] + 2]);
    primitives[index].n1 = glm::vec3(nbo[3 * vertexIndex[1]], nbo[3 * vertexIndex[1] +1], nbo[3 * vertexIndex[1] + 2]);
    primitives[index].n2 = glm::vec3(nbo[3 * vertexIndex[2]], nbo[3 * vertexIndex[2] +1], nbo[3 * vertexIndex[2] + 2]);
    // The size of cbo is nine: the same three RGB colors are assigned to the
    // three corners of every triangle.
    primitives[index].c0 = glm::vec3(cbo[0], cbo[1], cbo[2]);
    primitives[index].c1 = glm::vec3(cbo[3], cbo[4], cbo[5]);
    primitives[index].c2 = glm::vec3(cbo[6], cbo[7], cbo[8]);
  }
}
//TODO: Implement a rasterization method, such as scanline.
// Rasterizer: one thread per triangle.  Back-face culls against 'view',
// computes the triangle's screen-space bounding box, then scan-converts it:
// for each covered pixel it interpolates depth/color via barycentric
// coordinates and performs a depth test guarded by a per-pixel spin lock.
// Precondition: 'lock' must be zero-initialized by the host before launch
// (atomicCAS(&lock[idx], 0, 1) assumes an unlocked state is 0).
// NOTE(review): intra-warp spin locks like this can livelock on
// architectures without independent thread scheduling -- confirm target SM.
__global__ void rasterizationKernel(triangle* primitives, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution, glm::vec3 view, int* lock){
  int index = (blockIdx.x * blockDim.x) + threadIdx.x;
  if(index < primitivesCount){
    // Back-face culling: drop the triangle if any vertex normal faces away.
    triangle currentTriangle = primitives[index];
    if (glm::dot(currentTriangle.n0, view) > 0.0f || glm::dot(currentTriangle.n1, view) > 0.0f || glm::dot(currentTriangle.n2, view) > 0.0f )
      return;
    // Flat-shade with the average of the three vertex normals.
    glm::vec3 normal = glm::normalize((currentTriangle.n0 + currentTriangle.n1 + currentTriangle.n2) / 3.0f);
    // Bounding box of the triangle, projected to screen coordinates
    // (note the y flip: min point maps to maxY, max point to minY).
    glm::vec3 minPoint, maxPoint;
    int minX, minY, maxX, maxY;
    getAABBForTriangle(currentTriangle, minPoint, maxPoint);
    scale2screen(minPoint, minX, maxY, resolution);
    scale2screen(maxPoint, maxX, minY, resolution);
    if (minX > resolution.x - 1 || minY > resolution.y - 1 || minX < 0 || minY < 0 )
      return;
    // Clip the bounding box to the image.
    minX = minX < 0 ? 0 : minX;
    minY = minY < 0 ? 0 : minY;
    maxX = (maxX > resolution.x - 1) ? resolution.x - 1 : maxX;
    maxY = (maxY > resolution.y - 1) ? resolution.y - 1 : maxY;
    // Scan-convert: rasterize every pixel inside the clipped box.
    int idx;
    float depth = 0;
    for (int y = minY; y < maxY; ++ y) {
      for (int x = minX; x < maxX; ++ x) {
        idx = y * resolution.x + x;
        glm::vec3 barycentricCoordinates = calculateBarycentricCoordinate(currentTriangle, screen2scale(x, y, resolution));
        // Skip pixels whose barycentric coordinates fall outside the triangle.
        if (!isBarycentricCoordInBounds(barycentricCoordinates))
          continue;
        depth = getZAtCoordinate(barycentricCoordinates, currentTriangle);
        // Spin until this thread wins the per-pixel lock, then do the
        // depth test + fragment write atomically with respect to other
        // triangles targeting the same pixel.
        bool loopFlag = true;
        do {
          if (atomicCAS(&lock[idx], 0, 1) == 0) {
            // Depth test: keep the nearer (larger z) fragment.
            if(depth > depthbuffer[idx].position.z) {
              depthbuffer[idx].position.x = screen2scale(x, y, resolution).x;
              depthbuffer[idx].position.y = screen2scale(x, y, resolution).y;
              depthbuffer[idx].position.z = depth;
              depthbuffer[idx].normal = normal;
              depthbuffer[idx].color = barycentricCoordinates.x * currentTriangle.c0 + barycentricCoordinates.y * currentTriangle.c1 + barycentricCoordinates.z * currentTriangle.c2;
            }
            loopFlag = false;
            // Make the fragment write visible before releasing the lock.
            __threadfence();
            atomicExch(&(lock[idx]), 0);
          }
        } while (loopFlag);
      } // for x
    } // for y
  }
}
//TODO: Implement a fragment shader
// Fragment shader: Blinn-Phong shading of every fragment in the depth
// buffer.  One thread per pixel.  Accumulates over 'lightNum' lights when
// multicolorFlag is set (otherwise only light 0); depthFlag renders raw
// depth instead; flatcolorFlag selects a preset base color by 'color'.
// See http://en.wikipedia.org/wiki/Blinn%E2%80%93Phong_shading_model
__global__ void fragmentShadeKernel(fragment* depthbuffer, glm::vec2 resolution, ray* light, int lightNum, bool depthFlag, bool flatcolorFlag, int color, bool multicolorFlag){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Bug fix: the guard was "x > resolution.x && y > resolution.y", which
  // only rejected threads out of range on BOTH axes and used > instead of
  // >=, so edge threads read/wrote out of bounds.
  if (x >= resolution.x || y >= resolution.y)
    return;
  // Stash the rasterized color, then restart the accumulator at zero.
  glm::vec3 frag = depthbuffer[index].color;
  depthbuffer[index].color = glm::vec3(0.0f);
  lightNum = multicolorFlag ? lightNum : 1;
  // Accumulate the contribution of each light.
  for (int i = 0; i < lightNum; ++ i) {
    // Light (L), view (V) and half-way (H) vectors of the Blinn-Phong model.
    glm::vec3 position = depthbuffer[index].position;
    glm::vec3 normal = depthbuffer[index].normal;
    glm::vec3 L = glm::normalize(light[i].position - position);
    glm::vec3 V = glm::normalize(- position);
    glm::vec3 H = glm::normalize(L + V);
    // Diffuse and specular terms (specular exponent 1000).
    float diffuse = glm::max(glm::dot(L, normal), 0.0f);
    float specular = glm::max(glm::pow(glm::dot(H, normal), 1000.0f), 0.0f);
    // Compute final color.
    if (depthFlag) {
      // Depth visualization: grayscale from the stored z value.
      depthbuffer[index].color = depthbuffer[index].position.z * glm::vec3(1.0f);
      return;
    } else if (flatcolorFlag) {
      // Flat shading with a preset base color chosen by 'color'.
      switch(color) {
      case(0):
        depthbuffer[index].color += (0.5f * diffuse + 0.5f * specular) * light[i].color;
        break;
      case(1):
        depthbuffer[index].color += (0.5f * diffuse + 0.5f * specular) * glm::vec3(0.63f, 0.06f, 0.04f) * light[i].color;
        break;
      case(2):
        depthbuffer[index].color += (0.5f * diffuse + 0.5f * specular) * glm::vec3(0.15f, 0.48f, 0.09f) * light[i].color;
        break;
      case(3):
        depthbuffer[index].color += (0.5f * diffuse + 0.5f * specular) * glm::vec3(0.13f, 0.16f, 0.84f) * light[i].color;
        break;
      case(4):
        depthbuffer[index].color += (0.5f * diffuse + 0.5f * specular) * glm::vec3(0.43f, 0.16f, 0.54f) * light[i].color;
        break;
      }
    } else
      // Default: modulate the interpolated vertex color.
      depthbuffer[index].color += frag * (0.5f * diffuse + 0.5f * specular) * light[i].color;
  }
  // Normalize by the number of lights (brighter weighting when multicolor).
  depthbuffer[index].color /= multicolorFlag ? (float)lightNum * 0.3f : (float)lightNum * 0.5f;
}
// Writes fragment colors to the framebuffer.  When antialiasing is enabled
// (compile-time ANTIALIASING or the runtime flag) each pixel is box-filtered
// with whichever of its 8 neighbors are in bounds: 9 samples for interior
// pixels, 6 for edge pixels, 4 for corner pixels.
__global__ void render(glm::vec2 resolution, fragment* depthbuffer, glm::vec3* framebuffer, bool antialiasing){
  int x = (blockIdx.x * blockDim.x) + threadIdx.x;
  int y = (blockIdx.y * blockDim.y) + threadIdx.y;
  int index = x + (y * resolution.x);
  // Bug fix: the guard used <=, letting threads at the right/bottom edge
  // read and write out of bounds.
  if(x<resolution.x && y<resolution.y){
    if (ANTIALIASING || antialiasing) {
      // Super sampling: start with the central pixel, then add each
      // in-bounds side neighbor while counting samples.
      int sampleNum = 1;
      glm::vec3 sampling = depthbuffer[index].color;
      // Left side
      sampleNum += (x > 0) ? 1 : 0;
      sampling += (x > 0) ? depthbuffer[index - 1].color : glm::vec3(0.0f);
      // Upper side
      sampleNum += (y > 0) ? 1 : 0;
      sampling += (y > 0) ? depthbuffer[index - (int)resolution.x].color : glm::vec3(0.0f);
      // Right side
      sampleNum += (x < resolution.x - 1) ? 1 : 0;
      sampling += (x < resolution.x - 1) ? depthbuffer[index + 1].color : glm::vec3(0.0f);
      // Bottom side -- bug fix: the original tested x against resolution.y,
      // gating the bottom neighbor on the wrong coordinate.
      sampleNum += (y < resolution.y - 1) ? 1 : 0;
      sampling += (y < resolution.y - 1) ? depthbuffer[index + (int)resolution.x].color : glm::vec3(0.0f);
      // Four corners: interior pixel (all four sides present).
      sampleNum += (sampleNum == 5) ? 4 : 0;
      sampling += (sampleNum == 9) ? (depthbuffer[index - (int)resolution.x - 1].color + depthbuffer[index - (int)resolution.x + 1].color
                + depthbuffer[index + (int)resolution.x - 1].color + depthbuffer[index + (int)resolution.x + 1].color) : glm::vec3(0.0f);
      if (sampleNum == 9) {
        framebuffer[index] = sampling / 9.0f;
        return;
      }
      // Two corners: edge (non-corner) pixel.
      sampleNum += (sampleNum == 4) ? 2 : 0;
      if (sampleNum == 6) {
        sampling += x == 0 ? depthbuffer[index - (int)resolution.x + 1].color + depthbuffer[index + (int)resolution.x + 1].color: glm::vec3(0.0f);
        sampling += y == 0 ? depthbuffer[index + (int)resolution.x - 1].color + depthbuffer[index + (int)resolution.x + 1].color: glm::vec3(0.0f);
        // Bug fix: these compared against resolution.x / resolution.y, but
        // in-bounds coordinates only reach resolution - 1, so right/bottom
        // edge pixels never picked up their corner samples.
        sampling += x == resolution.x - 1 ? depthbuffer[index - (int)resolution.x - 1].color + depthbuffer[index + (int)resolution.x - 1].color: glm::vec3(0.0f);
        sampling += y == resolution.y - 1 ? depthbuffer[index - (int)resolution.x - 1].color + depthbuffer[index - (int)resolution.x + 1].color: glm::vec3(0.0f);
        framebuffer[index] = sampling / 6.0f;
        return;
      }
      // One corner: corner pixel -- add the single diagonal neighbor.
      sampleNum += (sampleNum == 3) ? 1 : 0;
      if((x == 0) && (y == 0)) {
        sampling += depthbuffer[index + (int)resolution.x + 1].color;
      } else if ((x == resolution.x -1) && (y == 0)) {
        sampling += depthbuffer[index + (int)resolution.x - 1].color;
      } else if ((y == resolution.y -1) && (x == 0)) {
        sampling += depthbuffer[index - (int)resolution.x + 1].color;
      } else {
        sampling += depthbuffer[index - (int)resolution.x - 1].color;
      }
      framebuffer[index] = sampling / 4.0f;
    } else
      framebuffer[index] = depthbuffer[index].color;
  }
}
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
// Host-side pipeline driver: allocates per-frame device buffers, clears
// them, uploads the geometry, runs vertex shade -> primitive assembly ->
// rasterization -> fragment shade -> render, copies the image into the PBO,
// and frees everything via kernelCleanup.
void cudaRasterizeCore(uchar4* PBOpos, glm::vec2 resolution, float frame, float* vbo, int vbosize, float* cbo, int cbosize, int* ibo, int ibosize, float* nbo, int nbosize, const cudaMat4* transform, glm::vec3 viewPort, bool antialiasing, bool depthFlag, bool flatcolorFlag, int color, bool multicolorFlag){
  // set up crucial magic: 2D launch config covering the image
  int tileSize = 8;
  dim3 threadsPerBlock(tileSize, tileSize);
  dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize)));
  // set up framebuffer
  framebuffer = NULL;
  hipMalloc((void**)&framebuffer, (int)resolution.x*(int)resolution.y*sizeof(glm::vec3));
  // set up depthbuffer
  depthbuffer = NULL;
  hipMalloc((void**)&depthbuffer, (int)resolution.x*(int)resolution.y*sizeof(fragment));
  // set up per-pixel locks
  lock = NULL;
  hipMalloc((void**)&lock, (int)resolution.x * (int)resolution.y * sizeof(int));
  // Bug fix: the lock array was never initialized, but rasterizationKernel's
  // atomicCAS(&lock[idx], 0, 1) requires every lock to start at 0 (unlocked);
  // garbage contents could spin forever or skip mutual exclusion entirely.
  hipMemset(lock, 0, (int)resolution.x * (int)resolution.y * sizeof(int));
  // set up light
  light = NULL;
  hipMalloc((void**)&light, lightNum * sizeof(ray));
  // kernel launches to black out accumulated/unaccumlated pixel buffers
  hipLaunchKernelGGL(( clearImage), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, framebuffer, glm::vec3(0,0,0));
  // Clear fragment: black, no normal, depth pushed far behind everything.
  fragment frag;
  frag.color = glm::vec3(0,0,0);
  frag.normal = glm::vec3(0,0,0);
  frag.position = glm::vec3(0,0,-10000);
  hipLaunchKernelGGL(( clearDepthBuffer), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, depthbuffer,frag);
  //------------------------------
  // memory stuff: upload geometry buffers
  //------------------------------
  primitives = NULL;
  hipMalloc((void**)&primitives, (ibosize/3)*sizeof(triangle));
  // Index Buffer Object
  device_ibo = NULL;
  hipMalloc((void**)&device_ibo, ibosize*sizeof(int));
  hipMemcpy( device_ibo, ibo, ibosize*sizeof(int), hipMemcpyHostToDevice);
  // Vertex Buffer Object
  device_vbo = NULL;
  hipMalloc((void**)&device_vbo, vbosize*sizeof(float));
  hipMemcpy( device_vbo, vbo, vbosize*sizeof(float), hipMemcpyHostToDevice);
  // Color Buffer Object
  device_cbo = NULL;
  hipMalloc((void**)&device_cbo, cbosize*sizeof(float));
  hipMemcpy( device_cbo, cbo, cbosize*sizeof(float), hipMemcpyHostToDevice);
  // Normal Buffer Object
  device_nbo = NULL;
  hipMalloc((void**)&device_nbo, nbosize*sizeof(float));
  hipMemcpy( device_nbo, nbo, nbosize*sizeof(float), hipMemcpyHostToDevice);
  // 1D launch config for the per-vertex / per-primitive stages
  tileSize = 32;
  int primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize));
  //------------------------------
  // vertex shader
  //------------------------------
  hipLaunchKernelGGL(( vertexShadeKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize, device_nbo, nbosize, *transform);
  hipDeviceSynchronize();
  //------------------------------
  // primitive assembly
  //------------------------------
  primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize));
  hipLaunchKernelGGL(( primitiveAssemblyKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, device_vbo, vbosize, device_nbo, nbosize, device_cbo, cbosize, device_ibo, ibosize, primitives);
  hipDeviceSynchronize();
  //------------------------------
  // rasterization
  //------------------------------
  hipLaunchKernelGGL(( rasterizationKernel), dim3(primitiveBlocks), dim3(tileSize), 0, 0, primitives, ibosize/3, depthbuffer, resolution, viewPort, lock);
  hipDeviceSynchronize();
  //------------------------------
  // fragment shader: upload the light set first
  //------------------------------
  ray* lights = new ray[lightNum];
  lights[0].position = glm::vec3(1.0f, 1.0f, 0.0f);
  lights[0].color = glm::vec3(1.0f);
  lights[1].position = glm::vec3(1.0f, 1.0f, 1.0f);
  lights[1].color = glm::vec3(0.15f, 0.48f, 0.09f);
  lights[2].position = glm::vec3(1.0f, 1.0f, 1.0f);
  lights[2].color =glm::vec3(0.13f, 0.16f, 0.84f);
  lights[3].position = glm::vec3(0.0f, 1.0f, 1.0f);
  lights[3].color = glm::vec3(0.43f, 0.16f, 0.54f);
  hipMemcpy(light, lights, lightNum * sizeof(ray), hipMemcpyHostToDevice);
  delete [] lights;
  hipLaunchKernelGGL(( fragmentShadeKernel), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, depthbuffer, resolution, light, lightNum, depthFlag, flatcolorFlag, color, multicolorFlag);
  hipDeviceSynchronize();
  //------------------------------
  // write fragments to framebuffer, then to the PBO
  //------------------------------
  hipLaunchKernelGGL(( render), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, resolution, depthbuffer, framebuffer, antialiasing);
  hipLaunchKernelGGL(( sendImageToPBO), dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, PBOpos, resolution, framebuffer);
  hipDeviceSynchronize();
  kernelCleanup();
  checkCUDAError("Kernel failed!");
}
// Releases every per-frame device allocation made by cudaRasterizeCore.
// hipFree(NULL) is a no-op, so pointers that were never allocated are safe.
// NOTE(review): 'transform' is never hipMalloc'ed anywhere in the visible
// code -- freeing it relies on the file-scope pointer staying NULL; confirm.
void kernelCleanup(){
  hipFree( primitives );
  hipFree( device_vbo );
  hipFree( device_cbo );
  hipFree( device_ibo );
  hipFree( device_nbo );
  hipFree( framebuffer );
  hipFree( depthbuffer );
  hipFree( lock );
  hipFree( light );
  hipFree( transform );
}
| d049b208048e51e52a8628831234adf198a84e00.cu | // CIS565 CUDA Rasterizer: A simple rasterization pipeline for Patrick Cozzi's CIS565: GPU Computing at the University of Pennsylvania
// Written by Yining Karl Li, Copyright (c) 2012 University of Pennsylvania
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <thrust/random.h>
#include "rasterizeKernels.h"
#include "rasterizeTools.h"
#if CUDA_VERSION >= 5000
#include <helper_math.h>
#else
#include <cutil_math.h>
#endif
glm::vec3* framebuffer;
fragment* depthbuffer;
int* lock;
ray* light;
int lightNum = 4;
cudaMat4* transform;
float* device_vbo;
float* device_cbo;
int* device_ibo;
float* device_nbo;
triangle* primitives;
// Aborts the program if the most recent CUDA API call or kernel launch left a
// sticky error; `msg` identifies the call site in the report on stderr.
void checkCUDAError(const char *msg) {
const cudaError_t status = cudaGetLastError();
if (status == cudaSuccess) {
return;
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
exit(EXIT_FAILURE);
}
//Handy dandy little hashing function that provides seeds for random number generation
// Integer mixing hash used to derive per-thread RNG seeds: alternating
// add/xor rounds with shifted copies of the value so every input bit
// influences many output bits. Callable from both host and device.
__host__ __device__ unsigned int hash(unsigned int a){
unsigned int v = a;
v = (v + 0x7ed55d16) + (v << 12);
v = (v ^ 0xc761c23c) ^ (v >> 19);
v = (v + 0x165667b1) + (v << 5);
v = (v + 0xd3a2646c) ^ (v << 9);
v = (v + 0xfd7046c5) + (v << 3);
v = (v ^ 0xb55a4f09) ^ (v >> 16);
return v;
}
//Writes a given fragment to a fragment buffer at a given location
//Writes a given fragment to a fragment buffer at a given location
// Row-major layout: pixel (x, y) lives at y * width + x. Writes outside the
// image are silently dropped. NOTE(review): negative coordinates are not
// rejected — callers are presumed to pass non-negative x/y; confirm.
__host__ __device__ void writeToDepthbuffer(int x, int y, fragment frag, fragment* depthbuffer, glm::vec2 resolution){
if (!(x < resolution.x && y < resolution.y)) {
return;
}
const int idx = (y * resolution.x) + x;
depthbuffer[idx] = frag;
}
//Reads a fragment from a given location in a fragment buffer
//Reads a fragment from a given location in a fragment buffer
// Returns the fragment stored at (x, y). For out-of-range coordinates a
// zeroed fragment is returned instead of indeterminate data.
__host__ __device__ fragment getFromDepthbuffer(int x, int y, fragment* depthbuffer, glm::vec2 resolution){
if(x<resolution.x && y<resolution.y){
int index = (y*resolution.x) + x;
return depthbuffer[index];
}else{
// Bug fix: `f` was previously returned uninitialized, handing callers
// indeterminate position/normal/color values.
fragment f;
f.position = glm::vec3(0,0,0);
f.normal = glm::vec3(0,0,0);
f.color = glm::vec3(0,0,0);
return f;
}
}
//Writes a given pixel to a pixel buffer at a given location
//Writes a given pixel to a pixel buffer at a given location
// Row-major layout: pixel (x, y) lives at y * width + x. Writes outside the
// image are silently dropped.
__host__ __device__ void writeToFramebuffer(int x, int y, glm::vec3 value, glm::vec3* framebuffer, glm::vec2 resolution){
if (!(x < resolution.x && y < resolution.y)) {
return;
}
const int idx = (y * resolution.x) + x;
framebuffer[idx] = value;
}
//Reads a pixel from a pixel buffer at a given location
//Reads a pixel from a pixel buffer at a given location
// Returns the stored color at (x, y), or black for out-of-range coordinates.
__host__ __device__ glm::vec3 getFromFramebuffer(int x, int y, glm::vec3* framebuffer, glm::vec2 resolution){
if (!(x < resolution.x && y < resolution.y)) {
return glm::vec3(0,0,0);
}
const int idx = (y * resolution.x) + x;
return framebuffer[idx];
}
//Kernel that clears a given pixel buffer with a given color
//Kernel that clears a given pixel buffer with a given color
// Launched with a 2D grid covering the image; one thread per pixel.
__global__ void clearImage(glm::vec2 resolution, glm::vec3* image, glm::vec3 color){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
// Bug fix: the bound checks used <=, so threads with x == resolution.x
// wrote into the next row and threads with y == resolution.y wrote past
// the end of the buffer.
if(x<resolution.x && y<resolution.y){
image[index] = color;
}
}
//Kernel that clears a given fragment buffer with a given fragment
//Kernel that clears a given fragment buffer with a given fragment
// One thread per pixel; stores `frag` with its position.x/y overwritten by
// the pixel coordinates.
__global__ void clearDepthBuffer(glm::vec2 resolution, fragment* buffer, fragment frag){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
// Bug fix: the bound checks used <=, allowing out-of-bounds writes for
// threads exactly at x == resolution.x or y == resolution.y.
if(x<resolution.x && y<resolution.y){
fragment f = frag;
f.position.x = x;
f.position.y = y;
buffer[index] = f;
}
}
//Kernel that writes the image to the OpenGL PBO directly.
//Kernel that writes the image to the OpenGL PBO directly.
// Converts each float color to 8-bit (scaled by 255 and clamped) and stores
// it in the pixel buffer object; alpha (w) is set to 0.
__global__ void sendImageToPBO(uchar4* PBOpos, glm::vec2 resolution, glm::vec3* image){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
// Bug fix: the bound checks used <=, letting threads one past the last
// row/column read and write out of bounds.
if(x<resolution.x && y<resolution.y){
glm::vec3 color;
color.x = image[index].x*255.0;
color.y = image[index].y*255.0;
color.z = image[index].z*255.0;
if(color.x>255){
color.x = 255;
}
if(color.y>255){
color.y = 255;
}
if(color.z>255){
color.z = 255;
}
// Each thread writes one pixel location in the texture (textel)
PBOpos[index].w = 0;
PBOpos[index].x = color.x;
PBOpos[index].y = color.y;
PBOpos[index].z = color.z;
}
}
//TODO: Implement a vertex shader
__global__ void vertexShadeKernel(float* vbo, int vbosize, float* nbo, int nbosize, const cudaMat4 transform){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < vbosize/3){
glm::vec3 newVertex = multiplyMV(transform, glm::vec4(vbo[3 * index], vbo[3 * index + 1], vbo[3 * index + 2], 1.0f));
glm::vec3 normal = glm::normalize(multiplyMV(transform, glm::vec4(nbo[3 * index], nbo[3 * index + 1], nbo[3 * index + 2], 1.0f)));
vbo[3 * index] = newVertex.x;
vbo[3 * index + 1] = newVertex.y;
vbo[3 * index + 2] = newVertex.z;
nbo[3 * index] = normal.x;
nbo[3 * index + 1] = normal.y;
nbo[3 * index + 2] = normal.z;
}
}
//TODO: Implement primitive assembly
// One thread per triangle: gathers the three vertex positions and normals
// referenced by the index buffer into a `triangle` record, and attaches the
// fixed three-vertex color set taken from the nine-float color buffer.
__global__ void primitiveAssemblyKernel(float* vbo, int vbosize, float* nbo, int nbosize, float* cbo, int cbosize, int* ibo, int ibosize, triangle* primitives){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int primitivesCount = ibosize/3;
if(index < primitivesCount){
// The actual indices of vertices are stored in the index buffer object.
const int* vertexIndex = &ibo[3 * index];
// Positions: vbo is a flat xyz array, so vertex v starts at vbo[3*v].
primitives[index].p0 = glm::vec3(vbo[3 * vertexIndex[0]], vbo[3 * vertexIndex[0] +1], vbo[3 * vertexIndex[0] + 2]);
primitives[index].p1 = glm::vec3(vbo[3 * vertexIndex[1]], vbo[3 * vertexIndex[1] +1], vbo[3 * vertexIndex[1] + 2]);
primitives[index].p2 = glm::vec3(vbo[3 * vertexIndex[2]], vbo[3 * vertexIndex[2] +1], vbo[3 * vertexIndex[2] + 2]);
// Load normals of vertices.
primitives[index].n0 = glm::vec3(nbo[3 * vertexIndex[0]], nbo[3 * vertexIndex[0] +1], nbo[3 * vertexIndex[0] + 2]);
primitives[index].n1 = glm::vec3(nbo[3 * vertexIndex[1]], nbo[3 * vertexIndex[1] +1], nbo[3 * vertexIndex[1] + 2]);
primitives[index].n2 = glm::vec3(nbo[3 * vertexIndex[2]], nbo[3 * vertexIndex[2] +1], nbo[3 * vertexIndex[2] + 2]);
// The size of cbo is nine, only needs to give the nine RGB values to the color in the triangle variable's color vector.
// NOTE(review): every triangle gets the same three colors — cbo is not
// indexed per vertex; confirm this is the intended fixed palette.
primitives[index].c0 = glm::vec3(cbo[0], cbo[1], cbo[2]);
primitives[index].c1 = glm::vec3(cbo[3], cbo[4], cbo[5]);
primitives[index].c2 = glm::vec3(cbo[6], cbo[7], cbo[8]);
}
}
//TODO: Implement a rasterization method, such as scanline.
// One thread per triangle:
//  1. back-face cull against `view`,
//  2. compute the triangle's screen-space bounding box and clamp it to the
//     image,
//  3. for every pixel in the box, test barycentric containment, then perform
//     the depth test and fragment write under a per-pixel spin lock.
// NOTE(review): assumes every lock[idx] starts at 0 (cudaMalloc does not zero
// memory) — confirm the host side clears the lock buffer before launch.
// NOTE(review): the pixel loops use `< maxX` / `< maxY`, so the bounding
// box's last row and column are never shaded — looks like an off-by-one;
// verify against the renderer's output.
__global__ void rasterizationKernel(triangle* primitives, int primitivesCount, fragment* depthbuffer, glm::vec2 resolution, glm::vec3 view, int* lock){
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if(index < primitivesCount){
// Initialize triangle and back face culling if the normal of z is point to the back
triangle currentTriangle = primitives[index];
if (glm::dot(currentTriangle.n0, view) > 0.0f || glm::dot(currentTriangle.n1, view) > 0.0f || glm::dot(currentTriangle.n2, view) > 0.0f )
return;
// Flat shading normal: average of the three vertex normals.
glm::vec3 normal = glm::normalize((currentTriangle.n0 + currentTriangle.n1 + currentTriangle.n2) / 3.0f);
// Add min max vectors and integers for the bounds and project the min back to the screen coordinate
glm::vec3 minPoint, maxPoint;
int minX, minY, maxX, maxY;
getAABBForTriangle(currentTriangle, minPoint, maxPoint);
// Screen y grows downward, so the min corner maps to maxY and vice versa.
scale2screen(minPoint, minX, maxY, resolution);
scale2screen(maxPoint, maxX, minY, resolution);
if (minX > resolution.x - 1 || minY > resolution.y - 1 || minX < 0 || minY < 0 )
return;
// Clipping the points outside the image inside.
minX = minX < 0 ? 0 : minX;
minY = minY < 0 ? 0 : minY;
maxX = (maxX > resolution.x - 1) ? resolution.x - 1 : maxX;
maxY = (maxY > resolution.y - 1) ? resolution.y - 1 : maxY;
// Loop and rasterize the interpolated area across the current primitive.
int idx;
float depth = 0;
for (int y = minY; y < maxY; ++ y) {
for (int x = minX; x < maxX; ++ x) {
idx = y * resolution.x + x;
glm::vec3 barycentricCoordinates = calculateBarycentricCoordinate(currentTriangle, screen2scale(x, y, resolution));
// Determine whether the current pixel is within the bounds of the current primitive
if (!isBarycentricCoordInBounds(barycentricCoordinates))
continue;
depth = getZAtCoordinate(barycentricCoordinates, currentTriangle);
// Try-lock loop: only the thread that wins the atomicCAS enters the
// critical section; everyone else retries until the pixel is free.
bool loopFlag = true;
do {
if (atomicCAS(&lock[idx], 0, 1) == 0) {
//Depth Test
if(depth > depthbuffer[idx].position.z) {
depthbuffer[idx].position.x = screen2scale(x, y, resolution).x;
depthbuffer[idx].position.y = screen2scale(x, y, resolution).y;
depthbuffer[idx].position.z = depth;
depthbuffer[idx].normal = normal;
depthbuffer[idx].color = barycentricCoordinates.x * currentTriangle.c0 + barycentricCoordinates.y * currentTriangle.c1 + barycentricCoordinates.z * currentTriangle.c2;
}
loopFlag = false;
// Make the fragment visible to other threads before releasing the lock.
__threadfence();
atomicExch(&(lock[idx]), 0);
}
} while (loopFlag);
} // for x
} // for y
}
}
//TODO: Implement a fragment shader
__global__ void fragmentShadeKernel(fragment* depthbuffer, glm::vec2 resolution, ray* light, int lightNum, bool depthFlag, bool flatcolorFlag, int color, bool multicolorFlag){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if (x > resolution.x && y > resolution.y)
return;
// Initialize the buffer and iteration time for lights
glm::vec3 frag = depthbuffer[index].color;
depthbuffer[index].color = glm::vec3(0.0f);
lightNum = multicolorFlag ? lightNum : 1;
// Shading lights on the cow.
for (int i = 0; i < lightNum; ++ i) {
// Compute the vectors of light, view and H in Blinn-Phong lighting model, referring to http://en.wikipedia.org/wiki/Blinn%E2%80%93Phong_shading_model
glm::vec3 position = depthbuffer[index].position;
glm::vec3 normal = depthbuffer[index].normal;
glm::vec3 L = glm::normalize(light[i].position - position);
glm::vec3 V = glm::normalize(- position);
glm::vec3 H = glm::normalize(L + V);
// Compute the diffusion and Blinn-Phong lighting
float diffuse = glm::max(glm::dot(L, normal), 0.0f);
float specular = glm::max(glm::pow(glm::dot(H, normal), 1000.0f), 0.0f);
// Compute final color
if (depthFlag) {
depthbuffer[index].color = depthbuffer[index].position.z * glm::vec3(1.0f);
return;
} else if (flatcolorFlag) {
switch(color) {
case(0):
depthbuffer[index].color += (0.5f * diffuse + 0.5f * specular) * light[i].color;
break;
case(1):
depthbuffer[index].color += (0.5f * diffuse + 0.5f * specular) * glm::vec3(0.63f, 0.06f, 0.04f) * light[i].color;
break;
case(2):
depthbuffer[index].color += (0.5f * diffuse + 0.5f * specular) * glm::vec3(0.15f, 0.48f, 0.09f) * light[i].color;
break;
case(3):
depthbuffer[index].color += (0.5f * diffuse + 0.5f * specular) * glm::vec3(0.13f, 0.16f, 0.84f) * light[i].color;
break;
case(4):
depthbuffer[index].color += (0.5f * diffuse + 0.5f * specular) * glm::vec3(0.43f, 0.16f, 0.54f) * light[i].color;
break;
}
} else
depthbuffer[index].color += frag * (0.5f * diffuse + 0.5f * specular) * light[i].color;
}
depthbuffer[index].color /= multicolorFlag ? (float)lightNum * 0.3f : (float)lightNum * 0.5f;
}
//Writes fragment colors to the framebuffer
__global__ void render(glm::vec2 resolution, fragment* depthbuffer, glm::vec3* framebuffer, bool antialiasing){
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * resolution.x);
if(x<=resolution.x && y<=resolution.y){
if (ANTIALIASING || antialiasing) {
// Using super sampling and initialize with the central pixel, the corner flag indicates which corner the pixel is
int sampleNum = 1;
glm::vec3 sampling = depthbuffer[index].color;
// Left side
sampleNum += (x > 0) ? 1 : 0;
sampling += (x > 0) ? depthbuffer[index - 1].color : glm::vec3(0.0f);
// Upper side
sampleNum += (y > 0) ? 1 : 0;
sampling += (y > 0) ? depthbuffer[index - (int)resolution.x].color : glm::vec3(0.0f);
// Right side
sampleNum += (x < resolution.x - 1) ? 1 : 0;
sampling += (x < resolution.x - 1) ? depthbuffer[index + 1].color : glm::vec3(0.0f);
// Bottom side
sampleNum += (x < resolution.y - 1) ? 1 : 0;
sampling += (x < resolution.y - 1) ? depthbuffer[index + (int)resolution.x].color : glm::vec3(0.0f);
// Four corners
sampleNum += (sampleNum == 5) ? 4 : 0;
sampling += (sampleNum == 9) ? (depthbuffer[index - (int)resolution.x - 1].color + depthbuffer[index - (int)resolution.x + 1].color
+ depthbuffer[index + (int)resolution.x - 1].color + depthbuffer[index + (int)resolution.x + 1].color) : glm::vec3(0.0f);
if (sampleNum == 9) {
framebuffer[index] = sampling / 9.0f;
return;
}
// Two corners
sampleNum += (sampleNum == 4) ? 2 : 0;
if (sampleNum == 6) {
sampling += x == 0 ? depthbuffer[index - (int)resolution.x + 1].color + depthbuffer[index + (int)resolution.x + 1].color: glm::vec3(0.0f);
sampling += y == 0 ? depthbuffer[index + (int)resolution.x - 1].color + depthbuffer[index + (int)resolution.x + 1].color: glm::vec3(0.0f);
sampling += x == resolution.x ? depthbuffer[index - (int)resolution.x - 1].color + depthbuffer[index + (int)resolution.x - 1].color: glm::vec3(0.0f);
sampling += y == resolution.y ? depthbuffer[index - (int)resolution.x - 1].color + depthbuffer[index - (int)resolution.x + 1].color: glm::vec3(0.0f);
framebuffer[index] = sampling / 6.0f;
return;
}
// One corner
sampleNum += (sampleNum == 3) ? 1 : 0;
if((x == 0) && (y == 0)) {
sampling += depthbuffer[index + (int)resolution.x + 1].color;
} else if ((x == resolution.x -1) && (y == 0)) {
sampling += depthbuffer[index + (int)resolution.x - 1].color;
} else if ((y == resolution.y -1) && (x == 0)) {
sampling += depthbuffer[index - (int)resolution.x + 1].color;
} else {
sampling += depthbuffer[index - (int)resolution.x - 1].color;
}
framebuffer[index] = sampling / 4.0f;
} else
framebuffer[index] = depthbuffer[index].color;
}
}
// Wrapper for the __global__ call that sets up the kernel calls and does a ton of memory management
void cudaRasterizeCore(uchar4* PBOpos, glm::vec2 resolution, float frame, float* vbo, int vbosize, float* cbo, int cbosize, int* ibo, int ibosize, float* nbo, int nbosize, const cudaMat4* transform, glm::vec3 viewPort, bool antialiasing, bool depthFlag, bool flatcolorFlag, int color, bool multicolorFlag){
// set up crucial magic
int tileSize = 8;
dim3 threadsPerBlock(tileSize, tileSize);
dim3 fullBlocksPerGrid((int)ceil(float(resolution.x)/float(tileSize)), (int)ceil(float(resolution.y)/float(tileSize)));
// set up framebuffer
framebuffer = NULL;
cudaMalloc((void**)&framebuffer, (int)resolution.x*(int)resolution.y*sizeof(glm::vec3));
// set up depthbuffer
depthbuffer = NULL;
cudaMalloc((void**)&depthbuffer, (int)resolution.x*(int)resolution.y*sizeof(fragment));
// set up lock
lock = NULL;
cudaMalloc((void**)&lock, (int)resolution.x * (int)resolution.y * sizeof(int));
// set up light
light = NULL;
cudaMalloc((void**)&light, lightNum * sizeof(ray));
//kernel launches to black out accumulated/unaccumlated pixel buffers and clear our scattering states
clearImage<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, framebuffer, glm::vec3(0,0,0));
fragment frag;
frag.color = glm::vec3(0,0,0);
frag.normal = glm::vec3(0,0,0);
frag.position = glm::vec3(0,0,-10000);
clearDepthBuffer<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, depthbuffer,frag);
//------------------------------
//memory stuff
//------------------------------
primitives = NULL;
cudaMalloc((void**)&primitives, (ibosize/3)*sizeof(triangle));
// Index Buffer Object
device_ibo = NULL;
cudaMalloc((void**)&device_ibo, ibosize*sizeof(int));
cudaMemcpy( device_ibo, ibo, ibosize*sizeof(int), cudaMemcpyHostToDevice);
// Vertex Buffer Object
device_vbo = NULL;
cudaMalloc((void**)&device_vbo, vbosize*sizeof(float));
cudaMemcpy( device_vbo, vbo, vbosize*sizeof(float), cudaMemcpyHostToDevice);
// Color Buffer Object
device_cbo = NULL;
cudaMalloc((void**)&device_cbo, cbosize*sizeof(float));
cudaMemcpy( device_cbo, cbo, cbosize*sizeof(float), cudaMemcpyHostToDevice);
// Normal Buffer Object
device_nbo = NULL;
cudaMalloc((void**)&device_nbo, nbosize*sizeof(float));
cudaMemcpy( device_nbo, nbo, nbosize*sizeof(float), cudaMemcpyHostToDevice);
tileSize = 32;
int primitiveBlocks = ceil(((float)vbosize/3)/((float)tileSize));
//------------------------------
//vertex shader
//------------------------------
vertexShadeKernel<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, device_nbo, nbosize, *transform);
cudaDeviceSynchronize();
//------------------------------
//primitive assembly
//------------------------------
primitiveBlocks = ceil(((float)ibosize/3)/((float)tileSize));
primitiveAssemblyKernel<<<primitiveBlocks, tileSize>>>(device_vbo, vbosize, device_nbo, nbosize, device_cbo, cbosize, device_ibo, ibosize, primitives);
cudaDeviceSynchronize();
//------------------------------
//rasterization
//------------------------------
rasterizationKernel<<<primitiveBlocks, tileSize>>>(primitives, ibosize/3, depthbuffer, resolution, viewPort, lock);
cudaDeviceSynchronize();
//------------------------------
//fragment shader
//------------------------------
// Multiple lighting
ray* lights = new ray[lightNum];
lights[0].position = glm::vec3(1.0f, 1.0f, 0.0f);
lights[0].color = glm::vec3(1.0f);
lights[1].position = glm::vec3(1.0f, 1.0f, 1.0f);
lights[1].color = glm::vec3(0.15f, 0.48f, 0.09f);
lights[2].position = glm::vec3(1.0f, 1.0f, 1.0f);
lights[2].color =glm::vec3(0.13f, 0.16f, 0.84f);
lights[3].position = glm::vec3(0.0f, 1.0f, 1.0f);
lights[3].color = glm::vec3(0.43f, 0.16f, 0.54f);
cudaMemcpy(light, lights, lightNum * sizeof(ray), cudaMemcpyHostToDevice);
delete [] lights;
fragmentShadeKernel<<<fullBlocksPerGrid, threadsPerBlock>>>(depthbuffer, resolution, light, lightNum, depthFlag, flatcolorFlag, color, multicolorFlag);
cudaDeviceSynchronize();
//------------------------------
//write fragments to framebuffer
//------------------------------
render<<<fullBlocksPerGrid, threadsPerBlock>>>(resolution, depthbuffer, framebuffer, antialiasing);
sendImageToPBO<<<fullBlocksPerGrid, threadsPerBlock>>>(PBOpos, resolution, framebuffer);
cudaDeviceSynchronize();
kernelCleanup();
checkCUDAError("Kernel failed!");
}
// Releases every per-frame device allocation owned by the rasterization
// pipeline. cudaFree(NULL) is a no-op, so pointers that were never allocated
// this frame are safe to pass.
// NOTE(review): `transform` is never cudaMalloc'd in this file — confirm it
// is either always NULL here or allocated elsewhere.
void kernelCleanup(){
void* deviceBuffers[] = { primitives, device_vbo, device_cbo, device_ibo,
device_nbo, framebuffer, depthbuffer, lock, light, transform };
for (int i = 0; i < 10; ++i) {
cudaFree(deviceBuffers[i]);
}
}
|
8d0ed1895bacd85cd4315f1bacc4a896fbcd098f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
// CSR sparse-matrix / multi-vector product: for each of the num_vecs
// right-hand sides i computes
//     dy[:,i] = alpha * A * dx[:,i] + beta * dy[:,i]
// with one thread per matrix row. Per-row partial sums for all vectors live
// in dynamic shared memory (blockDim.x * num_vecs complex values, sized by
// the host wrapper).
// NOTE(review): dx and dy are both indexed with leading dimension num_cols;
// for a non-square A the dy indexing looks suspect — confirm with callers.
__global__ void
zmgecsrmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
// dot[threadIdx.x + i*blockDim.x] holds this thread's running sum for vector i.
extern __shared__ magmaDoubleComplex dot[];
if( row<num_rows ){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0);
// CSR row `row` occupies entries [start, end) of dval/dcolind.
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++ ){
int col = dcolind [ j ];
magmaDoubleComplex val = dval[ j ];
// Each nonzero contributes to every right-hand side.
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[ col + i*num_cols ];
}
for( int i=0; i<num_vecs; i++ )
dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ]
+ beta * dy[ row + i*num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is CSR.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs mama_int_t
number of vectors
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
// Host wrapper: launches zmgecsrmv_kernel with one thread per row and
// BLOCK_SIZE threads per block on the queue's stream.
// NOTE(review): transA is accepted but not used — only the non-transposed
// product is computed.
extern "C" magma_int_t
magma_zmgecsrmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1);
magma_int_t threads = BLOCK_SIZE;
// Dynamic shared memory: one partial-sum slot per (thread, vector) pair.
// NOTE(review): num_vecs * 512 * sizeof(magmaDoubleComplex) exceeds the
// usual 48 KB default shared-memory limit once num_vecs > 6 — confirm
// callers bound num_vecs accordingly.
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaDoubleComplex ); // num_vecs vectors
hipLaunchKernelGGL(( zmgecsrmv_kernel), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream(),
m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy);
return MAGMA_SUCCESS;
}
| 8d0ed1895bacd85cd4315f1bacc4a896fbcd098f.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
// CSR sparse-matrix / multi-vector product: for each of the num_vecs
// right-hand sides i computes
//     dy[:,i] = alpha * A * dx[:,i] + beta * dy[:,i]
// with one thread per matrix row. Per-row partial sums for all vectors live
// in dynamic shared memory (blockDim.x * num_vecs complex values, sized by
// the host wrapper).
// NOTE(review): dx and dy are both indexed with leading dimension num_cols;
// for a non-square A the dy indexing looks suspect — confirm with callers.
__global__ void
zmgecsrmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
magmaDoubleComplex alpha,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * dx,
magmaDoubleComplex beta,
magmaDoubleComplex * dy)
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
// dot[threadIdx.x + i*blockDim.x] holds this thread's running sum for vector i.
extern __shared__ magmaDoubleComplex dot[];
if( row<num_rows ){
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0);
// CSR row `row` occupies entries [start, end) of dval/dcolind.
int start = drowptr[ row ];
int end = drowptr[ row+1 ];
for( j=start; j<end; j++ ){
int col = dcolind [ j ];
magmaDoubleComplex val = dval[ j ];
// Each nonzero contributes to every right-hand side.
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[ col + i*num_cols ];
}
for( int i=0; i<num_vecs; i++ )
dy[ row +i*num_cols ] = alpha * dot[ threadIdx.x + i*blockDim.x ]
+ beta * dy[ row + i*num_cols ];
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is CSR.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs mama_int_t
number of vectors
@param[in]
alpha magmaDoubleComplex
scalar multiplier
@param[in]
dval magmaDoubleComplex_ptr
array containing values of A in CSR
@param[in]
drowptr magmaIndex_ptr
rowpointer of A in CSR
@param[in]
dcolind magmaIndex_ptr
columnindices of A in CSR
@param[in]
dx magmaDoubleComplex_ptr
input vector x
@param[in]
beta magmaDoubleComplex
scalar multiplier
@param[out]
dy magmaDoubleComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
// Host wrapper: launches zmgecsrmv_kernel with one thread per row and
// BLOCK_SIZE threads per block on the queue's stream.
// NOTE(review): transA is accepted but not used — only the non-transposed
// product is computed.
extern "C" magma_int_t
magma_zmgecsrmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magmaDoubleComplex alpha,
magmaDoubleComplex_ptr dval,
magmaIndex_ptr drowptr,
magmaIndex_ptr dcolind,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex beta,
magmaDoubleComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ), 1, 1);
magma_int_t threads = BLOCK_SIZE;
// Dynamic shared memory: one partial-sum slot per (thread, vector) pair.
// NOTE(review): num_vecs * 512 * sizeof(magmaDoubleComplex) exceeds the
// usual 48 KB default shared-memory limit once num_vecs > 6 — confirm
// callers bound num_vecs accordingly.
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( magmaDoubleComplex ); // num_vecs vectors
zmgecsrmv_kernel<<< grid, threads, MEM_SIZE, queue->cuda_stream()>>>
(m, n, num_vecs, alpha, dval, drowptr, dcolind, dx, beta, dy);
return MAGMA_SUCCESS;
}
|
29befbcabc65259f9863f6acd4fdc0d516186f82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void vectorAdd(const int *a, const int *b, int *c, int N)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
while(tid < N)
{
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
} | 29befbcabc65259f9863f6acd4fdc0d516186f82.cu | #include "includes.h"
__global__ void vectorAdd(const int *a, const int *b, int *c, int N)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
while(tid < N)
{
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
} |
b7b8da6419792f684b037ed871f0dad225a29a13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Config.h"
#include "propagation_kernels.h"
#include <stdio.h>
#define L 6
#define LL 36
#define LS 21
// values from 32 to 512 give good results.
// 32 gives slightly better results (on a K40)
#define BLOCK_SIZE_X 32
#define MAX_BLOCKS_X 65535 // CUDA constraint
// Length (hypotenuse) of the 2-D vector (x, y).
__device__ float hipo(float x, float y) {
return sqrt(x*x + y*y);
}
// Fast small-angle sine/cosine approximation built from a running product
// term instead of explicit factorial divisions.
// NOTE(review): each `x *= x * c` squares the running term, so the series
// produced is sin ~= x - x^4/12 and cos ~= 1 - x^2/2 + x^8/576 rather than
// the Taylor terms x^3/6 and x^4/24 — verify this truncation is intentional.
__device__ void sincos4(float x, float& sin, float& cos) {
// Had this writen with explicit division by factorial.
// The *whole* fitting test ran like 2.5% slower on MIC, sigh.
cos = 1;
sin = x; x *= x * 0.5f;
cos -= x; x *= x * 0.33333333f;
sin -= x; x *= x * 0.25f;
cos += x;
}
// computeJacobianSimple works on values that are in registers.
// Registers are thread-private. Thus this function has no notion of
// parallelism. It is ran serially by each calling thread.
// Fills errorProp (6x6, row-major with leading dimension L == 6) with the
// helix-propagation Jacobian for the "simple" case where the angular path TP
// is treated as exact (its momentum derivatives are forced to zero below).
// Runs serially in the calling thread; all operands are registers.
// NOTE(review): parameter N is unused; the initial dTPdp* expressions are
// immediately overwritten with 0 and survive only as documentation.
__device__ void computeJacobianSimple(float *errorProp,
float s, float k, float p, float pxin, float pyin, float pzin,
float TP, float cosTP, float sinTP, int N) {
// std::cout << "total path s=" << s << std::endl;
// TD = s*pt/p;
// TP = TD/(pt*k) = s/(p*k);
float dTPdpx = -s*pxin/(k*p*p*p);
float dTPdpy = -s*pyin/(k*p*p*p);
float dTPdpz = -s*pzin/(k*p*p*p);
//ok let's assume that the quantity with no error is the angular path (phase change)
dTPdpx = 0;
dTPdpy = 0;
dTPdpz = 0;
//derive these to compute jacobian
//x = xin + k*(pxin*sinTP-pyin*(1-cosTP));
//y = yin + k*(pyin*sinTP+pxin*(1-cosTP));
//z = zin + k*TP*pzin;
//px = pxin*cosTP-pyin*sinTP;
//py = pyin*cosTP+pxin*sinTP;
//pz = pzin;
//jacobian
errorProp[(0*L + 0)] = 1.; //dxdx
errorProp[(0*L + 1)] = 0.; //dxdy
errorProp[(0*L + 2)] = 0.; //dxdz
errorProp[(0*L + 3)] = k*(sinTP + pxin*cosTP*dTPdpx - pyin*sinTP*dTPdpx); //dxdpx
errorProp[(0*L + 4)] = k*(pxin*cosTP*dTPdpy - 1. + cosTP - pyin*sinTP*dTPdpy); //dxdpy
errorProp[(0*L + 5)] = k*dTPdpz*(pxin*cosTP - pyin*sinTP); //dxdpz
errorProp[(1*L + 0)] = 0.; //dydx
errorProp[(1*L + 1)] = 1.; //dydy
errorProp[(1*L + 2)] = 0.; //dydz
errorProp[(1*L + 3)] = k*(pyin*cosTP*dTPdpx + 1. - cosTP + pxin*sinTP*dTPdpx); //dydpx
errorProp[(1*L + 4)] = k*(sinTP + pyin*cosTP*dTPdpy + pxin*sinTP*dTPdpy); //dydpy
errorProp[(1*L + 5)] = k*dTPdpz*(pyin*cosTP + pxin*sinTP); //dydpz
errorProp[(2*L + 0)] = 0.; //dzdx
errorProp[(2*L + 1)] = 0.; //dzdy
errorProp[(2*L + 2)] = 1.; //dzdz
errorProp[(2*L + 3)] = k*pzin*dTPdpx; //dzdpx
errorProp[(2*L + 4)] = k*pzin*dTPdpy; //dzdpy
errorProp[(2*L + 5)] = k*(TP + dTPdpz*pzin); //dzdpz
errorProp[(3*L + 0)] = 0.; //dpxdx
errorProp[(3*L + 1)] = 0.; //dpxdy
errorProp[(3*L + 2)] = 0.; //dpxdz
errorProp[(3*L + 3)] = cosTP - dTPdpx*(pxin*sinTP + pyin*cosTP); //dpxdpx
errorProp[(3*L + 4)] = -sinTP - dTPdpy*(pxin*sinTP + pyin*cosTP); //dpxdpy
errorProp[(3*L + 5)] = -dTPdpz*(pxin*sinTP + pyin*cosTP); //dpxdpz
errorProp[(4*L + 0)] = 0.; //dpydx
errorProp[(4*L + 1)] = 0.; //dpydy
errorProp[(4*L + 2)] = 0.; //dpydz
errorProp[(4*L + 3)] = +sinTP - dTPdpx*(pyin*sinTP - pxin*cosTP); //dpydpx
errorProp[(4*L + 4)] = +cosTP - dTPdpy*(pyin*sinTP - pxin*cosTP); //dpydpy
errorProp[(4*L + 5)] = -dTPdpz*(pyin*sinTP - pxin*cosTP); //dpydpz
errorProp[(5*L + 0)] = 0.; //dpzdx
errorProp[(5*L + 1)] = 0.; //dpzdy
errorProp[(5*L + 2)] = 0.; //dpzdz
errorProp[(5*L + 3)] = 0.; //dpzdpx
errorProp[(5*L + 4)] = 0.; //dpzdpy
errorProp[(5*L + 5)] = 1.; //dpzdpz
}
/// Compute MsRad /////////////////////////////////////////////////////////////
// Not passing msRad.stride, as QF == 1 (second dim f msRad)
// Computes the transverse radius hipo(x, y) of measurement n into *msRad.
// msPar is laid out structure-of-arrays: the first coordinate of entry n is
// msPar[n] and the second is msPar[n + stride_msPar].
// NOTE(review): `n` is passed in rather than derived from the thread index
// (see the commented-out line) — callers are expected to supply it.
__device__ void computeMsRad_fn(const float* __restrict__ msPar,
size_t stride_msPar, float* msRad, int N, int n) {
/*int n = threadIdx.x + blockIdx.x * blockDim.x;*/
if (n < N) {
*msRad = hipo(msPar[n], msPar[n + stride_msPar]);
}
}
__device__
void helixAtRFromIterative_fn(float *inPar, size_t inPar_stride,
int *inChg, float *outPar, size_t outPar_stride, float msRad,
float *errorProp_reg, int N, int n) {
size_t opN = outPar_stride;
size_t ipN = inPar_stride;
/*int n = threadIdx.x + blockIdx.x * blockDim.x;*/
float outPar_reg[5];
if (n < N) {
for (int j = 0; j < 5; ++j) {
outPar_reg[j] = outPar[n+j*opN];
}
const float& xin = inPar[n + 0*ipN];
const float& yin = inPar[n + 1*ipN];
const float& pxin = inPar[n + 3*ipN];
const float& pyin = inPar[n + 4*ipN];
const float& pzin = inPar[n + 5*ipN];
const float& r = msRad;
float r0 = hipo(xin, yin);
if (fabs(r-r0)<0.0001) {
// get an identity matrix
computeJacobianSimple(errorProp_reg, 0, 1, 1, 1, 1, 1, 0, 1, 0, N);
return; // continue;
}
float pt2 = pxin*pxin+pyin*pyin;
float pt = sqrt(pt2);
float ptinv = 1./pt;
float pt2inv = ptinv*ptinv;
//p=0.3Br => r=p/(0.3*B)
float k = inChg[n] * 100. / (-0.299792458*Config::Bfield);
float invcurvature = 1./(pt*k);//in 1./cm
float ctgTheta=pzin*ptinv;
//variables to be updated at each iterations
float totalDistance = 0;
//derivatives initialized to value for first iteration, i.e. distance = r-r0in
float dTDdx = r0>0. ? -xin/r0 : 0.;
float dTDdy = r0>0. ? -yin/r0 : 0.;
float dTDdpx = 0.;
float dTDdpy = 0.;
//temporaries used within the loop (declare here to reduce memory operations)
float x = 0.;
float y = 0.;
float px = 0.;
float py = 0.;
float cosAP=0.;
float sinAP=0.;
float dAPdx = 0.;
float dAPdy = 0.;
float dAPdpx = 0.;
float dAPdpy = 0.;
// float dxdvar = 0.;
// float dydvar = 0.;
//5 iterations is a good starting point
//const unsigned int Niter = 10;
// const unsigned int Niter = 5+std::round(r-r0)/2;
for (unsigned int iter=0; iter < Config::Niter; ++iter) {
x = outPar_reg[0];
y = outPar_reg[1];
px = outPar_reg[3];
py = outPar_reg[4];
r0 = hipo(outPar_reg[0], outPar_reg[1]);
totalDistance += (r-r0);
if (Config::useTrigApprox) { // TODO: uncomment
sincos4((r-r0)*invcurvature, sinAP, cosAP);
} else {
cosAP=cos((r-r0)*invcurvature);
sinAP=sin((r-r0)*invcurvature);
}
//helix propagation formulas
//http://www.phys.ufl.edu/~avery/fitting/fitting4.pdf
outPar_reg[0] = outPar_reg[0] + k*(px*sinAP-py*(1-cosAP));
outPar_reg[1] = outPar_reg[1] + k*(py*sinAP+px*(1-cosAP));
outPar_reg[2] = outPar_reg[2] + (r-r0)*ctgTheta;
outPar_reg[3] = px*cosAP-py*sinAP;
outPar_reg[4] = py*cosAP+px*sinAP;
//outPar.At(n, 5, 0) = pz; //take this out as it is redundant
if (Config::useSimpleJac==0 &&
iter +1 != Config::Niter &&
r0 > 0 && fabs((r-r0)*invcurvature)>0.000000001) {
//update derivatives on total distance for next step, where totalDistance+=r-r0
//now r0 depends on px and py
r0 = 1./r0;//WARNING, now r0 is r0inv (one less temporary)
//update derivative on D
dAPdx = -x*r0*invcurvature;
dAPdy = -y*r0*invcurvature;
dAPdpx = -(r-1./r0)*invcurvature*px*pt2inv;//weird, using r0 instead of 1./r0 improves things but it should be wrong since r0 in now r0inv
dAPdpy = -(r-1./r0)*invcurvature*py*pt2inv;//weird, using r0 instead of 1./r0 improves things but it should be wrong since r0 in now r0inv
//reduce temporary variables
//dxdx = 1 + k*dAPdx*(px*cosAP - py*sinAP);
//dydx = k*dAPdx*(py*cosAP + px*sinAP);
//dTDdx -= r0*(x*dxdx + y*dydx);
dTDdx -= r0*(x*(1 + k*dAPdx*(px*cosAP - py*sinAP)) + y*(k*dAPdx*(py*cosAP + px*sinAP)));
//reuse same temporary variables
//dxdy = k*dAPdy*(px*cosAP - py*sinAP);
//dydy = 1 + k*dAPdy*(py*cosAP + px*sinAP);
//dTDdy -= r0*(x*dxdy + y*dydy);
dTDdy -= r0*(x*(k*dAPdy*(px*cosAP - py*sinAP)) + y*(1 + k*dAPdy*(py*cosAP + px*sinAP)));
//dxdpx = k*(sinAP + px*cosAP*dAPdpx - py*sinAP*dAPdpx);
//dydpx = k*(py*cosAP*dAPdpx + 1. - cosAP + px*sinAP*dAPdpx);
//dTDdpx -= r0*(x*dxdpx + y*dydpx);
dTDdpx -= r0*(x*(k*(sinAP + px*cosAP*dAPdpx - py*sinAP*dAPdpx)) + y*(k*(py*cosAP*dAPdpx + 1. - cosAP + px*sinAP*dAPdpx)));
//dxdpy = k*(px*cosAP*dAPdpy - 1. + cosAP - py*sinAP*dAPdpy);
//dydpy = k*(sinAP + py*cosAP*dAPdpy + px*sinAP*dAPdpy);
//dTDdpy -= r0*(x*dxdpy + y*(k*dydpy);
dTDdpy -= r0*(x*(k*(px*cosAP*dAPdpy - 1. + cosAP - py*sinAP*dAPdpy)) + y*(k*(sinAP + py*cosAP*dAPdpy + px*sinAP*dAPdpy)));
}
float& TD=totalDistance;
float TP=TD*invcurvature;//totalAngPath
float& iC=invcurvature;
float dCdpx = k*pxin*ptinv;
float dCdpy = k*pyin*ptinv;
float dTPdx = dTDdx*iC;
float dTPdy = dTDdy*iC;
float dTPdpx = (dTDdpx - TD*dCdpx*iC)*iC; // MT change: avoid division
float dTPdpy = (dTDdpy - TD*dCdpy*iC)*iC; // MT change: avoid division
float cosTP, sinTP;
if (Config::useTrigApprox) {
sincos4(TP, sinTP, cosTP);
} else {
cosTP = cos(TP);
sinTP = sin(TP);
}
if (Config::useSimpleJac) {
//assume total path length s as given and with no uncertainty
float p = pt2 + pzin*pzin;
p = sqrt(p);
float s = TD*p*ptinv;
computeJacobianSimple(errorProp_reg, s, k, p, pxin, pyin, pzin, TP, cosTP, sinTP, N);
} else {
//now try to make full jacobian
//derive these to compute jacobian
//x = xin + k*(pxin*sinTP-pyin*(1-cosTP));
//y = yin + k*(pyin*sinTP+pxin*(1-cosTP));
//z = zin + k*TP*pzin;
//px = pxin*cosTP-pyin*sinTP;
//py = pyin*cosTP+pxin*sinTP;
//pz = pzin;
//jacobian
errorProp_reg[(0*L + 0)] = 1 + k*dTPdx*(pxin*cosTP - pyin*sinTP); //dxdx;
errorProp_reg[(0*L + 1)] = k*dTPdy*(pxin*cosTP - pyin*sinTP); //dxdy;
errorProp_reg[(0*L + 2)] = 0.;
errorProp_reg[(0*L + 3)] = k*(sinTP + pxin*cosTP*dTPdpx - pyin*sinTP*dTPdpx); //dxdpx;
errorProp_reg[(0*L + 4)] = k*(pxin*cosTP*dTPdpy - 1. + cosTP - pyin*sinTP*dTPdpy);//dxdpy;
errorProp_reg[(0*L + 5)] = 0.;
errorProp_reg[(1*L + 0)] = k*dTPdx*(pyin*cosTP + pxin*sinTP); //dydx;
errorProp_reg[(1*L + 1)] = 1 + k*dTPdy*(pyin*cosTP + pxin*sinTP); //dydy;
errorProp_reg[(1*L + 2)] = 0.;
errorProp_reg[(1*L + 3)] = k*(pyin*cosTP*dTPdpx + 1. - cosTP + pxin*sinTP*dTPdpx);//dydpx;
errorProp_reg[(1*L + 4)] = k*(sinTP + pyin*cosTP*dTPdpy + pxin*sinTP*dTPdpy); //dydpy;
errorProp_reg[(1*L + 5)] = 0.;
errorProp_reg[(2*L + 0)] = k*pzin*dTPdx; //dzdx;
errorProp_reg[(2*L + 1)] = k*pzin*dTPdy; //dzdy;
errorProp_reg[(2*L + 2)] = 1.;
errorProp_reg[(2*L + 3)] = k*pzin*dTPdpx;//dzdpx;
errorProp_reg[(2*L + 4)] = k*pzin*dTPdpy;//dzdpy;
errorProp_reg[(2*L + 5)] = k*TP; //dzdpz;
errorProp_reg[(3*L + 0)] = -dTPdx*(pxin*sinTP + pyin*cosTP); //dpxdx;
errorProp_reg[(3*L + 1)] = -dTPdy*(pxin*sinTP + pyin*cosTP); //dpxdy;
errorProp_reg[(3*L + 2)] = 0.;
errorProp_reg[(3*L + 3)] = cosTP - dTPdpx*(pxin*sinTP + pyin*cosTP); //dpxdpx;
errorProp_reg[(3*L + 4)] = -sinTP - dTPdpy*(pxin*sinTP + pyin*cosTP);//dpxdpy;
errorProp_reg[(3*L + 5)] = 0.;
errorProp_reg[(4*L + 0)] = -dTPdx*(pyin*sinTP - pxin*cosTP); //dpydx;
errorProp_reg[(4*L + 1)] = -dTPdy*(pyin*sinTP - pxin*cosTP); //dpydy;
errorProp_reg[(4*L + 2)] = 0.;
errorProp_reg[(4*L + 3)] = +sinTP - dTPdpx*(pyin*sinTP - pxin*cosTP);//dpydpx;
errorProp_reg[(4*L + 4)] = +cosTP - dTPdpy*(pyin*sinTP - pxin*cosTP);//dpydpy;
errorProp_reg[(4*L + 5)] = 0.;
errorProp_reg[(5*L + 0)] = 0.;
errorProp_reg[(5*L + 1)] = 0.;
errorProp_reg[(5*L + 2)] = 0.;
errorProp_reg[(5*L + 3)] = 0.;
errorProp_reg[(5*L + 4)] = 0.;
errorProp_reg[(5*L + 5)] = 1.;
}
}
// Once computations are done. Get values from registers to global memory.
for (int j = 0; j < 5; ++j) {
outPar[n + j*opN] = outPar_reg[j];
}
}
}
/// Similarity ////////////////////////////////////////////////////////////////
// In-place similarity transform of the packed symmetric 6x6 covariance b:
//   b <- a * b * a^T
// where a is the 6x6 propagation jacobian held in dense row-major registers
// (LL == 36 floats) and b stores the LS == 21 packed elements of matrix n in
// a matrix-plex with leading stride bN (element j of matrix n at b[n + j*bN]).
// The product is hand-unrolled, reusing tmp[] as one row of (a*b) at a time;
// terms that vanish for this jacobian's sparsity pattern are omitted or left
// as commented-out lines. NOTE(review): the sparsity assumptions are
// inferred from which a[] factors appear below -- confirm against the CPU
// MultHelixProp/MultHelixPropTransp reference if a's layout ever changes.
__device__ void similarity_fn(float* a, float *b, size_t stride_outErr,
                              int N, int n) {
  size_t bN = stride_outErr;
  // Keep most values in registers.
  float b_reg[LL];
  // To avoid using too many registers, tmp[] as a limited size and is reused.
  float tmp[6];
  /*int n = threadIdx.x + blockIdx.x * blockDim.x;*/
  if (n < N) {
    // Stage the packed covariance of matrix n into registers.
    for (int j = 0; j < LS; j++) {
      b_reg[j] = b[n + j*bN];
    }
    // Row 0 of (a*b), then its dot products with rows of a.
    tmp[ 0] = a[0]*b_reg[ 0] + a[1]*b_reg[ 1] + a[3]*b_reg[ 6] + a[4]*b_reg[10];
    tmp[ 1] = a[0]*b_reg[ 1] + a[1]*b_reg[ 2] + a[3]*b_reg[ 7] + a[4]*b_reg[11];
    /*tmp[ 2] = a[0]*b_reg[ 3] + a[1]*b_reg[ 4] + a[3]*b_reg[ 8] + a[4]*b_reg[12];*/
    tmp[ 3] = a[0]*b_reg[ 6] + a[1]*b_reg[ 7] + a[3]*b_reg[ 9] + a[4]*b_reg[13];
    tmp[ 4] = a[0]*b_reg[10] + a[1]*b_reg[11] + a[3]*b_reg[13] + a[4]*b_reg[14];
    /*tmp[ 5] = a[0]*b_reg[15] + a[1]*b_reg[16] + a[3]*b_reg[18] + a[4]*b_reg[19];*/
    b[ 0*bN+n] = tmp[ 0]*a[0] + tmp[ 1]*a[1] + tmp[ 3]*a[3] + tmp[ 4]*a[4];
    // Row 1 of (a*b).
    tmp[ 0] = a[6]*b_reg[ 0] + a[7]*b_reg[ 1] + a[9]*b_reg[ 6] + a[10]*b_reg[10];
    tmp[ 1] = a[6]*b_reg[ 1] + a[7]*b_reg[ 2] + a[9]*b_reg[ 7] + a[10]*b_reg[11];
    /*tmp[ 8] = a[6]*b_reg[ 3] + a[7]*b_reg[ 4] + a[9]*b_reg[ 8] + a[10]*b_reg[12];*/
    tmp[ 3] = a[6]*b_reg[ 6] + a[7]*b_reg[ 7] + a[9]*b_reg[ 9] + a[10]*b_reg[13];
    tmp[ 4] = a[6]*b_reg[10] + a[7]*b_reg[11] + a[9]*b_reg[13] + a[10]*b_reg[14];
    /*tmp[11] = a[6]*b_reg[15] + a[7]*b_reg[16] + a[9]*b_reg[18] + a[10]*b_reg[19];*/
    b[ 1*bN+n] = tmp[ 0]*a[0] + tmp[ 1]*a[1] + tmp[ 3]*a[3] + tmp[ 4]*a[4];
    b[ 2*bN+n] = tmp[ 0]*a[6] + tmp[ 1]*a[7] + tmp[ 3]*a[9] + tmp[ 4]*a[10];
    // Row 2 of (a*b): the only row that touches columns 2 and 5 of b.
    tmp[ 0] = a[12]*b_reg[ 0] + a[13]*b_reg[ 1] + b_reg[ 3] + a[15]*b_reg[ 6] + a[16]*b_reg[10] + a[17]*b_reg[15];
    tmp[ 1] = a[12]*b_reg[ 1] + a[13]*b_reg[ 2] + b_reg[ 4] + a[15]*b_reg[ 7] + a[16]*b_reg[11] + a[17]*b_reg[16];
    tmp[ 2] = a[12]*b_reg[ 3] + a[13]*b_reg[ 4] + b_reg[ 5] + a[15]*b_reg[ 8] + a[16]*b_reg[12] + a[17]*b_reg[17];
    tmp[ 3] = a[12]*b_reg[ 6] + a[13]*b_reg[ 7] + b_reg[ 8] + a[15]*b_reg[ 9] + a[16]*b_reg[13] + a[17]*b_reg[18];
    tmp[ 4] = a[12]*b_reg[10] + a[13]*b_reg[11] + b_reg[12] + a[15]*b_reg[13] + a[16]*b_reg[14] + a[17]*b_reg[19];
    tmp[ 5] = a[12]*b_reg[15] + a[13]*b_reg[16] + b_reg[17] + a[15]*b_reg[18] + a[16]*b_reg[19] + a[17]*b_reg[20];
    b[ 3*bN+n] = tmp[ 0]*a[0] + tmp[ 1]*a[1] + tmp[ 3]*a[3] + tmp[ 4]*a[4];
    b[ 4*bN+n] = tmp[ 0]*a[6] + tmp[ 1]*a[7] + tmp[ 3]*a[9] + tmp[ 4]*a[10];
    b[ 5*bN+n] = tmp[ 0]*a[12] + tmp[ 1]*a[13] + tmp[ 2] + tmp[ 3]*a[15] + tmp[ 4]*a[16] + tmp[ 5]*a[17];
    // Row 3 of (a*b).
    tmp[ 0] = a[18]*b_reg[ 0] + a[19]*b_reg[ 1] + a[21]*b_reg[ 6] + a[22]*b_reg[10];
    tmp[ 1] = a[18]*b_reg[ 1] + a[19]*b_reg[ 2] + a[21]*b_reg[ 7] + a[22]*b_reg[11];
    tmp[ 2] = a[18]*b_reg[ 3] + a[19]*b_reg[ 4] + a[21]*b_reg[ 8] + a[22]*b_reg[12];
    tmp[ 3] = a[18]*b_reg[ 6] + a[19]*b_reg[ 7] + a[21]*b_reg[ 9] + a[22]*b_reg[13];
    tmp[ 4] = a[18]*b_reg[10] + a[19]*b_reg[11] + a[21]*b_reg[13] + a[22]*b_reg[14];
    tmp[ 5] = a[18]*b_reg[15] + a[19]*b_reg[16] + a[21]*b_reg[18] + a[22]*b_reg[19];
    b[ 6*bN+n] = tmp[ 0]*a[0] + tmp[ 1]*a[1] + tmp[ 3]*a[3] + tmp[ 4]*a[4];
    b[ 7*bN+n] = tmp[ 0]*a[6] + tmp[ 1]*a[7] + tmp[ 3]*a[9] + tmp[ 4]*a[10];
    b[ 8*bN+n] = tmp[ 0]*a[12] + tmp[ 1]*a[13] + tmp[ 2] + tmp[ 3]*a[15] + tmp[ 4]*a[16] + tmp[ 5]*a[17];
    b[ 9*bN+n] = tmp[ 0]*a[18] + tmp[ 1]*a[19] + tmp[ 3]*a[21] + tmp[ 4]*a[22];
    // Row 4 of (a*b).
    tmp[ 0] = a[24]*b_reg[ 0] + a[25]*b_reg[ 1] + a[27]*b_reg[ 6] + a[28]*b_reg[10];
    tmp[ 1] = a[24]*b_reg[ 1] + a[25]*b_reg[ 2] + a[27]*b_reg[ 7] + a[28]*b_reg[11];
    tmp[ 2] = a[24]*b_reg[ 3] + a[25]*b_reg[ 4] + a[27]*b_reg[ 8] + a[28]*b_reg[12];
    tmp[ 3] = a[24]*b_reg[ 6] + a[25]*b_reg[ 7] + a[27]*b_reg[ 9] + a[28]*b_reg[13];
    tmp[ 4] = a[24]*b_reg[10] + a[25]*b_reg[11] + a[27]*b_reg[13] + a[28]*b_reg[14];
    tmp[ 5] = a[24]*b_reg[15] + a[25]*b_reg[16] + a[27]*b_reg[18] + a[28]*b_reg[19];
    b[10*bN+n] = tmp[ 0]*a[0] + tmp[ 1]*a[1] + tmp[ 3]*a[3] + tmp[ 4]*a[4];
    b[11*bN+n] = tmp[ 0]*a[6] + tmp[ 1]*a[7] + tmp[ 3]*a[9] + tmp[ 4]*a[10];
    b[12*bN+n] = tmp[ 0]*a[12] + tmp[ 1]*a[13] + tmp[ 2] + tmp[ 3]*a[15] + tmp[ 4]*a[16] + tmp[ 5]*a[17];
    b[13*bN+n] = tmp[ 0]*a[18] + tmp[ 1]*a[19] + tmp[ 3]*a[21] + tmp[ 4]*a[22];
    b[14*bN+n] = tmp[ 0]*a[24] + tmp[ 1]*a[25] + tmp[ 3]*a[27] + tmp[ 4]*a[28];
    // Row 5 of (a*b) is just row 5 of b (identity row of a).
    tmp[ 0] = b_reg[15];
    tmp[ 1] = b_reg[16];
    tmp[ 2] = b_reg[17];
    tmp[ 3] = b_reg[18];
    tmp[ 4] = b_reg[19];
    tmp[ 5] = b_reg[20];
    // MultHelixPropTransp
    b[15*bN+n] = tmp[ 0]*a[0] + tmp[ 1]*a[1] + tmp[ 3]*a[3] + tmp[ 4]*a[4];
    b[16*bN+n] = tmp[ 0]*a[6] + tmp[ 1]*a[7] + tmp[ 3]*a[9] + tmp[ 4]*a[10];
    b[17*bN+n] = tmp[ 0]*a[12] + tmp[ 1]*a[13] + tmp[ 2] + tmp[ 3]*a[15] + tmp[ 4]*a[16] + tmp[ 5]*a[17];
    b[18*bN+n] = tmp[ 0]*a[18] + tmp[ 1]*a[19] + tmp[ 3]*a[21] + tmp[ 4]*a[22];
    b[19*bN+n] = tmp[ 0]*a[24] + tmp[ 1]*a[25] + tmp[ 3]*a[27] + tmp[ 4]*a[28];
    b[20*bN+n] = tmp[ 5];
  }
}
// Propagates each track's parameters to the radius of its measured hit and
// transforms its covariance with the propagation jacobian (similarity_fn).
// Launch: 1-D grid of 1-D blocks; a grid-stride loop covers N larger than
// the total thread count (capped at MAX_BLOCKS_X * BLOCK_SIZE_X).
__global__ void propagation_kernel(
    const float* __restrict__ msPar, size_t stride_msPar,
    float *inPar, size_t inPar_stride, int *inChg,
    float *outPar, size_t outPar_stride, float *errorProp,
    size_t errorProp_stride, float *outErr, size_t outErr_stride, int N) {
  int grid_width = blockDim.x * gridDim.x;
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  float msRad_reg;
  // Using registers instead of shared memory is ~ 30% faster.
  float errorProp_reg[LL];
  // If there are more matrices than MAX_BLOCKS_X * BLOCK_SIZE_X threads,
  // stride through them.
  // BUG FIX: the original did `n += z*grid_width` inside the loop, which
  // advances the index by a cumulative 0,1,3,6,... grid widths and therefore
  // skips elements from the third tile onward; `tid + z*grid_width` visits
  // every index exactly once.
  for (int z = 0; z < (N-1)/grid_width + 1; z++) {
    int n = tid + z*grid_width;
    if (n < N) {
      computeMsRad_fn(msPar, stride_msPar, &msRad_reg, N, n);
      if (Config::doIterative) {
        helixAtRFromIterative_fn(inPar, inPar_stride,
            inChg, outPar, outPar_stride, msRad_reg,
            errorProp_reg, N, n);
      }
      similarity_fn(errorProp_reg, outErr, outErr_stride, N, n);
    }
  }
}
// Host-side launcher: configures a 1-D grid (capped at MAX_BLOCKS_X blocks
// of BLOCK_SIZE_X threads) and enqueues propagation_kernel on the given
// stream. Asynchronous: does not synchronize with the device.
void propagation_wrapper(hipStream_t& stream,
    GPlex<float>& msPar,
    GPlex<float>& inPar, GPlex<int>& inChg,
    GPlex<float>& outPar, GPlex<float>& errorProp,
    GPlex<float>& outErr,
    const int N) {
  const int blocks_needed = (N - 1) / BLOCK_SIZE_X + 1;
  const dim3 grid(::min(blocks_needed, MAX_BLOCKS_X), 1, 1);
  const dim3 block(BLOCK_SIZE_X, 1, 1);
  hipLaunchKernelGGL(( propagation_kernel) , dim3(grid), dim3(block), 0, stream ,
      msPar.ptr, msPar.stride,
      inPar.ptr, inPar.stride, inChg.ptr,
      outPar.ptr, outPar.stride, errorProp.ptr,
      errorProp.stride, outErr.ptr, outErr.stride, N);
}
| b7b8da6419792f684b037ed871f0dad225a29a13.cu | #include "Config.h"
#include "propagation_kernels.h"
#include <stdio.h>
#define L 6
#define LL 36
#define LS 21
// values from 32 to 512 give good results.
// 32 gives slightly better results (on a K40)
#define BLOCK_SIZE_X 32
#define MAX_BLOCKS_X 65535 // CUDA constraint
// Euclidean norm of (x, y), i.e. sqrt(x^2 + y^2).
__device__ float hipo(float x, float y) {
  const float sum_sq = x*x + y*y;
  return sqrt(sum_sq);
}
// Cheap small-angle polynomial approximation of sin/cos using a single
// running power variable `x` that is updated in place between terms.
__device__ void sincos4(float x, float& sin, float& cos) {
  // Had this writen with explicit division by factorial.
  // The *whole* fitting test ran like 2.5% slower on MIC, sigh.
  // NOTE(review): each `x *= x * c` step multiplies x by itself, i.e. it
  // SQUARES the running term, so as written the series evaluates to
  // cos ~= 1 - x^2/2 + x^8/576 and sin ~= x - x^4/12 rather than the
  // textbook Taylor terms (+x^4/24 and -x^3/6). Confirm this is the
  // intended accuracy trade-off against the reference implementation.
  cos = 1;
  sin = x; x *= x * 0.5f;
  cos -= x; x *= x * 0.33333333f;
  sin -= x; x *= x * 0.25f;
  cos += x;
}
// computeJacobianSimple works on values that are in registers.
// Registers are thread-private. Thus this function has no notion of
// parallelism. It is ran serially by each calling thread.
//
// Fills errorProp (dense 6x6 row-major, L == 6 floats per row) with the
// jacobian of the helix propagation under the simplification that the
// angular path TP carries no uncertainty: the dTP/dp derivatives are
// computed and then deliberately zeroed below, so every term written with a
// dTPdp* factor is effectively zero and only the explicit TP-dependence of
// the update formulas survives. N is unused here (kept for signature
// uniformity with the other helpers).
__device__ void computeJacobianSimple(float *errorProp,
    float s, float k, float p, float pxin, float pyin, float pzin,
    float TP, float cosTP, float sinTP, int N) {
  // std::cout << "total path s=" << s << std::endl;
  // TD = s*pt/p;
  // TP = TD/(pt*k) = s/(p*k);
  float dTPdpx = -s*pxin/(k*p*p*p);
  float dTPdpy = -s*pyin/(k*p*p*p);
  float dTPdpz = -s*pzin/(k*p*p*p);
  //ok let's assume that the quantity with no error is the angular path (phase change)
  dTPdpx = 0;
  dTPdpy = 0;
  dTPdpz = 0;
  //derive these to compute jacobian
  //x = xin + k*(pxin*sinTP-pyin*(1-cosTP));
  //y = yin + k*(pyin*sinTP+pxin*(1-cosTP));
  //z = zin + k*TP*pzin;
  //px = pxin*cosTP-pyin*sinTP;
  //py = pyin*cosTP+pxin*sinTP;
  //pz = pzin;
  //jacobian
  errorProp[(0*L + 0)] = 1.; //dxdx
  errorProp[(0*L + 1)] = 0.; //dxdy
  errorProp[(0*L + 2)] = 0.; //dxdz
  errorProp[(0*L + 3)] = k*(sinTP + pxin*cosTP*dTPdpx - pyin*sinTP*dTPdpx); //dxdpx
  errorProp[(0*L + 4)] = k*(pxin*cosTP*dTPdpy - 1. + cosTP - pyin*sinTP*dTPdpy); //dxdpy
  errorProp[(0*L + 5)] = k*dTPdpz*(pxin*cosTP - pyin*sinTP); //dxdpz
  errorProp[(1*L + 0)] = 0.; //dydx
  errorProp[(1*L + 1)] = 1.; //dydy
  errorProp[(1*L + 2)] = 0.; //dydz
  errorProp[(1*L + 3)] = k*(pyin*cosTP*dTPdpx + 1. - cosTP + pxin*sinTP*dTPdpx); //dydpx
  errorProp[(1*L + 4)] = k*(sinTP + pyin*cosTP*dTPdpy + pxin*sinTP*dTPdpy); //dydpy
  errorProp[(1*L + 5)] = k*dTPdpz*(pyin*cosTP + pxin*sinTP); //dydpz
  errorProp[(2*L + 0)] = 0.; //dzdx
  errorProp[(2*L + 1)] = 0.; //dzdy
  errorProp[(2*L + 2)] = 1.; //dzdz
  errorProp[(2*L + 3)] = k*pzin*dTPdpx; //dzdpx
  errorProp[(2*L + 4)] = k*pzin*dTPdpy; //dzdpy
  errorProp[(2*L + 5)] = k*(TP + dTPdpz*pzin); //dzdpz
  errorProp[(3*L + 0)] = 0.; //dpxdx
  errorProp[(3*L + 1)] = 0.; //dpxdy
  errorProp[(3*L + 2)] = 0.; //dpxdz
  errorProp[(3*L + 3)] = cosTP - dTPdpx*(pxin*sinTP + pyin*cosTP); //dpxdpx
  errorProp[(3*L + 4)] = -sinTP - dTPdpy*(pxin*sinTP + pyin*cosTP); //dpxdpy
  errorProp[(3*L + 5)] = -dTPdpz*(pxin*sinTP + pyin*cosTP); //dpxdpz
  errorProp[(4*L + 0)] = 0.; //dpydx
  errorProp[(4*L + 1)] = 0.; //dpydy
  errorProp[(4*L + 2)] = 0.; //dpydz
  errorProp[(4*L + 3)] = +sinTP - dTPdpx*(pyin*sinTP - pxin*cosTP); //dpydpx
  errorProp[(4*L + 4)] = +cosTP - dTPdpy*(pyin*sinTP - pxin*cosTP); //dpydpy
  errorProp[(4*L + 5)] = -dTPdpz*(pyin*sinTP - pxin*cosTP); //dpydpz
  errorProp[(5*L + 0)] = 0.; //dpzdx
  errorProp[(5*L + 1)] = 0.; //dpzdy
  errorProp[(5*L + 2)] = 0.; //dpzdz
  errorProp[(5*L + 3)] = 0.; //dpzdpx
  errorProp[(5*L + 4)] = 0.; //dpzdpy
  errorProp[(5*L + 5)] = 1.; //dpzdpz
}
/// Compute MsRad /////////////////////////////////////////////////////////////
// Not passing msRad.stride, as QF == 1 (second dim f msRad)
// Transverse radius of measured hit n: sqrt(x^2 + y^2) of its first two
// coordinates (x at msPar[n], y one stride further). Writes *msRad only
// when n is a valid index.
__device__ void computeMsRad_fn(const float* __restrict__ msPar,
    size_t stride_msPar, float* msRad, int N, int n) {
  /*int n = threadIdx.x + blockIdx.x * blockDim.x;*/
  if (n >= N) return;
  const float hit_x = msPar[n];
  const float hit_y = msPar[n + stride_msPar];
  *msRad = hipo(hit_x, hit_y);
}
// Iteratively propagates track n along its helix to the transverse radius
// msRad. Track parameters (x,y,z,px,py,pz) are read from the matrix-plex
// inPar (stride ipN) and the propagated values written back through outPar
// (stride opN); alongside them the derivatives of the total path are
// accumulated so the 6x6 jacobian can be filled into errorProp_reg
// (thread-local registers, dense row-major, L columns per row).
// If the track is already at the target radius (|r - r0| < 1e-4) the
// jacobian is set to the identity and outPar is left unchanged.
// NOTE(review): the jacobian block at the bottom sits INSIDE the iteration
// loop, so it is recomputed (with updated TD derivatives) every iteration
// and the last pass wins -- confirm this matches the CPU reference.
__device__
void helixAtRFromIterative_fn(float *inPar, size_t inPar_stride,
    int *inChg, float *outPar, size_t outPar_stride, float msRad,
    float *errorProp_reg, int N, int n) {
  size_t opN = outPar_stride;
  size_t ipN = inPar_stride;
  /*int n = threadIdx.x + blockIdx.x * blockDim.x;*/
  float outPar_reg[5];
  if (n < N) {
    for (int j = 0; j < 5; ++j) {
      outPar_reg[j] = outPar[n+j*opN];
    }
    const float& xin = inPar[n + 0*ipN];
    const float& yin = inPar[n + 1*ipN];
    const float& pxin = inPar[n + 3*ipN];
    const float& pyin = inPar[n + 4*ipN];
    const float& pzin = inPar[n + 5*ipN];
    const float& r = msRad;
    float r0 = hipo(xin, yin);
    // Already at the target radius: nothing to propagate.
    if (fabs(r-r0)<0.0001) {
      // get an identity matrix
      computeJacobianSimple(errorProp_reg, 0, 1, 1, 1, 1, 1, 0, 1, 0, N);
      return; // continue;
    }
    float pt2 = pxin*pxin+pyin*pyin;
    float pt = sqrt(pt2);
    float ptinv = 1./pt;
    float pt2inv = ptinv*ptinv;
    //p=0.3Br => r=p/(0.3*B)
    float k = inChg[n] * 100. / (-0.299792458*Config::Bfield);
    float invcurvature = 1./(pt*k);//in 1./cm
    float ctgTheta=pzin*ptinv;
    //variables to be updated at each iterations
    float totalDistance = 0;
    //derivatives initialized to value for first iteration, i.e. distance = r-r0in
    float dTDdx = r0>0. ? -xin/r0 : 0.;
    float dTDdy = r0>0. ? -yin/r0 : 0.;
    float dTDdpx = 0.;
    float dTDdpy = 0.;
    //temporaries used within the loop (declare here to reduce memory operations)
    float x = 0.;
    float y = 0.;
    float px = 0.;
    float py = 0.;
    float cosAP=0.;
    float sinAP=0.;
    float dAPdx = 0.;
    float dAPdy = 0.;
    float dAPdpx = 0.;
    float dAPdpy = 0.;
    // float dxdvar = 0.;
    // float dydvar = 0.;
    //5 iterations is a good starting point
    //const unsigned int Niter = 10;
    // const unsigned int Niter = 5+std::round(r-r0)/2;
    for (unsigned int iter=0; iter < Config::Niter; ++iter) {
      x = outPar_reg[0];
      y = outPar_reg[1];
      px = outPar_reg[3];
      py = outPar_reg[4];
      r0 = hipo(outPar_reg[0], outPar_reg[1]);
      totalDistance += (r-r0);
      // Angular path for this step: (r - r0) / (pt * k).
      if (Config::useTrigApprox) { // TODO: uncomment
        sincos4((r-r0)*invcurvature, sinAP, cosAP);
      } else {
        cosAP=cos((r-r0)*invcurvature);
        sinAP=sin((r-r0)*invcurvature);
      }
      //helix propagation formulas
      //http://www.phys.ufl.edu/~avery/fitting/fitting4.pdf
      outPar_reg[0] = outPar_reg[0] + k*(px*sinAP-py*(1-cosAP));
      outPar_reg[1] = outPar_reg[1] + k*(py*sinAP+px*(1-cosAP));
      outPar_reg[2] = outPar_reg[2] + (r-r0)*ctgTheta;
      outPar_reg[3] = px*cosAP-py*sinAP;
      outPar_reg[4] = py*cosAP+px*sinAP;
      //outPar.At(n, 5, 0) = pz; //take this out as it is redundant
      // Update total-distance derivatives for the next step (skipped on the
      // last iteration and for negligible angular paths).
      if (Config::useSimpleJac==0 &&
          iter +1 != Config::Niter &&
          r0 > 0 && fabs((r-r0)*invcurvature)>0.000000001) {
        //update derivatives on total distance for next step, where totalDistance+=r-r0
        //now r0 depends on px and py
        r0 = 1./r0;//WARNING, now r0 is r0inv (one less temporary)
        //update derivative on D
        dAPdx = -x*r0*invcurvature;
        dAPdy = -y*r0*invcurvature;
        dAPdpx = -(r-1./r0)*invcurvature*px*pt2inv;//weird, using r0 instead of 1./r0 improves things but it should be wrong since r0 in now r0inv
        dAPdpy = -(r-1./r0)*invcurvature*py*pt2inv;//weird, using r0 instead of 1./r0 improves things but it should be wrong since r0 in now r0inv
        //reduce temporary variables
        //dxdx = 1 + k*dAPdx*(px*cosAP - py*sinAP);
        //dydx = k*dAPdx*(py*cosAP + px*sinAP);
        //dTDdx -= r0*(x*dxdx + y*dydx);
        dTDdx -= r0*(x*(1 + k*dAPdx*(px*cosAP - py*sinAP)) + y*(k*dAPdx*(py*cosAP + px*sinAP)));
        //reuse same temporary variables
        //dxdy = k*dAPdy*(px*cosAP - py*sinAP);
        //dydy = 1 + k*dAPdy*(py*cosAP + px*sinAP);
        //dTDdy -= r0*(x*dxdy + y*dydy);
        dTDdy -= r0*(x*(k*dAPdy*(px*cosAP - py*sinAP)) + y*(1 + k*dAPdy*(py*cosAP + px*sinAP)));
        //dxdpx = k*(sinAP + px*cosAP*dAPdpx - py*sinAP*dAPdpx);
        //dydpx = k*(py*cosAP*dAPdpx + 1. - cosAP + px*sinAP*dAPdpx);
        //dTDdpx -= r0*(x*dxdpx + y*dydpx);
        dTDdpx -= r0*(x*(k*(sinAP + px*cosAP*dAPdpx - py*sinAP*dAPdpx)) + y*(k*(py*cosAP*dAPdpx + 1. - cosAP + px*sinAP*dAPdpx)));
        //dxdpy = k*(px*cosAP*dAPdpy - 1. + cosAP - py*sinAP*dAPdpy);
        //dydpy = k*(sinAP + py*cosAP*dAPdpy + px*sinAP*dAPdpy);
        //dTDdpy -= r0*(x*dxdpy + y*(k*dydpy);
        dTDdpy -= r0*(x*(k*(px*cosAP*dAPdpy - 1. + cosAP - py*sinAP*dAPdpy)) + y*(k*(sinAP + py*cosAP*dAPdpy + px*sinAP*dAPdpy)));
      }
      float& TD=totalDistance;
      float TP=TD*invcurvature;//totalAngPath
      float& iC=invcurvature;
      float dCdpx = k*pxin*ptinv;
      float dCdpy = k*pyin*ptinv;
      float dTPdx = dTDdx*iC;
      float dTPdy = dTDdy*iC;
      float dTPdpx = (dTDdpx - TD*dCdpx*iC)*iC; // MT change: avoid division
      float dTPdpy = (dTDdpy - TD*dCdpy*iC)*iC; // MT change: avoid division
      float cosTP, sinTP;
      if (Config::useTrigApprox) {
        sincos4(TP, sinTP, cosTP);
      } else {
        cosTP = cos(TP);
        sinTP = sin(TP);
      }
      if (Config::useSimpleJac) {
        //assume total path length s as given and with no uncertainty
        float p = pt2 + pzin*pzin;
        p = sqrt(p);
        float s = TD*p*ptinv;
        computeJacobianSimple(errorProp_reg, s, k, p, pxin, pyin, pzin, TP, cosTP, sinTP, N);
      } else {
        //now try to make full jacobian
        //derive these to compute jacobian
        //x = xin + k*(pxin*sinTP-pyin*(1-cosTP));
        //y = yin + k*(pyin*sinTP+pxin*(1-cosTP));
        //z = zin + k*TP*pzin;
        //px = pxin*cosTP-pyin*sinTP;
        //py = pyin*cosTP+pxin*sinTP;
        //pz = pzin;
        //jacobian
        errorProp_reg[(0*L + 0)] = 1 + k*dTPdx*(pxin*cosTP - pyin*sinTP); //dxdx;
        errorProp_reg[(0*L + 1)] = k*dTPdy*(pxin*cosTP - pyin*sinTP); //dxdy;
        errorProp_reg[(0*L + 2)] = 0.;
        errorProp_reg[(0*L + 3)] = k*(sinTP + pxin*cosTP*dTPdpx - pyin*sinTP*dTPdpx); //dxdpx;
        errorProp_reg[(0*L + 4)] = k*(pxin*cosTP*dTPdpy - 1. + cosTP - pyin*sinTP*dTPdpy);//dxdpy;
        errorProp_reg[(0*L + 5)] = 0.;
        errorProp_reg[(1*L + 0)] = k*dTPdx*(pyin*cosTP + pxin*sinTP); //dydx;
        errorProp_reg[(1*L + 1)] = 1 + k*dTPdy*(pyin*cosTP + pxin*sinTP); //dydy;
        errorProp_reg[(1*L + 2)] = 0.;
        errorProp_reg[(1*L + 3)] = k*(pyin*cosTP*dTPdpx + 1. - cosTP + pxin*sinTP*dTPdpx);//dydpx;
        errorProp_reg[(1*L + 4)] = k*(sinTP + pyin*cosTP*dTPdpy + pxin*sinTP*dTPdpy); //dydpy;
        errorProp_reg[(1*L + 5)] = 0.;
        errorProp_reg[(2*L + 0)] = k*pzin*dTPdx; //dzdx;
        errorProp_reg[(2*L + 1)] = k*pzin*dTPdy; //dzdy;
        errorProp_reg[(2*L + 2)] = 1.;
        errorProp_reg[(2*L + 3)] = k*pzin*dTPdpx;//dzdpx;
        errorProp_reg[(2*L + 4)] = k*pzin*dTPdpy;//dzdpy;
        errorProp_reg[(2*L + 5)] = k*TP; //dzdpz;
        errorProp_reg[(3*L + 0)] = -dTPdx*(pxin*sinTP + pyin*cosTP); //dpxdx;
        errorProp_reg[(3*L + 1)] = -dTPdy*(pxin*sinTP + pyin*cosTP); //dpxdy;
        errorProp_reg[(3*L + 2)] = 0.;
        errorProp_reg[(3*L + 3)] = cosTP - dTPdpx*(pxin*sinTP + pyin*cosTP); //dpxdpx;
        errorProp_reg[(3*L + 4)] = -sinTP - dTPdpy*(pxin*sinTP + pyin*cosTP);//dpxdpy;
        errorProp_reg[(3*L + 5)] = 0.;
        errorProp_reg[(4*L + 0)] = -dTPdx*(pyin*sinTP - pxin*cosTP); //dpydx;
        errorProp_reg[(4*L + 1)] = -dTPdy*(pyin*sinTP - pxin*cosTP); //dpydy;
        errorProp_reg[(4*L + 2)] = 0.;
        errorProp_reg[(4*L + 3)] = +sinTP - dTPdpx*(pyin*sinTP - pxin*cosTP);//dpydpx;
        errorProp_reg[(4*L + 4)] = +cosTP - dTPdpy*(pyin*sinTP - pxin*cosTP);//dpydpy;
        errorProp_reg[(4*L + 5)] = 0.;
        errorProp_reg[(5*L + 0)] = 0.;
        errorProp_reg[(5*L + 1)] = 0.;
        errorProp_reg[(5*L + 2)] = 0.;
        errorProp_reg[(5*L + 3)] = 0.;
        errorProp_reg[(5*L + 4)] = 0.;
        errorProp_reg[(5*L + 5)] = 1.;
      }
    }
    // Once computations are done. Get values from registers to global memory.
    for (int j = 0; j < 5; ++j) {
      outPar[n + j*opN] = outPar_reg[j];
    }
  }
}
/// Similarity ////////////////////////////////////////////////////////////////
__device__ void similarity_fn(float* a, float *b, size_t stride_outErr,
int N, int n) {
size_t bN = stride_outErr;
// Keep most values in registers.
float b_reg[LL];
// To avoid using too many registers, tmp[] as a limited size and is reused.
float tmp[6];
/*int n = threadIdx.x + blockIdx.x * blockDim.x;*/
if (n < N) {
for (int j = 0; j < LS; j++) {
b_reg[j] = b[n + j*bN];
}
tmp[ 0] = a[0]*b_reg[ 0] + a[1]*b_reg[ 1] + a[3]*b_reg[ 6] + a[4]*b_reg[10];
tmp[ 1] = a[0]*b_reg[ 1] + a[1]*b_reg[ 2] + a[3]*b_reg[ 7] + a[4]*b_reg[11];
/*tmp[ 2] = a[0]*b_reg[ 3] + a[1]*b_reg[ 4] + a[3]*b_reg[ 8] + a[4]*b_reg[12];*/
tmp[ 3] = a[0]*b_reg[ 6] + a[1]*b_reg[ 7] + a[3]*b_reg[ 9] + a[4]*b_reg[13];
tmp[ 4] = a[0]*b_reg[10] + a[1]*b_reg[11] + a[3]*b_reg[13] + a[4]*b_reg[14];
/*tmp[ 5] = a[0]*b_reg[15] + a[1]*b_reg[16] + a[3]*b_reg[18] + a[4]*b_reg[19];*/
b[ 0*bN+n] = tmp[ 0]*a[0] + tmp[ 1]*a[1] + tmp[ 3]*a[3] + tmp[ 4]*a[4];
tmp[ 0] = a[6]*b_reg[ 0] + a[7]*b_reg[ 1] + a[9]*b_reg[ 6] + a[10]*b_reg[10];
tmp[ 1] = a[6]*b_reg[ 1] + a[7]*b_reg[ 2] + a[9]*b_reg[ 7] + a[10]*b_reg[11];
/*tmp[ 8] = a[6]*b_reg[ 3] + a[7]*b_reg[ 4] + a[9]*b_reg[ 8] + a[10]*b_reg[12];*/
tmp[ 3] = a[6]*b_reg[ 6] + a[7]*b_reg[ 7] + a[9]*b_reg[ 9] + a[10]*b_reg[13];
tmp[ 4] = a[6]*b_reg[10] + a[7]*b_reg[11] + a[9]*b_reg[13] + a[10]*b_reg[14];
/*tmp[11] = a[6]*b_reg[15] + a[7]*b_reg[16] + a[9]*b_reg[18] + a[10]*b_reg[19];*/
b[ 1*bN+n] = tmp[ 0]*a[0] + tmp[ 1]*a[1] + tmp[ 3]*a[3] + tmp[ 4]*a[4];
b[ 2*bN+n] = tmp[ 0]*a[6] + tmp[ 1]*a[7] + tmp[ 3]*a[9] + tmp[ 4]*a[10];
tmp[ 0] = a[12]*b_reg[ 0] + a[13]*b_reg[ 1] + b_reg[ 3] + a[15]*b_reg[ 6] + a[16]*b_reg[10] + a[17]*b_reg[15];
tmp[ 1] = a[12]*b_reg[ 1] + a[13]*b_reg[ 2] + b_reg[ 4] + a[15]*b_reg[ 7] + a[16]*b_reg[11] + a[17]*b_reg[16];
tmp[ 2] = a[12]*b_reg[ 3] + a[13]*b_reg[ 4] + b_reg[ 5] + a[15]*b_reg[ 8] + a[16]*b_reg[12] + a[17]*b_reg[17];
tmp[ 3] = a[12]*b_reg[ 6] + a[13]*b_reg[ 7] + b_reg[ 8] + a[15]*b_reg[ 9] + a[16]*b_reg[13] + a[17]*b_reg[18];
tmp[ 4] = a[12]*b_reg[10] + a[13]*b_reg[11] + b_reg[12] + a[15]*b_reg[13] + a[16]*b_reg[14] + a[17]*b_reg[19];
tmp[ 5] = a[12]*b_reg[15] + a[13]*b_reg[16] + b_reg[17] + a[15]*b_reg[18] + a[16]*b_reg[19] + a[17]*b_reg[20];
b[ 3*bN+n] = tmp[ 0]*a[0] + tmp[ 1]*a[1] + tmp[ 3]*a[3] + tmp[ 4]*a[4];
b[ 4*bN+n] = tmp[ 0]*a[6] + tmp[ 1]*a[7] + tmp[ 3]*a[9] + tmp[ 4]*a[10];
b[ 5*bN+n] = tmp[ 0]*a[12] + tmp[ 1]*a[13] + tmp[ 2] + tmp[ 3]*a[15] + tmp[ 4]*a[16] + tmp[ 5]*a[17];
tmp[ 0] = a[18]*b_reg[ 0] + a[19]*b_reg[ 1] + a[21]*b_reg[ 6] + a[22]*b_reg[10];
tmp[ 1] = a[18]*b_reg[ 1] + a[19]*b_reg[ 2] + a[21]*b_reg[ 7] + a[22]*b_reg[11];
tmp[ 2] = a[18]*b_reg[ 3] + a[19]*b_reg[ 4] + a[21]*b_reg[ 8] + a[22]*b_reg[12];
tmp[ 3] = a[18]*b_reg[ 6] + a[19]*b_reg[ 7] + a[21]*b_reg[ 9] + a[22]*b_reg[13];
tmp[ 4] = a[18]*b_reg[10] + a[19]*b_reg[11] + a[21]*b_reg[13] + a[22]*b_reg[14];
tmp[ 5] = a[18]*b_reg[15] + a[19]*b_reg[16] + a[21]*b_reg[18] + a[22]*b_reg[19];
b[ 6*bN+n] = tmp[ 0]*a[0] + tmp[ 1]*a[1] + tmp[ 3]*a[3] + tmp[ 4]*a[4];
b[ 7*bN+n] = tmp[ 0]*a[6] + tmp[ 1]*a[7] + tmp[ 3]*a[9] + tmp[ 4]*a[10];
b[ 8*bN+n] = tmp[ 0]*a[12] + tmp[ 1]*a[13] + tmp[ 2] + tmp[ 3]*a[15] + tmp[ 4]*a[16] + tmp[ 5]*a[17];
b[ 9*bN+n] = tmp[ 0]*a[18] + tmp[ 1]*a[19] + tmp[ 3]*a[21] + tmp[ 4]*a[22];
tmp[ 0] = a[24]*b_reg[ 0] + a[25]*b_reg[ 1] + a[27]*b_reg[ 6] + a[28]*b_reg[10];
tmp[ 1] = a[24]*b_reg[ 1] + a[25]*b_reg[ 2] + a[27]*b_reg[ 7] + a[28]*b_reg[11];
tmp[ 2] = a[24]*b_reg[ 3] + a[25]*b_reg[ 4] + a[27]*b_reg[ 8] + a[28]*b_reg[12];
tmp[ 3] = a[24]*b_reg[ 6] + a[25]*b_reg[ 7] + a[27]*b_reg[ 9] + a[28]*b_reg[13];
tmp[ 4] = a[24]*b_reg[10] + a[25]*b_reg[11] + a[27]*b_reg[13] + a[28]*b_reg[14];
tmp[ 5] = a[24]*b_reg[15] + a[25]*b_reg[16] + a[27]*b_reg[18] + a[28]*b_reg[19];
b[10*bN+n] = tmp[ 0]*a[0] + tmp[ 1]*a[1] + tmp[ 3]*a[3] + tmp[ 4]*a[4];
b[11*bN+n] = tmp[ 0]*a[6] + tmp[ 1]*a[7] + tmp[ 3]*a[9] + tmp[ 4]*a[10];
b[12*bN+n] = tmp[ 0]*a[12] + tmp[ 1]*a[13] + tmp[ 2] + tmp[ 3]*a[15] + tmp[ 4]*a[16] + tmp[ 5]*a[17];
b[13*bN+n] = tmp[ 0]*a[18] + tmp[ 1]*a[19] + tmp[ 3]*a[21] + tmp[ 4]*a[22];
b[14*bN+n] = tmp[ 0]*a[24] + tmp[ 1]*a[25] + tmp[ 3]*a[27] + tmp[ 4]*a[28];
tmp[ 0] = b_reg[15];
tmp[ 1] = b_reg[16];
tmp[ 2] = b_reg[17];
tmp[ 3] = b_reg[18];
tmp[ 4] = b_reg[19];
tmp[ 5] = b_reg[20];
// MultHelixPropTransp
b[15*bN+n] = tmp[ 0]*a[0] + tmp[ 1]*a[1] + tmp[ 3]*a[3] + tmp[ 4]*a[4];
b[16*bN+n] = tmp[ 0]*a[6] + tmp[ 1]*a[7] + tmp[ 3]*a[9] + tmp[ 4]*a[10];
b[17*bN+n] = tmp[ 0]*a[12] + tmp[ 1]*a[13] + tmp[ 2] + tmp[ 3]*a[15] + tmp[ 4]*a[16] + tmp[ 5]*a[17];
b[18*bN+n] = tmp[ 0]*a[18] + tmp[ 1]*a[19] + tmp[ 3]*a[21] + tmp[ 4]*a[22];
b[19*bN+n] = tmp[ 0]*a[24] + tmp[ 1]*a[25] + tmp[ 3]*a[27] + tmp[ 4]*a[28];
b[20*bN+n] = tmp[ 5];
}
}
// Propagates each track's parameters to the radius of its measured hit and
// transforms its covariance with the propagation jacobian (similarity_fn).
// Launch: 1-D grid of 1-D blocks; a grid-stride loop covers N larger than
// the total thread count (capped at MAX_BLOCKS_X * BLOCK_SIZE_X).
__global__ void propagation_kernel(
    const float* __restrict__ msPar, size_t stride_msPar,
    float *inPar, size_t inPar_stride, int *inChg,
    float *outPar, size_t outPar_stride, float *errorProp,
    size_t errorProp_stride, float *outErr, size_t outErr_stride, int N) {
  int grid_width = blockDim.x * gridDim.x;
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  float msRad_reg;
  // Using registers instead of shared memory is ~ 30% faster.
  float errorProp_reg[LL];
  // If there are more matrices than MAX_BLOCKS_X * BLOCK_SIZE_X threads,
  // stride through them.
  // BUG FIX: the original did `n += z*grid_width` inside the loop, which
  // advances the index by a cumulative 0,1,3,6,... grid widths and therefore
  // skips elements from the third tile onward; `tid + z*grid_width` visits
  // every index exactly once.
  for (int z = 0; z < (N-1)/grid_width + 1; z++) {
    int n = tid + z*grid_width;
    if (n < N) {
      computeMsRad_fn(msPar, stride_msPar, &msRad_reg, N, n);
      if (Config::doIterative) {
        helixAtRFromIterative_fn(inPar, inPar_stride,
            inChg, outPar, outPar_stride, msRad_reg,
            errorProp_reg, N, n);
      }
      similarity_fn(errorProp_reg, outErr, outErr_stride, N, n);
    }
  }
}
// Host-side launcher: configures a 1-D grid (capped at MAX_BLOCKS_X blocks
// of BLOCK_SIZE_X threads) and enqueues propagation_kernel on the given
// stream. Asynchronous: does not synchronize with the device.
void propagation_wrapper(cudaStream_t& stream,
    GPlex<float>& msPar,
    GPlex<float>& inPar, GPlex<int>& inChg,
    GPlex<float>& outPar, GPlex<float>& errorProp,
    GPlex<float>& outErr,
    const int N) {
  const int blocks_needed = (N - 1) / BLOCK_SIZE_X + 1;
  const dim3 grid(std::min(blocks_needed, MAX_BLOCKS_X), 1, 1);
  const dim3 block(BLOCK_SIZE_X, 1, 1);
  propagation_kernel<<<grid, block, 0, stream>>>(
      msPar.ptr, msPar.stride,
      inPar.ptr, inPar.stride, inChg.ptr,
      outPar.ptr, outPar.stride, errorProp.ptr,
      errorProp.stride, outErr.ptr, outErr.stride, N);
}
|
14ca703ba672588237b93a0cce686de13ac604f8.hip | // !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include<stdlib.h>
#include <opencv2/opencv.hpp>
#include <cfloat>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/core/cuda/border_interpolate.hpp>
#include <opencv2/core/cuda/vec_traits.hpp>
#include <opencv2/core/cuda/vec_math.hpp>
#include <hip/hip_runtime.h>
#include <chrono> // for high_resolution_clock
#define BLOCK_SIZE 32
#define RADIUS 4
// Horizontal pass of a separable Gaussian blur over a 3-channel 8-bit image.
// Each BLOCK_SIZE x BLOCK_SIZE block stages a tile with a RADIUS-wide halo
// on both sides in x into shared memory, then every interior thread
// convolves its pixel's row neighborhood with the (2*RADIUS+1)-tap kernel
// held in row 0 of d_kernelGaussConv (taps indexed 0..2*RADIUS).
// Pixels with x <= RADIUS, x >= cols - RADIUS, or y == 0 are left unwritten
// in dst (border policy: skip).
__global__ void gaussianSepHor(const cv::cuda::PtrStep<uchar3> src, cv::cuda::PtrStep<uchar3> dst,
	int rows, int cols, cv::cuda::PtrStep<float> d_kernelGaussConv)
{
	__shared__ uchar3 temp[BLOCK_SIZE][BLOCK_SIZE + 2 * RADIUS];
	// local indices (lindex_X is offset by RADIUS to leave room for the halo)
	int lindex_X = threadIdx.x + RADIUS;
	int lindex_Y = threadIdx.y;
	int dst_x = blockDim.x * blockIdx.x + lindex_X;
	int dst_y = blockDim.y * blockIdx.y + lindex_Y;
	if (dst_x < cols && dst_y < rows)
	{
		// Read input elements into shared memory
		temp[lindex_Y][lindex_X] = src(dst_y, dst_x);
		// The first RADIUS threads of the row also fetch the left halo,
		// and the right halo when it is still inside the image.
		if (threadIdx.x < RADIUS) {
			temp[lindex_Y][lindex_X - RADIUS] = src(dst_y, dst_x - RADIUS);
			if (dst_x + BLOCK_SIZE < cols)
				temp[lindex_Y][lindex_X + BLOCK_SIZE] = src(dst_y, dst_x + BLOCK_SIZE);
		}
	}
	// Synchronize (ensure all the data is available)
	__syncthreads();
	// Note the narrower guard here: only pixels whose whole neighborhood
	// was staged above produce an output.
	if (dst_x < cols - RADIUS && dst_y < rows && dst_x > RADIUS && dst_y > 0)
	{
		// Apply the kernel, accumulating each channel in float.
		float tmp[3] = { 0,0,0 };
		for (int i = -RADIUS; i <= RADIUS; i++)
		{
			tmp[0] += (float)(temp[lindex_Y][lindex_X + i].x) * d_kernelGaussConv(0, i + RADIUS);
			tmp[1] += (float)(temp[lindex_Y][lindex_X + i].y) * d_kernelGaussConv(0, i + RADIUS);
			tmp[2] += (float)(temp[lindex_Y][lindex_X + i].z) * d_kernelGaussConv(0, i + RADIUS);
		}
		dst(dst_y, dst_x).x = (unsigned char)(tmp[0]);
		dst(dst_y, dst_x).y = (unsigned char)(tmp[1]);
		dst(dst_y, dst_x).z = (unsigned char)(tmp[2]);
	}
}
// Vertical pass of the separable Gaussian blur: each thread convolves its
// own column over a (BLOCK_SIZE + 2*RADIUS) x BLOCK_SIZE shared-memory tile
// with the (2*RADIUS+1)-tap kernel in row 0 of d_kernelGaussConv.
// Pixels with y <= RADIUS, y >= rows - RADIUS, or x == 0 are left unwritten
// (border policy: skip), mirroring gaussianSepHor.
__global__ void gaussianSepVer(const cv::cuda::PtrStep<uchar3> src, cv::cuda::PtrStep<uchar3> dst,
	int rows, int cols, cv::cuda::PtrStep<float> d_kernelGaussConv)
{
	__shared__ uchar3 temp2[BLOCK_SIZE + 2 * RADIUS][BLOCK_SIZE];
	// local indices (lindex_Y2 is offset by RADIUS to leave room for the halo)
	int lindex_X2 = threadIdx.x;
	int lindex_Y2 = threadIdx.y + RADIUS;
	int dst_x2 = blockDim.x * blockIdx.x + lindex_X2;
	int dst_y2 = blockDim.y * blockIdx.y + lindex_Y2;
	// BUG FIX: the load and compute guards were swapped relative to
	// gaussianSepHor, so border-region threads skipped the shared-memory
	// load while still convolving over it, reading uninitialized tile slots
	// (e.g. the threadIdx.y == 0 row of block 0 was never staged). Loading
	// must use the broad in-bounds guard; the border-excluding guard belongs
	// to the compute phase below.
	if (dst_x2 < cols && dst_y2 < rows)
	{
		// Read input elements into shared memory
		temp2[lindex_Y2][lindex_X2] = src(dst_y2, dst_x2);
		// The first RADIUS rows of threads also fetch the top halo, and the
		// bottom halo when it is still inside the image.
		if (threadIdx.y < RADIUS) {
			temp2[lindex_Y2 - RADIUS][lindex_X2] = src(dst_y2 - RADIUS, dst_x2);
			if (dst_y2 + BLOCK_SIZE < rows)
				temp2[lindex_Y2 + BLOCK_SIZE][lindex_X2] = src(dst_y2 + BLOCK_SIZE, dst_x2);
		}
	}
	// Synchronize (ensure all the data is available)
	__syncthreads();
	// Only pixels whose whole vertical neighborhood was staged produce output.
	if (dst_x2 < cols && dst_y2 < rows - RADIUS && dst_x2 > 0 && dst_y2 > RADIUS)
	{
		// Apply the kernel, accumulating each channel in float.
		float tmp2[3] = { 0,0,0 };
		for (int i = -RADIUS; i <= RADIUS; i++)
		{
			tmp2[0] += (float)(temp2[lindex_Y2 + i][lindex_X2].x) * d_kernelGaussConv(0, i + RADIUS);
			tmp2[1] += (float)(temp2[lindex_Y2 + i][lindex_X2].y) * d_kernelGaussConv(0, i + RADIUS);
			tmp2[2] += (float)(temp2[lindex_Y2 + i][lindex_X2].z) * d_kernelGaussConv(0, i + RADIUS);
		}
		dst(dst_y2, dst_x2).x = (unsigned char)(tmp2[0]);
		dst(dst_y2, dst_x2).y = (unsigned char)(tmp2[1]);
		dst(dst_y2, dst_x2).z = (unsigned char)(tmp2[2]);
	}
}
// Integer division of a by b, rounded toward the next whole grid block
// (i.e. ceiling division for the non-negative sizes used here).
int divUp(int a, int b)
{
	const int quotient = a / b;
	return (a % b == 0) ? quotient : quotient + 1;
}
// Host-side launcher for one pass of the separable Gaussian filter.
// hor_pass == 1 runs the horizontal kernel; any other value runs the
// vertical one. Launches on the default stream without synchronizing.
void gaussianSepCUDA(cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& d_kernelGaussConv, int hor_pass)
{
	const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
	const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
	if (hor_pass == 1)
		gaussianSepHor << <grid, block >> > (src, dst, dst.rows, dst.cols, d_kernelGaussConv);
	else
		gaussianSepVer << <grid, block >> > (src, dst, dst.rows, dst.cols, d_kernelGaussConv);
}
| 14ca703ba672588237b93a0cce686de13ac604f8.cu | #include<stdio.h>
#include<stdlib.h>
#include <opencv2/opencv.hpp>
#include <cfloat>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/core/cuda/border_interpolate.hpp>
#include <opencv2/core/cuda/vec_traits.hpp>
#include <opencv2/core/cuda/vec_math.hpp>
#include <cuda_runtime.h>
#include <chrono> // for high_resolution_clock
#define BLOCK_SIZE 32
#define RADIUS 4
// Horizontal pass of a separable Gaussian blur over a 3-channel 8-bit image.
// Each BLOCK_SIZE x BLOCK_SIZE block stages a tile with a RADIUS-wide halo
// on both sides in x into shared memory, then every interior thread
// convolves its pixel's row neighborhood with the (2*RADIUS+1)-tap kernel
// held in row 0 of d_kernelGaussConv (taps indexed 0..2*RADIUS).
// Pixels with x <= RADIUS, x >= cols - RADIUS, or y == 0 are left unwritten
// in dst (border policy: skip).
__global__ void gaussianSepHor(const cv::cuda::PtrStep<uchar3> src, cv::cuda::PtrStep<uchar3> dst,
	int rows, int cols, cv::cuda::PtrStep<float> d_kernelGaussConv)
{
	__shared__ uchar3 temp[BLOCK_SIZE][BLOCK_SIZE + 2 * RADIUS];
	// local indices (lindex_X is offset by RADIUS to leave room for the halo)
	int lindex_X = threadIdx.x + RADIUS;
	int lindex_Y = threadIdx.y;
	int dst_x = blockDim.x * blockIdx.x + lindex_X;
	int dst_y = blockDim.y * blockIdx.y + lindex_Y;
	if (dst_x < cols && dst_y < rows)
	{
		// Read input elements into shared memory
		temp[lindex_Y][lindex_X] = src(dst_y, dst_x);
		// The first RADIUS threads of the row also fetch the left halo,
		// and the right halo when it is still inside the image.
		if (threadIdx.x < RADIUS) {
			temp[lindex_Y][lindex_X - RADIUS] = src(dst_y, dst_x - RADIUS);
			if (dst_x + BLOCK_SIZE < cols)
				temp[lindex_Y][lindex_X + BLOCK_SIZE] = src(dst_y, dst_x + BLOCK_SIZE);
		}
	}
	// Synchronize (ensure all the data is available)
	__syncthreads();
	// Note the narrower guard here: only pixels whose whole neighborhood
	// was staged above produce an output.
	if (dst_x < cols - RADIUS && dst_y < rows && dst_x > RADIUS && dst_y > 0)
	{
		// Apply the kernel, accumulating each channel in float.
		float tmp[3] = { 0,0,0 };
		for (int i = -RADIUS; i <= RADIUS; i++)
		{
			tmp[0] += (float)(temp[lindex_Y][lindex_X + i].x) * d_kernelGaussConv(0, i + RADIUS);
			tmp[1] += (float)(temp[lindex_Y][lindex_X + i].y) * d_kernelGaussConv(0, i + RADIUS);
			tmp[2] += (float)(temp[lindex_Y][lindex_X + i].z) * d_kernelGaussConv(0, i + RADIUS);
		}
		dst(dst_y, dst_x).x = (unsigned char)(tmp[0]);
		dst(dst_y, dst_x).y = (unsigned char)(tmp[1]);
		dst(dst_y, dst_x).z = (unsigned char)(tmp[2]);
	}
}
// Vertical pass of the separable Gaussian blur: each thread convolves its
// own column over a (BLOCK_SIZE + 2*RADIUS) x BLOCK_SIZE shared-memory tile
// with the (2*RADIUS+1)-tap kernel in row 0 of d_kernelGaussConv.
// Pixels with y <= RADIUS, y >= rows - RADIUS, or x == 0 are left unwritten
// (border policy: skip), mirroring gaussianSepHor.
__global__ void gaussianSepVer(const cv::cuda::PtrStep<uchar3> src, cv::cuda::PtrStep<uchar3> dst,
	int rows, int cols, cv::cuda::PtrStep<float> d_kernelGaussConv)
{
	__shared__ uchar3 temp2[BLOCK_SIZE + 2 * RADIUS][BLOCK_SIZE];
	// local indices (lindex_Y2 is offset by RADIUS to leave room for the halo)
	int lindex_X2 = threadIdx.x;
	int lindex_Y2 = threadIdx.y + RADIUS;
	int dst_x2 = blockDim.x * blockIdx.x + lindex_X2;
	int dst_y2 = blockDim.y * blockIdx.y + lindex_Y2;
	// BUG FIX: the load and compute guards were swapped relative to
	// gaussianSepHor, so border-region threads skipped the shared-memory
	// load while still convolving over it, reading uninitialized tile slots
	// (e.g. the threadIdx.y == 0 row of block 0 was never staged). Loading
	// must use the broad in-bounds guard; the border-excluding guard belongs
	// to the compute phase below.
	if (dst_x2 < cols && dst_y2 < rows)
	{
		// Read input elements into shared memory
		temp2[lindex_Y2][lindex_X2] = src(dst_y2, dst_x2);
		// The first RADIUS rows of threads also fetch the top halo, and the
		// bottom halo when it is still inside the image.
		if (threadIdx.y < RADIUS) {
			temp2[lindex_Y2 - RADIUS][lindex_X2] = src(dst_y2 - RADIUS, dst_x2);
			if (dst_y2 + BLOCK_SIZE < rows)
				temp2[lindex_Y2 + BLOCK_SIZE][lindex_X2] = src(dst_y2 + BLOCK_SIZE, dst_x2);
		}
	}
	// Synchronize (ensure all the data is available)
	__syncthreads();
	// Only pixels whose whole vertical neighborhood was staged produce output.
	if (dst_x2 < cols && dst_y2 < rows - RADIUS && dst_x2 > 0 && dst_y2 > RADIUS)
	{
		// Apply the kernel, accumulating each channel in float.
		float tmp2[3] = { 0,0,0 };
		for (int i = -RADIUS; i <= RADIUS; i++)
		{
			tmp2[0] += (float)(temp2[lindex_Y2 + i][lindex_X2].x) * d_kernelGaussConv(0, i + RADIUS);
			tmp2[1] += (float)(temp2[lindex_Y2 + i][lindex_X2].y) * d_kernelGaussConv(0, i + RADIUS);
			tmp2[2] += (float)(temp2[lindex_Y2 + i][lindex_X2].z) * d_kernelGaussConv(0, i + RADIUS);
		}
		dst(dst_y2, dst_x2).x = (unsigned char)(tmp2[0]);
		dst(dst_y2, dst_x2).y = (unsigned char)(tmp2[1]);
		dst(dst_y2, dst_x2).z = (unsigned char)(tmp2[2]);
	}
}
// Integer ceiling division: smallest number of size-b chunks covering a.
int divUp(int a, int b)
{
    int q = a / b;
    if (a % b != 0)
        ++q;
    return q;
}
// Launches one pass of the separable Gaussian filter over dst's extent.
// hor_pass == 1 selects the horizontal kernel, anything else the vertical
// one. The launch goes to the default stream and is not synchronized here.
void gaussianSepCUDA(cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst, cv::cuda::GpuMat& d_kernelGaussConv, int hor_pass)
{
    const dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 numBlocks(divUp(dst.cols, threadsPerBlock.x),
                         divUp(dst.rows, threadsPerBlock.y));
    if (hor_pass == 1)
        gaussianSepHor<<<numBlocks, threadsPerBlock>>>(src, dst, dst.rows, dst.cols, d_kernelGaussConv);
    else
        gaussianSepVer<<<numBlocks, threadsPerBlock>>>(src, dst, dst.rows, dst.cols, d_kernelGaussConv);
}
|
b6a6cca470e0f0fbc5d65943ac75e372da85aab9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
#define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)]
#define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)]
#define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b)
#define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b)
//============================================================
#define ldb m
#define lda m
#define ldc m
// Every thread block handles one dense BCSR block: thread t owns the
// contiguous size_b-long segment starting at A[blockIdx.x] + t*size_b and
// applies the LAPACK-style (1-based) pivot interchanges from ipiv to it.
// Only the first min(size_b, blockDim.x) segments of each block are touched.
__global__ void
zbcsrlupivloc_kernel(
    int size_b,         // dimension of one dense block
    int kblocks,        // number of BCSR blocks (gridDim.x is expected to cover this)
    double **A,         // device array of pointers, one dense block each
    magma_int_t *ipiv)  // 1-based pivot indices, length size_b
{
    if( blockIdx.x < kblocks ) {
        if(threadIdx.x < size_b ){
            // Interchanges must be applied in order, as LU pivoting produced them.
            for( int i=0; i<size_b; i++){
                int dst = ipiv[i]-1;  // convert 1-based pivot to a 0-based offset
                if( dst != i ){
                    // Swap entries i and dst of this thread's segment.
                    // NOTE(review): whether a segment is a row or a column of the
                    // block depends on MAGMA's storage order — confirm upstream.
                    double *A1 = A[blockIdx.x]+threadIdx.x*size_b+i;
                    double *A2 = A[blockIdx.x]+threadIdx.x*size_b+dst;
                    double tmp = *A2;
                    *A2 = *A1;
                    *A1 = tmp;
                }
            }
        }
    }
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine updates all blocks in
the trailing matrix.
Arguments
---------
@param[in]
size_b magma_int_t
blocksize in BCSR
@param[in]
kblocks magma_int_t
number of blocks
@param[in]
dA magmaDoubleComplex_ptr *
matrix in BCSR
@param[in]
ipiv magmaInt_ptr
array containing pivots
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbcsrlupivloc(
    magma_int_t size_b,
    magma_int_t kblocks,
    magmaDoubleComplex_ptr *dA,
    magmaInt_ptr ipiv,
    magma_queue_t queue )
{
    // NOTE(review): this translation unit defines PRECISION_z (top of file),
    // so the PRECISION_d guard compiles the launch away and the routine is a
    // no-op that still reports success. Presumably only the real-double
    // variant is implemented; confirm against MAGMA upstream.
#if defined(PRECISION_d)
    // One thread block per BCSR block; 64 threads cover up to 64 segments.
    dim3 threads( 64, 1 );
    dim3 grid(kblocks, 1, 1);
    // NOTE(review): queue->cuda_stream() survived hipify — verify it yields
    // the HIP stream in this build of MAGMA.
    hipLaunchKernelGGL(( zbcsrlupivloc_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
        size_b, kblocks, dA, ipiv );
#endif
    return MAGMA_SUCCESS;
}
| b6a6cca470e0f0fbc5d65943ac75e372da85aab9.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_z
#define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)]
#define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)]
#define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b)
#define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b)
//============================================================
#define ldb m
#define lda m
#define ldc m
// Every thread block handles one dense BCSR block: thread t owns the
// contiguous size_b-long segment starting at A[blockIdx.x] + t*size_b and
// applies the LAPACK-style (1-based) pivot interchanges from ipiv to it.
// Only the first min(size_b, blockDim.x) segments of each block are touched.
__global__ void
zbcsrlupivloc_kernel(
    int size_b,         // dimension of one dense block
    int kblocks,        // number of BCSR blocks (gridDim.x is expected to cover this)
    double **A,         // device array of pointers, one dense block each
    magma_int_t *ipiv)  // 1-based pivot indices, length size_b
{
    if( blockIdx.x < kblocks ) {
        if(threadIdx.x < size_b ){
            // Interchanges must be applied in order, as LU pivoting produced them.
            for( int i=0; i<size_b; i++){
                int dst = ipiv[i]-1;  // convert 1-based pivot to a 0-based offset
                if( dst != i ){
                    // Swap entries i and dst of this thread's segment.
                    // NOTE(review): whether a segment is a row or a column of the
                    // block depends on MAGMA's storage order — confirm upstream.
                    double *A1 = A[blockIdx.x]+threadIdx.x*size_b+i;
                    double *A2 = A[blockIdx.x]+threadIdx.x*size_b+dst;
                    double tmp = *A2;
                    *A2 = *A1;
                    *A1 = tmp;
                }
            }
        }
    }
}
/**
Purpose
-------
For a Block-CSR ILU factorization, this routine updates all blocks in
the trailing matrix.
Arguments
---------
@param[in]
size_b magma_int_t
blocksize in BCSR
@param[in]
kblocks magma_int_t
number of blocks
@param[in]
dA magmaDoubleComplex_ptr *
matrix in BCSR
@param[in]
ipiv magmaInt_ptr
array containing pivots
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbcsrlupivloc(
    magma_int_t size_b,
    magma_int_t kblocks,
    magmaDoubleComplex_ptr *dA,
    magmaInt_ptr ipiv,
    magma_queue_t queue )
{
    // NOTE(review): this translation unit defines PRECISION_z (top of file),
    // so the PRECISION_d guard compiles the launch away and the routine is a
    // no-op that still reports success. Presumably only the real-double
    // variant is implemented; confirm against MAGMA upstream.
#if defined(PRECISION_d)
    // One thread block per BCSR block; 64 threads cover up to 64 segments.
    dim3 threads( 64, 1 );
    dim3 grid(kblocks, 1, 1);
    zbcsrlupivloc_kernel<<< grid, threads, 0, queue->cuda_stream() >>>(
        size_b, kblocks, dA, ipiv );
#endif
    return MAGMA_SUCCESS;
}
|
8fa98e3d3004fe5306e759c9841ad2bb34a778a5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "backward_maxpool_layer_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated micro-benchmark driver for backward_maxpool_layer_kernel.
// argv[1] selects how many rows of matrices_ to sweep; for every
// (matrix size, block shape) pair the kernel is warmed up (1 + 10 launches)
// and then timed over 1000 launches, printing "[usecs,(BX,BY),(X,Y)]".
int main(int argc, char **argv) {
    hipSetDevice(0);
    // Robustness fix: the original dereferenced argv[1] unconditionally.
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <matrix_count>" << endl;
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int n = XSIZE*YSIZE;
            int in_h = 1;
            int in_w = 1;
            int in_c = 1;
            int stride = 2;
            int size = XSIZE*YSIZE;
            // Fix: the generated code allocated XSIZE*YSIZE *bytes* for
            // buffers of XSIZE*YSIZE *elements*; scale by sizeof().
            float *delta = NULL;
            hipMalloc(&delta, XSIZE*YSIZE*sizeof(float));
            float *prev_delta = NULL;
            hipMalloc(&prev_delta, XSIZE*YSIZE*sizeof(float));
            int *indexes = NULL;
            hipMalloc(&indexes, XSIZE*YSIZE*sizeof(int));
            // Round the launch shape up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // force context creation before timing
            // One synchronized launch, then a 10-launch warm-up.
            hipLaunchKernelGGL(backward_maxpool_layer_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, n, in_h, in_w, in_c, stride, size, delta, prev_delta, indexes);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(backward_maxpool_layer_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, n, in_h, in_w, in_c, stride, size, delta, prev_delta, indexes);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(backward_maxpool_layer_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, n, in_h, in_w, in_c, stride, size, delta, prev_delta, indexes);
            }
            // NOTE(review): no device synchronization before reading the clock,
            // so this times launch overhead plus partial execution; preserved
            // to keep results comparable with the original harness.
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Fix: release per-configuration buffers (originally leaked on
            // every iteration of the sweep).
            hipFree(delta);
            hipFree(prev_delta);
            hipFree(indexes);
        }
}} | 8fa98e3d3004fe5306e759c9841ad2bb34a778a5.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "backward_maxpool_layer_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated micro-benchmark driver for backward_maxpool_layer_kernel.
// argv[1] selects how many rows of matrices_ to sweep; for every
// (matrix size, block shape) pair the kernel is warmed up (1 + 10 launches)
// and then timed over 1000 launches, printing "[usecs,(BX,BY),(X,Y)]".
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // Robustness fix: the original dereferenced argv[1] unconditionally.
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <matrix_count>" << endl;
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int n = XSIZE*YSIZE;
            int in_h = 1;
            int in_w = 1;
            int in_c = 1;
            int stride = 2;
            int size = XSIZE*YSIZE;
            // Fix: the generated code allocated XSIZE*YSIZE *bytes* for
            // buffers of XSIZE*YSIZE *elements*; scale by sizeof().
            float *delta = NULL;
            cudaMalloc(&delta, XSIZE*YSIZE*sizeof(float));
            float *prev_delta = NULL;
            cudaMalloc(&prev_delta, XSIZE*YSIZE*sizeof(float));
            int *indexes = NULL;
            cudaMalloc(&indexes, XSIZE*YSIZE*sizeof(int));
            // Round the launch shape up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force context creation before timing
            // One synchronized launch, then a 10-launch warm-up.
            backward_maxpool_layer_kernel<<<gridBlock,threadBlock>>>(n,in_h,in_w,in_c,stride,size,delta,prev_delta,indexes);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                backward_maxpool_layer_kernel<<<gridBlock,threadBlock>>>(n,in_h,in_w,in_c,stride,size,delta,prev_delta,indexes);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                backward_maxpool_layer_kernel<<<gridBlock,threadBlock>>>(n,in_h,in_w,in_c,stride,size,delta,prev_delta,indexes);
            }
            // NOTE(review): no device synchronization before reading the clock,
            // so this times launch overhead plus partial execution; preserved
            // to keep results comparable with the original harness.
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Fix: release per-configuration buffers (originally leaked on
            // every iteration of the sweep).
            cudaFree(delta);
            cudaFree(prev_delta);
            cudaFree(indexes);
        }
}} |
6f64a1f68169be9c8bb339b279a12301987f95da.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/one_hot_kernel.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
// Scatters the "hot" entries of a one-hot encoding: thread idx sets
// out[idx * depth + in[idx]] = 1. The output must be pre-zeroed by the
// caller (see OneHotV2OpCUDAFunctor::apply in this file). Labels outside
// [0, depth) are silently skipped.
template <typename InT, typename OutT>
__global__ void FillOutputKernel(const InT* p_in_data,
                                 OutT* p_out_data,
                                 const int64_t numel,
                                 const int depth) {
  // One thread per input element; the launcher sizes the grid to cover numel.
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  // NOTE(review): idx is a 32-bit int while numel is int64_t; inputs with
  // more than INT_MAX elements would overflow — presumably never expected.
  if (idx < numel && p_in_data[idx] >= 0 && p_in_data[idx] < depth) {
    *(p_out_data + (idx * depth) + p_in_data[idx]) = 1.0;
  }
}
// Dtype-dispatch functor: phi::VisitDataType instantiates apply<OutT> for
// the requested output dtype (see OneHotRawKernel in this file).
template <typename DeviceContext, typename InT>
struct OneHotV2OpCUDAFunctor {
  const DenseTensor* in_;     // input label tensor, element type InT
  DenseTensor* out_;          // output one-hot tensor, trailing dim == depth_
  const DeviceContext& ctx_;  // device context supplying Alloc + stream
  int depth_;                 // number of one-hot classes
  OneHotV2OpCUDAFunctor(const DenseTensor* in,
                        DenseTensor* out,
                        int depth,
                        const DeviceContext& ctx)
      : in_(in), out_(out), depth_(depth), ctx_(ctx) {}
  // Allocates and zeroes the output, then scatters one 1 per input element.
  template <typename OutT>
  void apply() const {
    auto* p_in_data = in_->data<InT>();
    auto numel = in_->numel();
    auto* p_out_data = ctx_.template Alloc<OutT>(out_);
    auto stream = ctx_.stream();
    // FillOutputKernel only writes the hot positions, so clear first.
    funcs::set_constant(ctx_, out_, 0.0);
    // ceil(numel / PADDLE_CUDA_NUM_THREADS) blocks, one thread per element.
    hipLaunchKernelGGL(( FillOutputKernel), (numel + PADDLE_CUDA_NUM_THREADS - 1) /
                           PADDLE_CUDA_NUM_THREADS,
                       dim3(PADDLE_CUDA_NUM_THREADS),
                       0,
                       stream, p_in_data, p_out_data, numel, depth_);
  }
};
// GPU one-hot: expands label tensor x into an output whose trailing
// dimension has size `depth`, with a 1 at each label position and zeros
// elsewhere. `allow_out_of_range` is accepted for API compatibility but is
// unused here: out-of-range labels are always skipped by FillOutputKernel.
template <typename T, typename Context>
void OneHotRawKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     int32_t depth,
                     DataType dtype,
                     bool allow_out_of_range,
                     DenseTensor* out) {
  auto out_dims = out->dims();
  // A -1 placeholder in the last dimension means "infer from depth".
  if (out_dims[out_dims.size() - 1] == -1) {
    out_dims[out_dims.size() - 1] = depth;
    out->Resize(out_dims);
  }
  // Dispatch on the requested output dtype.
  phi::VisitDataType(
      dtype, OneHotV2OpCUDAFunctor<Context, T>(&x, out, depth, dev_ctx));
}
} // namespace phi
PD_REGISTER_KERNEL(
one_hot_raw, GPU, ALL_LAYOUT, phi::OneHotRawKernel, int, int64_t) {}
| 6f64a1f68169be9c8bb339b279a12301987f95da.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/one_hot_kernel.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace phi {
using paddle::platform::PADDLE_CUDA_NUM_THREADS;
// Scatters the "hot" entries of a one-hot encoding: thread idx sets
// out[idx * depth + in[idx]] = 1. The output must be pre-zeroed by the
// caller (see OneHotV2OpCUDAFunctor::apply in this file). Labels outside
// [0, depth) are silently skipped.
template <typename InT, typename OutT>
__global__ void FillOutputKernel(const InT* p_in_data,
                                 OutT* p_out_data,
                                 const int64_t numel,
                                 const int depth) {
  // One thread per input element; the launcher sizes the grid to cover numel.
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  // NOTE(review): idx is a 32-bit int while numel is int64_t; inputs with
  // more than INT_MAX elements would overflow — presumably never expected.
  if (idx < numel && p_in_data[idx] >= 0 && p_in_data[idx] < depth) {
    *(p_out_data + (idx * depth) + p_in_data[idx]) = 1.0;
  }
}
// Dtype-dispatch functor: phi::VisitDataType instantiates apply<OutT> for
// the requested output dtype (see OneHotRawKernel in this file).
template <typename DeviceContext, typename InT>
struct OneHotV2OpCUDAFunctor {
  const DenseTensor* in_;     // input label tensor, element type InT
  DenseTensor* out_;          // output one-hot tensor, trailing dim == depth_
  const DeviceContext& ctx_;  // device context supplying Alloc + stream
  int depth_;                 // number of one-hot classes
  OneHotV2OpCUDAFunctor(const DenseTensor* in,
                        DenseTensor* out,
                        int depth,
                        const DeviceContext& ctx)
      : in_(in), out_(out), depth_(depth), ctx_(ctx) {}
  // Allocates and zeroes the output, then scatters one 1 per input element.
  template <typename OutT>
  void apply() const {
    auto* p_in_data = in_->data<InT>();
    auto numel = in_->numel();
    auto* p_out_data = ctx_.template Alloc<OutT>(out_);
    auto stream = ctx_.stream();
    // FillOutputKernel only writes the hot positions, so clear first.
    funcs::set_constant(ctx_, out_, 0.0);
    // ceil(numel / PADDLE_CUDA_NUM_THREADS) blocks, one thread per element.
    FillOutputKernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) /
                           PADDLE_CUDA_NUM_THREADS,
                       PADDLE_CUDA_NUM_THREADS,
                       0,
                       stream>>>(p_in_data, p_out_data, numel, depth_);
  }
};
// GPU one-hot: expands label tensor x into an output whose trailing
// dimension has size `depth`, with a 1 at each label position and zeros
// elsewhere. `allow_out_of_range` is accepted for API compatibility but is
// unused here: out-of-range labels are always skipped by FillOutputKernel.
template <typename T, typename Context>
void OneHotRawKernel(const Context& dev_ctx,
                     const DenseTensor& x,
                     int32_t depth,
                     DataType dtype,
                     bool allow_out_of_range,
                     DenseTensor* out) {
  auto out_dims = out->dims();
  // A -1 placeholder in the last dimension means "infer from depth".
  if (out_dims[out_dims.size() - 1] == -1) {
    out_dims[out_dims.size() - 1] = depth;
    out->Resize(out_dims);
  }
  // Dispatch on the requested output dtype.
  phi::VisitDataType(
      dtype, OneHotV2OpCUDAFunctor<Context, T>(&x, out, depth, dev_ctx));
}
} // namespace phi
PD_REGISTER_KERNEL(
one_hot_raw, GPU, ALL_LAYOUT, phi::OneHotRawKernel, int, int64_t) {}
|
725fca52deeb9f5d417ce941f309c4f7d843c470.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by search_multi_cta_00_generate.py
*
* Make changes there and run in this directory:
*
* > python search_multi_cta_00_generate.py
*
*/
#include <raft/neighbors/detail/cagra/search_multi_cta_kernel-inl.cuh>
namespace raft::neighbors::cagra::detail::multi_cta_search {
#define instantiate_kernel_selection(TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T) \
template void select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T>( \
raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset, \
raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph, \
INDEX_T* const topk_indices_ptr, \
DISTANCE_T* const topk_distances_ptr, \
const DATA_T* const queries_ptr, \
const uint32_t num_queries, \
const INDEX_T* dev_seed_ptr, \
uint32_t* const num_executed_iterations, \
uint32_t topk, \
uint32_t block_size, \
uint32_t result_buffer_size, \
uint32_t smem_size, \
int64_t hash_bitlen, \
INDEX_T* hashmap_ptr, \
uint32_t num_cta_per_query, \
uint32_t num_random_samplings, \
uint64_t rand_xor_mask, \
uint32_t num_seeds, \
size_t itopk_size, \
size_t search_width, \
size_t min_iterations, \
size_t max_iterations, \
hipStream_t stream);
instantiate_kernel_selection(32, 512, float, uint64_t, float);
#undef instantiate_kernel_selection
} // namespace raft::neighbors::cagra::detail::multi_cta_search
| 725fca52deeb9f5d417ce941f309c4f7d843c470.cu |
/*
* Copyright (c) 2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: this file is generated by search_multi_cta_00_generate.py
*
* Make changes there and run in this directory:
*
* > python search_multi_cta_00_generate.py
*
*/
#include <raft/neighbors/detail/cagra/search_multi_cta_kernel-inl.cuh>
namespace raft::neighbors::cagra::detail::multi_cta_search {
#define instantiate_kernel_selection(TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T) \
template void select_and_run<TEAM_SIZE, MAX_DATASET_DIM, DATA_T, INDEX_T, DISTANCE_T>( \
raft::device_matrix_view<const DATA_T, int64_t, layout_stride> dataset, \
raft::device_matrix_view<const INDEX_T, int64_t, row_major> graph, \
INDEX_T* const topk_indices_ptr, \
DISTANCE_T* const topk_distances_ptr, \
const DATA_T* const queries_ptr, \
const uint32_t num_queries, \
const INDEX_T* dev_seed_ptr, \
uint32_t* const num_executed_iterations, \
uint32_t topk, \
uint32_t block_size, \
uint32_t result_buffer_size, \
uint32_t smem_size, \
int64_t hash_bitlen, \
INDEX_T* hashmap_ptr, \
uint32_t num_cta_per_query, \
uint32_t num_random_samplings, \
uint64_t rand_xor_mask, \
uint32_t num_seeds, \
size_t itopk_size, \
size_t search_width, \
size_t min_iterations, \
size_t max_iterations, \
cudaStream_t stream);
instantiate_kernel_selection(32, 512, float, uint64_t, float);
#undef instantiate_kernel_selection
} // namespace raft::neighbors::cagra::detail::multi_cta_search
|
b13174a3206654f8fa05fa2721826e7a4699557a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <iostream>
// Empty placeholder kernel — demonstrates a minimal device launch.
__global__ void kernel(void) {
}

// Launch the empty kernel on a single thread, print a greeting, and exit.
// No synchronization before return; harmless since the kernel does nothing.
int main(void) {
    hipLaunchKernelGGL(kernel, dim3(1), dim3(1), 0, 0);
    printf("Hello, World!\n");
    return 0;
}
| b13174a3206654f8fa05fa2721826e7a4699557a.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <iostream>
// Empty placeholder kernel — demonstrates a minimal device launch.
__global__ void kernel(void) {
}

// Launch the empty kernel on a single thread, print a greeting, and exit.
// No synchronization before return; harmless since the kernel does nothing.
int main(void) {
    kernel<<<1, 1>>>();
    printf("Hello, World!\n");
    return 0;
}
|
f6a0b840bffcb7bf12e89cb7a840e8ba9870e3bc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_kernelpp_cuda.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated micro-benchmark driver for _kernelpp_cuda.
// argv[1] selects how many rows of matrices_ to sweep; for every
// (matrix size, block shape) pair the kernel is warmed up (1 + 10 launches)
// and then timed over 1000 launches, printing "[usecs,(BX,BY),(X,Y)]".
int main(int argc, char **argv) {
    hipSetDevice(0);
    // Robustness fix: the original dereferenced argv[1] unconditionally.
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <matrix_count>" << endl;
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int num_projections = 1;
            float mov = 1;
            int num_pixels = 1;
            int num_grid = 1;
            int num_slices = 1;
            // Fix: the generated code allocated XSIZE*YSIZE *bytes* for
            // float buffers of XSIZE*YSIZE *elements*; scale by sizeof(float).
            float *dev_gridx = NULL;
            hipMalloc(&dev_gridx, XSIZE*YSIZE*sizeof(float));
            float *dev_gridy = NULL;
            hipMalloc(&dev_gridy, XSIZE*YSIZE*sizeof(float));
            float *dev_suma = NULL;
            hipMalloc(&dev_suma, XSIZE*YSIZE*sizeof(float));
            float *dev_E = NULL;
            hipMalloc(&dev_E, XSIZE*YSIZE*sizeof(float));
            float *dev_data = NULL;
            hipMalloc(&dev_data, XSIZE*YSIZE*sizeof(float));
            float *dev_recon = NULL;
            hipMalloc(&dev_recon, XSIZE*YSIZE*sizeof(float));
            float *dev_theta = NULL;
            hipMalloc(&dev_theta, XSIZE*YSIZE*sizeof(float));
            // Round the launch shape up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // force context creation before timing
            // One synchronized launch, then a 10-launch warm-up.
            hipLaunchKernelGGL(_kernelpp_cuda, dim3(gridBlock), dim3(threadBlock), 0, 0, num_projections, mov, num_pixels, num_grid, num_slices, dev_gridx, dev_gridy, dev_suma, dev_E, dev_data, dev_recon, dev_theta);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(_kernelpp_cuda, dim3(gridBlock), dim3(threadBlock), 0, 0, num_projections, mov, num_pixels, num_grid, num_slices, dev_gridx, dev_gridy, dev_suma, dev_E, dev_data, dev_recon, dev_theta);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(_kernelpp_cuda, dim3(gridBlock), dim3(threadBlock), 0, 0, num_projections, mov, num_pixels, num_grid, num_slices, dev_gridx, dev_gridy, dev_suma, dev_E, dev_data, dev_recon, dev_theta);
            }
            // NOTE(review): no device synchronization before reading the clock,
            // so this times launch overhead plus partial execution; preserved
            // to keep results comparable with the original harness.
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Fix: release per-configuration buffers (originally leaked on
            // every iteration of the sweep).
            hipFree(dev_gridx);
            hipFree(dev_gridy);
            hipFree(dev_suma);
            hipFree(dev_E);
            hipFree(dev_data);
            hipFree(dev_recon);
            hipFree(dev_theta);
        }
}} | f6a0b840bffcb7bf12e89cb7a840e8ba9870e3bc.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_kernelpp_cuda.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
// Auto-generated micro-benchmark driver for _kernelpp_cuda.
// argv[1] selects how many rows of matrices_ to sweep; for every
// (matrix size, block shape) pair the kernel is warmed up (1 + 10 launches)
// and then timed over 1000 launches, printing "[usecs,(BX,BY),(X,Y)]".
int main(int argc, char **argv) {
    cudaSetDevice(0);
    // Robustness fix: the original dereferenced argv[1] unconditionally.
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <matrix_count>" << endl;
        return 1;
    }
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int num_projections = 1;
            float mov = 1;
            int num_pixels = 1;
            int num_grid = 1;
            int num_slices = 1;
            // Fix: the generated code allocated XSIZE*YSIZE *bytes* for
            // float buffers of XSIZE*YSIZE *elements*; scale by sizeof(float).
            float *dev_gridx = NULL;
            cudaMalloc(&dev_gridx, XSIZE*YSIZE*sizeof(float));
            float *dev_gridy = NULL;
            cudaMalloc(&dev_gridy, XSIZE*YSIZE*sizeof(float));
            float *dev_suma = NULL;
            cudaMalloc(&dev_suma, XSIZE*YSIZE*sizeof(float));
            float *dev_E = NULL;
            cudaMalloc(&dev_E, XSIZE*YSIZE*sizeof(float));
            float *dev_data = NULL;
            cudaMalloc(&dev_data, XSIZE*YSIZE*sizeof(float));
            float *dev_recon = NULL;
            cudaMalloc(&dev_recon, XSIZE*YSIZE*sizeof(float));
            float *dev_theta = NULL;
            cudaMalloc(&dev_theta, XSIZE*YSIZE*sizeof(float));
            // Round the launch shape up to a multiple of the block shape.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0) {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // force context creation before timing
            // One synchronized launch, then a 10-launch warm-up.
            _kernelpp_cuda<<<gridBlock,threadBlock>>>(num_projections,mov,num_pixels,num_grid,num_slices,dev_gridx,dev_gridy,dev_suma,dev_E,dev_data,dev_recon,dev_theta);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                _kernelpp_cuda<<<gridBlock,threadBlock>>>(num_projections,mov,num_pixels,num_grid,num_slices,dev_gridx,dev_gridy,dev_suma,dev_E,dev_data,dev_recon,dev_theta);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                _kernelpp_cuda<<<gridBlock,threadBlock>>>(num_projections,mov,num_pixels,num_grid,num_slices,dev_gridx,dev_gridy,dev_suma,dev_E,dev_data,dev_recon,dev_theta);
            }
            // NOTE(review): no device synchronization before reading the clock,
            // so this times launch overhead plus partial execution; preserved
            // to keep results comparable with the original harness.
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Fix: release per-configuration buffers (originally leaked on
            // every iteration of the sweep).
            cudaFree(dev_gridx);
            cudaFree(dev_gridy);
            cudaFree(dev_suma);
            cudaFree(dev_E);
            cudaFree(dev_data);
            cudaFree(dev_recon);
            cudaFree(dev_theta);
        }
}} |
f4169cb0088b13b2050e71fda17560e9c26149fd.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <assert.h>
#include <glog/logging.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <sys/mman.h>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
#if __CUDA_ARCH__ < 200
int CUDA_ARCH = 100;
#else
int CUDA_ARCH = 200;
#endif
using namespace std;
// Pinned host memory stress test: prints basic device/driver info, then
// allocates 256 MB chunks of page-locked memory in an endless loop to probe
// how much the system will pin. Chunks are deliberately never freed, and
// the loop never exits (even after a failure it keeps retrying).
int main() {
    cout << "CUDA_ARCH = " << CUDA_ARCH << endl;
    int driverVersion;
    CUDA_CHECK(hipDriverGetVersion(&driverVersion));
    cout << "driverVersion = " << driverVersion << endl;
    hipDeviceProp_t deviceProp;
    int devID = 0;
    CUDA_CHECK(hipGetDeviceProperties(&deviceProp, devID));
    cout << "deviceProp.major = " << deviceProp.major << endl;
    cout << "deviceProp.minor = " << deviceProp.minor << endl;
    // Require compute capability >= 2.0 (major/minor packed as 0xMm).
    if (((deviceProp.major << 4) + deviceProp.minor) < 0x20) {
        cout << "binomialOptions requires Compute Capability of SM 2.0 or higher to run.\n";
        hipDeviceReset();
        exit(0);
    }
    size_t count = 0;
    // 64 Mi floats = 256 MB per chunk, matching the "count * 256" MB report.
    size_t size = 64 * 1024 * 1024 * sizeof(float);
    while (true) {
        void *host_array;
        if (hipHostMalloc(&host_array, size) == hipSuccess) {
            // memset(host_array, 0, size);
            count++;
            cout << "Allocated " << count * 256 << " MB" << endl;
        } else {
            cout << "Allocation failed at " << count * 256 << " MB" << endl;
        }
    }
    // Alternative allocation strategies kept for experimentation:
    // CUDA_CHECK(hipHostMalloc(&host_array, size, hipHostMallocMapped));
    // CUDA_CHECK(hipMalloc(&host_array, size));
    // CHECK(host_array = malloc(size));
    // CHECK_EQ(mlock(host_array, size), 0);
} | f4169cb0088b13b2050e71fda17560e9c26149fd.cu | #include <iostream>
#include <assert.h>
#include <glog/logging.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <sys/mman.h>
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
#if __CUDA_ARCH__ < 200
int CUDA_ARCH = 100;
#else
int CUDA_ARCH = 200;
#endif
using namespace std;
// Pinned host memory stress test: prints basic device/driver info, then
// allocates 256 MB chunks of page-locked memory in an endless loop to probe
// how much the system will pin. Chunks are deliberately never freed, and
// the loop never exits (even after a failure it keeps retrying).
int main() {
    cout << "CUDA_ARCH = " << CUDA_ARCH << endl;
    int driverVersion;
    CUDA_CHECK(cudaDriverGetVersion(&driverVersion));
    cout << "driverVersion = " << driverVersion << endl;
    cudaDeviceProp deviceProp;
    int devID = 0;
    CUDA_CHECK(cudaGetDeviceProperties(&deviceProp, devID));
    cout << "deviceProp.major = " << deviceProp.major << endl;
    cout << "deviceProp.minor = " << deviceProp.minor << endl;
    // Require compute capability >= 2.0 (major/minor packed as 0xMm).
    if (((deviceProp.major << 4) + deviceProp.minor) < 0x20) {
        cout << "binomialOptions requires Compute Capability of SM 2.0 or higher to run.\n";
        cudaDeviceReset();
        exit(0);
    }
    size_t count = 0;
    // 64 Mi floats = 256 MB per chunk, matching the "count * 256" MB report.
    size_t size = 64 * 1024 * 1024 * sizeof(float);
    while (true) {
        void *host_array;
        if (cudaMallocHost(&host_array, size) == cudaSuccess) {
            // memset(host_array, 0, size);
            count++;
            cout << "Allocated " << count * 256 << " MB" << endl;
        } else {
            cout << "Allocation failed at " << count * 256 << " MB" << endl;
        }
    }
    // Alternative allocation strategies kept for experimentation:
    // CUDA_CHECK(cudaHostAlloc(&host_array, size, cudaHostAllocMapped));
    // CUDA_CHECK(cudaMalloc(&host_array, size));
    // CHECK(host_array = malloc(size));
    // CHECK_EQ(mlock(host_array, size), 0);
} |
1e30a1f4818ea98c135f6e4b3fe0fe44f0b8d8fb.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::cuda;
using namespace cv::cuda::device;
//////////////////////////////////////////////////////////////////////////
// min
// Packed-SIMD minimum functors: each operates on one 32-bit word holding
// four bytes (VMin4) or two halfwords (VMin2) and takes the lane-wise
// minimum via the vmin4/vmin2 helpers from simd_functions.hpp.
namespace arithm
{
    struct VMin4 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vmin4(a, b);
        }
        // Explicit trivial constructors — presumably required so the functor
        // type is usable in device code with older nvcc; TODO confirm.
        __host__ __device__ __forceinline__ VMin4() {}
        __host__ __device__ __forceinline__ VMin4(const VMin4&) {}
    };
    struct VMin2 : binary_function<uint, uint, uint>
    {
        __device__ __forceinline__ uint operator ()(uint a, uint b) const
        {
            return vmin2(a, b);
        }
        __host__ __device__ __forceinline__ VMin2() {}
        __host__ __device__ __forceinline__ VMin2(const VMin2&) {}
    };
}
// Transform-policy traits: key each functor to ArithmFuncTraits (defined in
// arithm_func_traits.hpp) by operand/result width so cuda::device::transform
// picks an appropriate launch configuration.
namespace cv { namespace cuda { namespace device
{
    template <> struct TransformFunctorTraits< arithm::VMin4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    template <> struct TransformFunctorTraits< arithm::VMin2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
    {
    };
    template <typename T> struct TransformFunctorTraits< minimum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
    template <typename T> struct TransformFunctorTraits< binder2nd< minimum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
    {
    };
}}}
namespace arithm
{
void minMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
device::transform(src1, src2, dst, VMin4(), WithOutMask(), stream);
}
void minMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
device::transform(src1, src2, dst, VMin2(), WithOutMask(), stream);
}
template <typename T> void minMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, minimum<T>(), WithOutMask(), stream);
}
template void minMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void minMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void minMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void minMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void minMat<int >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void minMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void minMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template <typename T> void minScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream)
{
device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::cuda::device::bind2nd(minimum<T>(), src2), WithOutMask(), stream);
}
template void minScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<int >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void minScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// max
// Element-wise maximum: mirrors the min section above with vmax functors.
namespace arithm
{
// Per-byte (4 x u8) maximum packed in one 32-bit word via the vmax4 SIMD intrinsic.
struct VMax4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax4(a, b);
}
// Trivial ctor / copy-ctor so the functor can be passed by value into kernels.
__host__ __device__ __forceinline__ VMax4() {}
__host__ __device__ __forceinline__ VMax4(const VMax4&) {}
};
// Per-halfword (2 x u16) maximum packed in one 32-bit word via vmax2.
struct VMax2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax2(a, b);
}
__host__ __device__ __forceinline__ VMax2() {}
__host__ __device__ __forceinline__ VMax2(const VMax2&) {}
};
}
// Traits specializations giving device::transform the element sizes for the
// max functors (same pattern as the min traits above).
namespace cv { namespace cuda { namespace device
{
template <> struct TransformFunctorTraits< arithm::VMax4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VMax2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T> struct TransformFunctorTraits< maximum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< maximum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Host-side launchers for element-wise max; work is enqueued on `stream`.
namespace arithm
{
// Packed 4 x u8 max of two matrices reinterpreted as uint words.
void maxMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
device::transform(src1, src2, dst, VMax4(), WithOutMask(), stream);
}
// Packed 2 x u16 max of two matrices reinterpreted as uint words.
void maxMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, hipStream_t stream)
{
device::transform(src1, src2, dst, VMax2(), WithOutMask(), stream);
}
// Generic per-element max of two matrices of element type T.
template <typename T> void maxMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream)
{
device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, maximum<T>(), WithOutMask(), stream);
}
template void maxMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
template void maxMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, hipStream_t stream);
// Per-element max of a matrix with a scalar; src2 is bound as the functor's
// second operand via bind2nd.
template <typename T> void maxScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream)
{
device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::cuda::device::bind2nd(maximum<T>(), src2), WithOutMask(), stream);
}
template void maxScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<int   >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
template void maxScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, hipStream_t stream);
}
#endif // CUDA_DISABLER
| 1e30a1f4818ea98c135f6e4b3fe0fe44f0b8d8fb.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/functional.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/simd_functions.hpp"
#include "arithm_func_traits.hpp"
using namespace cv::cuda;
using namespace cv::cuda::device;
//////////////////////////////////////////////////////////////////////////
// min
// Element-wise minimum (CUDA counterpart of the HIP section above):
// SIMD-packed functors for uint words plus generic launchers.
namespace arithm
{
// Per-byte (4 x u8) minimum packed in one 32-bit word via the vmin4 SIMD intrinsic.
struct VMin4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmin4(a, b);
}
// Trivial ctor / copy-ctor so the functor can be passed by value into kernels.
__host__ __device__ __forceinline__ VMin4() {}
__host__ __device__ __forceinline__ VMin4(const VMin4&) {}
};
// Per-halfword (2 x u16) minimum packed in one 32-bit word via vmin2.
struct VMin2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmin2(a, b);
}
__host__ __device__ __forceinline__ VMin2() {}
__host__ __device__ __forceinline__ VMin2(const VMin2&) {}
};
}
// Traits specializations: element read/write sizes for device::transform.
namespace cv { namespace cuda { namespace device
{
template <> struct TransformFunctorTraits< arithm::VMin4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VMin2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T> struct TransformFunctorTraits< minimum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< minimum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Host-side launchers; work is enqueued on `stream`.
namespace arithm
{
// Packed 4 x u8 min of two matrices reinterpreted as uint words.
void minMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
device::transform(src1, src2, dst, VMin4(), WithOutMask(), stream);
}
// Packed 2 x u16 min of two matrices reinterpreted as uint words.
void minMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
device::transform(src1, src2, dst, VMin2(), WithOutMask(), stream);
}
// Generic per-element min of two matrices of element type T.
template <typename T> void minMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, minimum<T>(), WithOutMask(), stream);
}
template void minMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void minMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
// Per-element min of a matrix with a scalar bound via bind2nd.
template <typename T> void minScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream)
{
device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::cuda::device::bind2nd(minimum<T>(), src2), WithOutMask(), stream);
}
template void minScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<int   >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void minScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
}
//////////////////////////////////////////////////////////////////////////
// max
// Element-wise maximum (CUDA counterpart): mirrors the min section with vmax.
namespace arithm
{
// Per-byte (4 x u8) maximum packed in one 32-bit word via the vmax4 SIMD intrinsic.
struct VMax4 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax4(a, b);
}
// Trivial ctor / copy-ctor so the functor can be passed by value into kernels.
__host__ __device__ __forceinline__ VMax4() {}
__host__ __device__ __forceinline__ VMax4(const VMax4&) {}
};
// Per-halfword (2 x u16) maximum packed in one 32-bit word via vmax2.
struct VMax2 : binary_function<uint, uint, uint>
{
__device__ __forceinline__ uint operator ()(uint a, uint b) const
{
return vmax2(a, b);
}
__host__ __device__ __forceinline__ VMax2() {}
__host__ __device__ __forceinline__ VMax2(const VMax2&) {}
};
}
// Traits specializations: element read/write sizes for device::transform.
namespace cv { namespace cuda { namespace device
{
template <> struct TransformFunctorTraits< arithm::VMax4 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <> struct TransformFunctorTraits< arithm::VMax2 > : arithm::ArithmFuncTraits<sizeof(uint), sizeof(uint)>
{
};
template <typename T> struct TransformFunctorTraits< maximum<T> > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
template <typename T> struct TransformFunctorTraits< binder2nd< maximum<T> > > : arithm::ArithmFuncTraits<sizeof(T), sizeof(T)>
{
};
}}}
// Host-side launchers; work is enqueued on `stream`.
namespace arithm
{
// Packed 4 x u8 max of two matrices reinterpreted as uint words.
void maxMat_v4(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
device::transform(src1, src2, dst, VMax4(), WithOutMask(), stream);
}
// Packed 2 x u16 max of two matrices reinterpreted as uint words.
void maxMat_v2(PtrStepSz<uint> src1, PtrStepSz<uint> src2, PtrStepSz<uint> dst, cudaStream_t stream)
{
device::transform(src1, src2, dst, VMax2(), WithOutMask(), stream);
}
// Generic per-element max of two matrices of element type T.
template <typename T> void maxMat(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream)
{
device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) src2, (PtrStepSz<T>) dst, maximum<T>(), WithOutMask(), stream);
}
template void maxMat<uchar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<schar >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<ushort>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<short >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<int   >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<float >(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
template void maxMat<double>(PtrStepSzb src1, PtrStepSzb src2, PtrStepSzb dst, cudaStream_t stream);
// Per-element max of a matrix with a scalar bound via bind2nd.
template <typename T> void maxScalar(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream)
{
device::transform((PtrStepSz<T>) src1, (PtrStepSz<T>) dst, cv::cuda::device::bind2nd(maximum<T>(), src2), WithOutMask(), stream);
}
template void maxScalar<uchar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<schar >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<ushort>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<short >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<int   >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<float >(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
template void maxScalar<double>(PtrStepSzb src1, double src2, PtrStepSzb dst, cudaStream_t stream);
}
#endif // CUDA_DISABLER
|
7d2192839c2aceb72de9687b222734d34523c3c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaclaw5_update_q_cuda.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int mbc = 1;
double dtdx = 1;
double dtdy = 1;
double *qold = NULL;
hipMalloc(&qold, XSIZE*YSIZE);
double *fm = NULL;
hipMalloc(&fm, XSIZE*YSIZE);
double *fp = NULL;
hipMalloc(&fp, XSIZE*YSIZE);
double *gm = NULL;
hipMalloc(&gm, XSIZE*YSIZE);
double *gp = NULL;
hipMalloc(&gp, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cudaclaw5_update_q_cuda), dim3(gridBlock),dim3(threadBlock), 0, 0, mbc,dtdx,dtdy,qold,fm,fp,gm,gp);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cudaclaw5_update_q_cuda), dim3(gridBlock),dim3(threadBlock), 0, 0, mbc,dtdx,dtdy,qold,fm,fp,gm,gp);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cudaclaw5_update_q_cuda), dim3(gridBlock),dim3(threadBlock), 0, 0, mbc,dtdx,dtdy,qold,fm,fp,gm,gp);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7d2192839c2aceb72de9687b222734d34523c3c0.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaclaw5_update_q_cuda.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int mbc = 1;
double dtdx = 1;
double dtdy = 1;
double *qold = NULL;
cudaMalloc(&qold, XSIZE*YSIZE);
double *fm = NULL;
cudaMalloc(&fm, XSIZE*YSIZE);
double *fp = NULL;
cudaMalloc(&fp, XSIZE*YSIZE);
double *gm = NULL;
cudaMalloc(&gm, XSIZE*YSIZE);
double *gp = NULL;
cudaMalloc(&gp, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cudaclaw5_update_q_cuda<<<gridBlock,threadBlock>>>(mbc,dtdx,dtdy,qold,fm,fp,gm,gp);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cudaclaw5_update_q_cuda<<<gridBlock,threadBlock>>>(mbc,dtdx,dtdy,qold,fm,fp,gm,gp);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cudaclaw5_update_q_cuda<<<gridBlock,threadBlock>>>(mbc,dtdx,dtdy,qold,fm,fp,gm,gp);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
fa93dbaff673504f202ba867d05bc8aaf41ecce9.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
  // Early-out on success; otherwise report (with line number when provided) and abort.
  const hipError_t status = hipGetLastError();
  if (status == hipSuccess) {
    return;
  }
  if (line >= 0) {
    fprintf(stderr, "Line %d: ", line);
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(status));
  exit(EXIT_FAILURE);
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 8
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.05f
#define rule2Scale 0.1f
#define rule3Scale 0.5f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_coherentPos;
glm::vec3 *dev_coherentVel;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount; // resolution
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
// Integer bit-mixing hash: scrambles `a` through alternating add/xor/shift
// rounds so that consecutive seeds (e.g. thread indices) map to decorrelated
// values. Callable from both host and device; used to seed the per-thread RNG.
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
// Returns a pseudo-random vec3 with each component uniform in [-1, 1].
// The thrust engine is seeded from hash(index * time), so each (seed, index)
// pair yields its own deterministic stream.
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
// Fills arr[0..N) with random vectors scaled by `scale`; one thread per element.
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= N) {
    return;  // tail guard: grid may overshoot N
  }
  arr[idx] = scale * generateRandomVec3(time, idx);
}
/**
* Initialize memory, update some globals
*/
// Allocates all device buffers for N boids, seeds random positions/velocities
// on the GPU, and derives the uniform-grid parameters from the flocking radii.
// Must be called once before stepSimulation; endSimulation frees the buffers.
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
// position of each body
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
// velocity 1 of each body
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
// velocity 2 of each body (ping-pong partner of dev_vel1)
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
// Random positions spread over the scene; note the second launch below uses
// the raw triple-chevron form — both launch the same kernel.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// Random initial velocities, scaled by maxSpeed.
kernGenerateRandomPosArray << <fullBlocksPerGrid, blockSize >> > (1, numObjects,
dev_vel1, maxSpeed);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
// Cell width = 2x the largest rule radius, so a neighbor search only has to
// inspect a small fixed set of adjacent cells.
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
// Shift the grid origin so cell (0,0,0) sits at the most negative corner.
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
// Per-boid sort keys/values (grid cell index + boid data index).
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
// Per-cell [start, end] ranges into the sorted boid arrays.
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
// Wrap raw pointers for thrust::sort_by_key.
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
// Cell-coherent copies of pos/vel for the 2.3 memory-coherent path.
hipMalloc((void**)&dev_coherentPos, N * sizeof(glm::vec3));
// NOTE(review): message says "ddev_coherentPos" (typo) — harmless, debug-only text.
checkCUDAErrorWithLine("hipMalloc ddev_coherentPos failed!");
hipMalloc((void**)&dev_coherentVel, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_coherentVel failed!");
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
// Writes each boid position into the VBO as a vec4 (xyz scaled to NDC-ish range, w=1).
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= N) {
    return;
  }
  const float c_scale = -1.0f / s_scale;
  vbo[4 * i + 0] = pos[i].x * c_scale;
  vbo[4 * i + 1] = pos[i].y * c_scale;
  vbo[4 * i + 2] = pos[i].z * c_scale;
  vbo[4 * i + 3] = 1.0f;
}
// Writes each boid velocity into the VBO as a vec4 (xyz offset by 0.3 for color, w=1).
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= N) {
    return;
  }
  vbo[4 * i + 0] = vel[i].x + 0.3f;
  vbo[4 * i + 1] = vel[i].y + 0.3f;
  vbo[4 * i + 2] = vel[i].z + 0.3f;
  vbo[4 * i + 3] = 1.0f;
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
// Launches the two copy kernels to fill the OpenGL-interop VBO pointers with
// the current positions and velocities, then blocks until they finish so the
// caller can safely render from the buffers.
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
// Computes the velocity delta for boid `iSelf` by applying the three Reynolds
// flocking rules over all N boids (O(N) scan per boid):
//   rule 1 (cohesion):   steer toward the perceived center of nearby boids;
//   rule 2 (separation): steer away from boids that are too close;
//   rule 3 (alignment):  match the average velocity of nearby boids.
// Returns the sum of the three (scaled) contributions; does not clamp speed.
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
  // Hoist this boid's own state out of the loop — the original re-read
  // pos[iSelf] (and vel[iSelf]) from global memory on every iteration.
  const glm::vec3 selfPos = pos[iSelf];
  const glm::vec3 selfVel = vel[iSelf];
  glm::vec3 v1(0.0f);  // rule 1 accumulator: sum of neighbor positions
  glm::vec3 v2(0.0f);  // rule 2 accumulator: sum of away-vectors
  glm::vec3 v3(0.0f);  // rule 3 accumulator: sum of neighbor velocities
  int n1 = 0, n2 = 0, n3 = 0;  // neighbor counts per rule radius
  for (int i = 0; i < N; i++) {
    if (i == iSelf) {
      continue;
    }
    const glm::vec3 otherPos = pos[i];
    float d = glm::distance(otherPos, selfPos);
    if (d < rule1Distance) {
      v1 += otherPos;
      n1++;
    }
    if (d < rule2Distance) {
      v2 -= (otherPos - selfPos);
      n2++;
    }
    if (d < rule3Distance) {
      v3 += vel[i];
      n3++;
    }
  }
  if (n1) {
    v1 = (v1 / float(n1) - selfPos) * rule1Scale;
  }
  if (n2) {
    v2 *= rule2Scale;
  }
  if (n3) {
    v3 = (v3 / float(n3) - selfVel) * rule3Scale;
  }
  return v1 + v2 + v3;
}
// Clamps the velocity magnitude to at most maxSpeed.
// BUG FIX: the original unconditionally did `thisVel = normalize(thisVel) * maxSpeed`,
// which (a) forced every boid to travel at exactly maxSpeed instead of capping,
// and (b) produced NaNs for a zero-length velocity (normalize divides by 0).
__device__ void clampSpeed(glm::vec3& thisVel) {
  float speed = glm::length(thisVel);
  if (speed > maxSpeed) {
    thisVel *= maxSpeed / speed;
  }
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
// Brute-force flocking update: one thread per boid; each scans all N boids via
// computeVelocityChange (O(N^2) total), clamps the result, and writes to vel2.
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisVel = vel1[index];
thisVel += computeVelocityChange(N, index, pos, vel1);
clampSpeed(thisVel);
// Record the new velocity into vel2. Question: why NOT vel1?
// Answer: other threads are still reading vel1 concurrently; writing vel1
// in-place would race with those reads — hence the ping-pong buffer pair.
vel2[index] = thisVel;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
// Euler-integrates each boid's position by vel * dt, wrapping positions that
// leave the [-scene_scale, scene_scale] cube to the opposite face.
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= N) {
    return;
  }
  glm::vec3 p = pos[i] + vel[i] * dt;
  // Wrap the boids around so we don't lose them.
  if (p.x < -scene_scale) { p.x = scene_scale; }
  if (p.y < -scene_scale) { p.y = scene_scale; }
  if (p.z < -scene_scale) { p.z = scene_scale; }
  if (p.x > scene_scale) { p.x = -scene_scale; }
  if (p.y > scene_scale) { p.y = -scene_scale; }
  if (p.z > scene_scale) { p.z = -scene_scale; }
  pos[i] = p;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
// Flattens a 3D cell coordinate into a 1D index, row-major with x varying
// fastest (so iterating z-outermost, x-innermost walks memory contiguously).
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  // x, y, z are in the range of [0, gridResolution-1]
  return x + gridResolution * (y + gridResolution * z);
}
// Labels each boid with its flattened uniform-grid cell index (sort key) and
// records an identity boid->data index (sort value) for the later key/value sort.
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
// shift global position to grid 3D index
glm::vec3 gridIdx = (thisPos - gridMin) * inverseCellWidth;
// NOTE: the float components are truncated to int by the implicit conversion
// in this call; since positions were shifted by gridMin they are non-negative,
// so truncation acts as floor here.
gridIndices[index] = gridIndex3Dto1D(gridIdx.x, gridIdx.y, gridIdx.z, gridResolution);
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
// Fills intBuffer[0..N) with `value`; used to reset cell start/end tables
// (e.g. to -1, marking cells that contain no boids).
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= N) {
    return;
  }
  intBuffer[i] = value;
}
// Given particleGridIndices sorted by cell, records for each occupied cell the
// inclusive [start, end] range of boids that fall in it. Each thread compares
// its key with the previous one; a mismatch marks a cell boundary.
// Assumes the start/end tables were reset (e.g. to -1) beforehand.
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  int thisIndex = particleGridIndices[index];
  int lastIndex = index > 0 ? particleGridIndices[index - 1] : -1;
  if (thisIndex != lastIndex) {
    // New cell begins here; the previous cell (if any) ended one entry back.
    gridCellStartIndices[thisIndex] = index;
    if (lastIndex != -1) {
      gridCellEndIndices[lastIndex] = index - 1;
    }
  }
  // BUG FIX: the last cell's end index was never written — transitions only
  // close the *previous* cell, so the cell containing the final sorted boid
  // kept a stale end value. Close it explicitly at the array tail.
  if (index == N - 1) {
    gridCellEndIndices[thisIndex] = index;
  }
}
__global__ void kernUpdateVelNeighborSearchScattered(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  int *particleArrayIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  // Update a boid's velocity using the uniform grid so only boids in the
  // surrounding 2x2x2 block of cells are examined instead of all N.
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 thisPos = pos[index];
  glm::vec3 thisVel = vel1[index];
  // round() snaps to the nearest cell corner, so cells [gridIdx-1, gridIdx]
  // on each axis cover the whole search radius (the caller sizes cells at
  // 2x the largest rule distance).
  glm::vec3 gridPos = (thisPos - gridMin) * inverseCellWidth;
  glm::ivec3 gridIdx(round(gridPos.x), round(gridPos.y), round(gridPos.z));
  glm::vec3 v1(0), v2(0), v3(0);
  int n1 = 0, n2 = 0, n3 = 0;
  for (int iZ = gridIdx.z - 1; iZ <= gridIdx.z; iZ++) {
    if (iZ < 0 || iZ >= gridResolution)
      continue;
    for (int iY = gridIdx.y - 1; iY <= gridIdx.y; iY++) {
      if (iY < 0 || iY >= gridResolution)
        continue;
      for (int iX = gridIdx.x - 1; iX <= gridIdx.x; iX++) {
        // Bounds-check each axis separately. A range check on the *linear*
        // index would let an out-of-range coordinate (e.g. iX == -1) alias
        // into a different valid cell, mixing in boids from the far side of
        // the grid.
        if (iX < 0 || iX >= gridResolution)
          continue;
        int cellIdx = gridIndex3Dto1D(iX, iY, iZ, gridResolution);
        // - For each cell, read the start/end indices in the boid pointer
        //   array; start < 0 marks an empty cell.
        int startIdx = gridCellStartIndices[cellIdx];
        int endIdx = gridCellEndIndices[cellIdx];
        if (startIdx < 0)
          continue;
        // - Access each boid in the cell and compute velocity change from
        //   the boids rules, if it is within the neighborhood distance.
        for (int j = startIdx; j <= endIdx; j++) {
          int i = particleArrayIndices[j];
          if (i == index)
            continue;  // a boid never flocks with itself
          float d = glm::distance(pos[i], thisPos);
          // Rule 1: cohesion - steer toward the local perceived center of mass.
          if (d < rule1Distance) { v1 += pos[i]; ++n1; }
          // Rule 2: separation - keep a minimum distance from neighbors.
          if (d < rule2Distance) { v2 -= (pos[i] - thisPos); ++n2; }
          // Rule 3: alignment - match the velocity of surrounding boids.
          if (d < rule3Distance) { v3 += vel1[i]; ++n3; }
        }
      }
    }
  }
  if (n1) {
    v1 = (v1 / float(n1) - thisPos) * rule1Scale;
  }
  if (n2) {
    v2 *= rule2Scale;
  }
  if (n3) {
    v3 = (v3 / float(n3) - thisVel) * rule3Scale;
  }
  thisVel += v1 + v2 + v3;
  // Clamp the speed change before putting the new speed in vel2.
  clampSpeed(thisVel);
  vel2[index] = thisVel;
}
__global__ void kernUpdateVelNeighborSearchCoherent(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  // Same neighbor search as kernUpdateVelNeighborSearchScattered, but with
  // one less level of indirection: gridCellStartIndices/gridCellEndIndices
  // refer directly into pos and vel1, which were reshuffled into
  // cell-coherent order by kernRearrangeBoidData.
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 thisPos = pos[index];
  glm::vec3 thisVel = vel1[index];
  // - Identify the grid cell that this particle is in; round() snaps to the
  //   nearest cell corner so the 2x2x2 block below covers the search radius.
  glm::vec3 gridPos = (thisPos - gridMin) * inverseCellWidth;
  glm::ivec3 gridIdx(round(gridPos.x), round(gridPos.y), round(gridPos.z));
  glm::vec3 v1(0), v2(0), v3(0);
  int n1 = 0, n2 = 0, n3 = 0;
  // z is the slowest-varying axis of the linear cell index (see
  // gridIndex3Dto1D), so iterating z-outermost visits cells - and hence the
  // coherent boid data - in increasing memory order.
  for (int iZ = gridIdx.z - 1; iZ <= gridIdx.z; iZ++) {
    if (iZ < 0 || iZ >= gridResolution)
      continue;
    for (int iY = gridIdx.y - 1; iY <= gridIdx.y; iY++) {
      if (iY < 0 || iY >= gridResolution)
        continue;
      for (int iX = gridIdx.x - 1; iX <= gridIdx.x; iX++) {
        // Per-axis bounds checks: the previous linear check
        // (cellIdx > R*R*R) was off by one and also let out-of-range
        // coordinates alias into other valid cells.
        if (iX < 0 || iX >= gridResolution)
          continue;
        int cellIdx = gridIndex3Dto1D(iX, iY, iZ, gridResolution);
        // - Read this cell's start/end slice; start < 0 marks an empty cell.
        int startIdx = gridCellStartIndices[cellIdx];
        int endIdx = gridCellEndIndices[cellIdx];
        if (startIdx < 0)
          continue;
        for (int j = startIdx; j <= endIdx; j++) {
          if (j == index)
            continue;  // exclude self, matching the scattered kernel
          float d = glm::distance(pos[j], thisPos);
          // Rule 1: cohesion toward the local perceived center of mass.
          if (d < rule1Distance) { v1 += pos[j]; ++n1; }
          // Rule 2: separation from close neighbors.
          if (d < rule2Distance) { v2 -= (pos[j] - thisPos); ++n2; }
          // Rule 3: alignment with neighbors' velocities.
          if (d < rule3Distance) { v3 += vel1[j]; ++n3; }
        }
      }
    }
  }
  if (n1) {
    v1 = (v1 / float(n1) - thisPos) * rule1Scale;
  }
  if (n2) {
    v2 *= rule2Scale;
  }
  if (n3) {
    v3 = (v3 / float(n3) - thisVel) * rule3Scale;
  }
  thisVel += v1 + v2 + v3;
  // - Clamp the speed change before putting the new speed in vel2.
  //   clampSpeed already renormalizes to maxSpeed; the extra glm::normalize
  //   previously applied on top of it was redundant.
  clampSpeed(thisVel);
  vel2[index] = thisVel;
}
__global__ void kernRearrangeBoidData(int N, int* arrayIndices, glm::vec3* pos, glm::vec3* coherentPos, glm::vec3* vel, glm::vec3* coherentVel)
{
  // Gather pos/vel into cell-sorted order so boids sharing a grid cell are
  // contiguous in the coherent buffers (used by the 2.3 coherent kernel).
  int tid = threadIdx.x + (blockIdx.x * blockDim.x);
  if (tid >= N) {
    return;
  }
  // arrayIndices[tid] points at this boid's slot in the unsorted buffers.
  int src = arrayIndices[tid];
  coherentPos[tid] = pos[src];
  coherentVel[tid] = vel[src];
}
/**
 * Step the entire N-body simulation by `dt` seconds using the O(N^2)
 * brute-force neighbor scan.
 */
void Boids::stepSimulationNaive(float dt) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  // Launches converted to hipLaunchKernelGGL for consistency with every
  // other (hipified) launch in this file; the raw "<< <" chevron spelling
  // had been left behind by the conversion.
  hipLaunchKernelGGL(( kernUpdateVelocityBruteForce), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2);
  hipDeviceSynchronize();
  hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel2);
  hipDeviceSynchronize();
  // Ping-pong the velocity buffers: the velocities just written (vel2)
  // become the read buffer (vel1) for the next step.
  glm::vec3 *tmp = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = tmp;
}
void Boids::stepSimulationScatteredGrid(float dt) {
  // Uniform-grid neighbor search (scattered layout) using a Thrust key sort.
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  // Cell buffers are sized by gridCellCount, so their kernels need their own
  // launch dimensions.
  dim3 gridCellThreadNum((gridCellCount + blockSize - 1) / blockSize);
  // Wipe grid data: start = -1 marks empty cells; end defaults to the last
  // boid index so the final occupied cell's end is correct (see
  // kernIdentifyCellStartEnd, which never writes the last cell's end).
  hipLaunchKernelGGL(( kernResetIntBuffer), dim3(gridCellThreadNum), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellStartIndices, -1);
  hipLaunchKernelGGL(( kernResetIntBuffer), dim3(gridCellThreadNum), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellEndIndices, numObjects-1);
  // - label each particle with its array index as well as its grid index.
  //   Use 2x width grids.
  hipLaunchKernelGGL(( kernComputeIndices), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, gridSideCount, gridMinimum, gridInverseCellWidth,
    dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("ComputeIndices failed!");
  // The sort below consumes the kernel's output, so finish it first.
  hipDeviceSynchronize();
  // - Unstable key sort using Thrust. A stable sort isn't necessary, but you
  //   are welcome to do a performance comparison.
  thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
  // - Naively unroll the loop for finding the start and end indices of each
  //   cell's data pointers in the array of boid indices
  hipDeviceSynchronize();
  hipLaunchKernelGGL(( kernIdentifyCellStartEnd) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("IdentifyCellStartEnd failed!");
  hipDeviceSynchronize();
  // - Perform velocity updates using neighbor search (reads vel1, writes vel2)
  hipLaunchKernelGGL(( kernUpdateVelNeighborSearchScattered) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0,
    numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices,
    dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("UpdateVelNeighborSearchScattered failed!");
  hipDeviceSynchronize();
  // - Update positions from the freshly computed velocities
  hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("UpdatePos failed!");
  hipDeviceSynchronize();
  // - Ping-pong buffers: vel2 becomes next step's read buffer
  glm::vec3* tmp = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = tmp;
}
/**
 * Uniform-grid neighbor search on cell-coherent data: like the scattered
 * version, but boid data is reshuffled so that boids in the same cell are
 * contiguous, removing one level of indirection from the search kernel.
 */
void Boids::stepSimulationCoherentGrid(float dt) {
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  // BUG FIX: the cell start/end buffers hold gridCellCount entries, so the
  // reset launches must be sized by gridCellCount, not numObjects.
  // Otherwise stale start/end data survives into this frame whenever
  // gridCellCount > numObjects (matches stepSimulationScatteredGrid).
  dim3 gridCellThreadNum((gridCellCount + blockSize - 1) / blockSize);
  // Wipe grid data: start = -1 marks empty cells; end defaults to the last
  // boid index so the final occupied cell's end is correct.
  hipLaunchKernelGGL(( kernResetIntBuffer), dim3(gridCellThreadNum), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellStartIndices, -1);
  hipLaunchKernelGGL(( kernResetIntBuffer), dim3(gridCellThreadNum), dim3(blockSize), 0, 0, gridCellCount, dev_gridCellEndIndices, numObjects - 1);
  // - Label each particle with its array index as well as its grid index.
  hipLaunchKernelGGL(( kernComputeIndices), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, gridSideCount, gridMinimum, gridInverseCellWidth,
    dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("ComputeIndices failed!");
  hipDeviceSynchronize();
  // - Unstable key sort using Thrust.
  thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
  hipDeviceSynchronize();
  // - Find the start and end indices of each cell's data in the sorted array.
  hipLaunchKernelGGL(( kernIdentifyCellStartEnd), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("IdentifyCellStartEnd failed!");
  // - BIG DIFFERENCE: use the rearranged index buffer to reshuffle all the
  //   particle data into cell-coherent order.
  hipLaunchKernelGGL(( kernRearrangeBoidData), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_particleArrayIndices, dev_pos, dev_coherentPos, dev_vel1, dev_coherentVel);
  checkCUDAErrorWithLine("rearrange boid data failed!");
  hipDeviceSynchronize();
  // - Perform velocity updates using neighbor search on the coherent buffers.
  hipLaunchKernelGGL(( kernUpdateVelNeighborSearchCoherent), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0,
    numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices,
    dev_gridCellEndIndices, dev_coherentPos, dev_coherentVel, dev_vel2);
  checkCUDAErrorWithLine("UpdateVelNeighborSearchCoherent failed!");
  hipDeviceSynchronize();
  // - Update positions of the coherent copy (it becomes dev_pos below).
  hipLaunchKernelGGL(( kernUpdatePos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_coherentPos, dev_vel2);
  checkCUDAErrorWithLine("UpdatePos failed!");
  hipDeviceSynchronize();
  // - Ping-pong: dev_vel2 was written from dev_coherentVel, so it is already
  //   coherent; after the swap dev_vel1 is coherent for the next frame.
  glm::vec3* tmp = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = tmp;
  // Make dev_pos the coherent, position-updated buffer for the next frame.
  tmp = dev_pos;
  dev_pos = dev_coherentPos;
  dev_coherentPos = tmp;
}
void Boids::endSimulation() {
  // Release the core simulation buffers allocated in initSimulation.
  hipFree(dev_pos);
  hipFree(dev_vel1);
  hipFree(dev_vel2);
  // Release the uniform-grid bookkeeping buffers (2.1) ...
  hipFree(dev_particleArrayIndices);
  hipFree(dev_particleGridIndices);
  hipFree(dev_gridCellStartIndices);
  hipFree(dev_gridCellEndIndices);
  // ... and the cell-coherent copies of the boid data (2.3).
  hipFree(dev_coherentPos);
  hipFree(dev_coherentVel);
}
void Boids::unitTest() {
  // LOOK-1.2 Feel free to write additional tests here.
  // test unstable sort
  int* dev_intKeys;
  int* dev_intValues;
  int N = 10;
  std::unique_ptr<int[]>intKeys{ new int[N] };
  std::unique_ptr<int[]>intValues{ new int[N] };
  // Keys deliberately contain duplicates and are out of order, so both the
  // sort and the later start/end identification are actually exercised.
  intKeys[0] = 0; intValues[0] = 0;
  intKeys[1] = 1; intValues[1] = 1;
  intKeys[2] = 0; intValues[2] = 2;
  intKeys[3] = 3; intValues[3] = 3;
  intKeys[4] = 0; intValues[4] = 4;
  intKeys[5] = 2; intValues[5] = 5;
  intKeys[6] = 2; intValues[6] = 6;
  intKeys[7] = 0; intValues[7] = 7;
  intKeys[8] = 5; intValues[8] = 8;
  intKeys[9] = 6; intValues[9] = 9;
  hipMalloc((void**)&dev_intKeys, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
  hipMalloc((void**)&dev_intValues, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  std::cout << "before unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // How to copy data to the GPU
  hipMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, hipMemcpyHostToDevice);
  hipMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, hipMemcpyHostToDevice);
  // Wrap device vectors in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  // LOOK-2.1 Example for using thrust::sort_by_key
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
  // How to copy data back to the CPU side from the GPU
  hipMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
  hipMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");
  std::cout << "after unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // 2.1 Unit Tests
  // Feed the sorted keys (still on the device) through
  // kernIdentifyCellStartEnd and print each cell's [start, end] slice.
  int gridSize = 7;
  int* dev_startIndices;
  int* dev_endIndices;
  hipMalloc((void**)&dev_startIndices, gridSize * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_startIndices failed!");
  hipMalloc((void**)&dev_endIndices, gridSize * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_endIndices failed!");
  // Same reset convention as the simulation: start = -1, end = N - 1.
  kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (gridSize, dev_startIndices, -1);
  kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (gridSize, dev_endIndices, N - 1);
  hipDeviceSynchronize();
  std::unique_ptr<int[]>startIndices{ new int[gridSize] };
  std::unique_ptr<int[]>endIndices{ new int[gridSize] };
  kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (N, dev_intKeys, dev_startIndices, dev_endIndices);
  hipMemcpy(startIndices.get(), dev_startIndices, sizeof(int) * gridSize, hipMemcpyDeviceToHost);
  hipMemcpy(endIndices.get(), dev_endIndices, sizeof(int) * gridSize, hipMemcpyDeviceToHost);
  std::cout << "grid cell: " << std::endl;
  for (int i = 0; i < gridSize; i++) {
    std::cout << " cell: " << i;
    std::cout << " start: " << startIndices[i];
    std::cout << " end: " << endIndices[i] << std::endl;
  }
  // Exercise kernComputeIndices on random positions in a tiny 2x2x2 grid
  // (cell width 1, grid minimum at (-1,-1,-1)) and print the labels.
  int* dev_gridIndices;
  int* dev_arrayIndices;
  hipMalloc((void**)&dev_gridIndices, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_gridIndices failed!");
  hipMalloc((void**)&dev_arrayIndices, N * sizeof(int));
  checkCUDAErrorWithLine("hipMalloc dev_arrayIndices failed!");
  glm::vec3* dev_p;
  std::unique_ptr<glm::vec3[]>pos{ new glm::vec3[N] };
  hipMalloc((void**)&dev_p, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("hipMalloc dev_p failed!");
  hipLaunchKernelGGL(( kernGenerateRandomPosArray) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, 1, N, dev_p, 1.0f);
  checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
  float gridCellWidth = 1.0f;
  hipLaunchKernelGGL(( kernComputeIndices), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, N, 2, glm::vec3(-1, -1, -1) * gridCellWidth, 1.0f / gridCellWidth,
    dev_p, dev_arrayIndices, dev_gridIndices);
  std::unique_ptr<int[]>gridIndices{ new int[N] };
  std::unique_ptr<int[]>arrayIndices{ new int[N] };
  hipMemcpy(gridIndices.get(), dev_gridIndices, sizeof(int) * N, hipMemcpyDeviceToHost);
  hipMemcpy(arrayIndices.get(), dev_arrayIndices, sizeof(int) * N, hipMemcpyDeviceToHost);
  hipMemcpy(pos.get(), dev_p, sizeof(glm::vec3) * N, hipMemcpyDeviceToHost);
  std::cout << "compute indices: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " pos: " << pos[i].x << " " << pos[i].y << " " << pos[i].z;
    std::cout << " grid: " << gridIndices[i];
    std::cout << " array: " << arrayIndices[i] << std::endl;
  }
  // cleanup
  hipFree(dev_intKeys);
  hipFree(dev_intValues);
  hipFree(dev_startIndices);
  hipFree(dev_endIndices);
  hipFree(dev_gridIndices);
  hipFree(dev_arrayIndices);
  hipFree(dev_p);
  checkCUDAErrorWithLine("hipFree failed!");
  return;
}
| fa93dbaff673504f202ba867d05bc8aaf41ecce9.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
// Classic min/max macros; guarded so they don't clash with other definitions.
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
// Wraps checkCUDAError so each call site reports its own source line.
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
 * Check for CUDA errors; print and exit if there was a problem.
 * `line` < 0 suppresses the line-number prefix (default for direct calls;
 * the checkCUDAErrorWithLine macro passes __LINE__).
 */
void checkCUDAError(const char *msg, int line = -1) {
  cudaError_t err = cudaGetLastError();
  if (cudaSuccess == err) {
    return;  // no pending error
  }
  if (line >= 0) {
    fprintf(stderr, "Line %d: ", line);
  }
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
  exit(EXIT_FAILURE);
}
/*****************
 * Configuration *
 *****************/
/*! Block size used for CUDA kernel launch. */
// NOTE(review): 8 threads per block is far below one warp (32); fine for
// debugging, but a multiple of 32 (e.g. 128) is the usual choice — confirm
// this value is intentional.
#define blockSize 8
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
// Distances are neighborhood radii per rule; scales weight each rule's
// contribution to the velocity change.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.05f
#define rule2Scale 0.1f
#define rule3Scale 0.5f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
 * Kernel state (pointers are device pointers) *
 ***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
// Scratch buffers holding pos/vel gathered into cell-sorted order.
glm::vec3 *dev_coherentPos;
glm::vec3 *dev_coherentVel;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount; // resolution
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum; // world-space corner of grid cell (0, 0, 0)
/******************
 * initSimulation *
 ******************/
/**
 * Integer mixing hash (fixed add/xor/shift sequence) used to decorrelate
 * consecutive seeds before feeding them to the per-thread RNG.
 */
__host__ __device__ unsigned int hash(unsigned int a) {
  a += 0x7ed55d16 + (a << 12);
  a ^= 0xc761c23c ^ (a >> 19);
  a += 0x165667b1 + (a << 5);
  a = (a + 0xd3a2646c) ^ (a << 9);
  a += 0xfd7046c5 + (a << 3);
  a ^= 0xb55a4f09 ^ (a >> 16);
  return a;
}
/**
 * LOOK-1.2 - this is a typical helper function for a CUDA kernel.
 * Generate a pseudo-random vec3 with components uniform in [-1, 1],
 * seeded per boid from (index * time).
 */
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
  thrust::default_random_engine rng(hash((int)(index * time)));
  thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
  float x = (float)unitDistrib(rng);
  float y = (float)unitDistrib(rng);
  float z = (float)unitDistrib(rng);
  return glm::vec3(x, y, z);
}
/**
 * LOOK-1.2 - This is a basic CUDA kernel.
 * CUDA kernel for generating boids with a specified mass randomly around the star.
 */
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
  int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (tid >= N) {
    return;
  }
  // Scale a random unit-cube sample into the simulation volume.
  arr[tid] = scale * generateRandomVec3(time, tid);
}
/**
 * Initialize memory, update some globals
 */
void Boids::initSimulation(int N) {
  numObjects = N;
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  // LOOK-1.2 - This is basic CUDA memory management and error checking.
  // Don't forget to cudaFree in Boids::endSimulation.
  // position of each body
  cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
  // velocity 1 of each body
  cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
  // velocity 2 of each body (ping-pong partner of dev_vel1)
  cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
  // LOOK-1.2 - This is a typical CUDA kernel invocation.
  kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
    dev_pos, scene_scale);
  checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
  // Reuse the same kernel to seed random initial velocities, scaled to maxSpeed.
  kernGenerateRandomPosArray << <fullBlocksPerGrid, blockSize >> > (1, numObjects,
    dev_vel1, maxSpeed);
  checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
  // LOOK-2.1 computing grid params
  // Cell width is twice the largest rule distance, so a boid's whole search
  // radius fits inside a 2x2x2 block of cells.
  gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
  int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
  gridSideCount = 2 * halfSideCount;
  gridCellCount = gridSideCount * gridSideCount * gridSideCount;
  gridInverseCellWidth = 1.0f / gridCellWidth;
  float halfGridWidth = gridCellWidth * halfSideCount;
  // Shift the grid origin so the grid is centered on the simulation volume.
  gridMinimum.x -= halfGridWidth;
  gridMinimum.y -= halfGridWidth;
  gridMinimum.z -= halfGridWidth;
  // Buffers for the uniform-grid neighbor search (2.1)...
  cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
  cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
  cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
  cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
  dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
  dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
  // ...and the cell-coherent copies of the boid data (2.3).
  // NOTE(review): "ddev_coherentPos" below is a typo in the label text only.
  cudaMalloc((void**)&dev_coherentPos, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc ddev_coherentPos failed!");
  cudaMalloc((void**)&dev_coherentVel, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_coherentVel failed!");
  cudaDeviceSynchronize();
}
/******************
 * copyBoidsToVBO *
 ******************/
/**
 * Copy the boid positions into the VBO so that they can be drawn by OpenGL.
 */
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
  int tid = threadIdx.x + (blockIdx.x * blockDim.x);
  if (tid >= N) {
    return;
  }
  // Scale each component by -1/s_scale and write xyzw (w = 1) into the VBO.
  float c_scale = -1.0f / s_scale;
  vbo[4 * tid + 0] = pos[tid].x * c_scale;
  vbo[4 * tid + 1] = pos[tid].y * c_scale;
  vbo[4 * tid + 2] = pos[tid].z * c_scale;
  vbo[4 * tid + 3] = 1.0f;
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
  // Copy boid velocities into the VBO, offset by 0.3 per channel (used as a
  // per-boid color by the renderer; w = 1).
  int tid = threadIdx.x + (blockIdx.x * blockDim.x);
  if (tid >= N) {
    return;
  }
  vbo[4 * tid + 0] = vel[tid].x + 0.3f;
  vbo[4 * tid + 1] = vel[tid].y + 0.3f;
  vbo[4 * tid + 2] = vel[tid].z + 0.3f;
  vbo[4 * tid + 3] = 1.0f;
}
/**
 * Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
 */
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
  dim3 blocks((numObjects + blockSize - 1) / blockSize);
  kernCopyPositionsToVBO<<<blocks, blockSize>>>(numObjects, dev_pos, vbodptr_positions, scene_scale);
  kernCopyVelocitiesToVBO<<<blocks, blockSize>>>(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
  checkCUDAErrorWithLine("copyBoidsToVBO failed!");
  // Block until both copies finish so the caller can safely render the VBOs.
  cudaDeviceSynchronize();
}
/******************
 * stepSimulation *
 ******************/
/**
 * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
 * __device__ code can be called from a __global__ context
 * Compute the velocity delta for boid `iSelf` from the three boids rules,
 * scanning all `N` boids in the `pos` and `vel` arrays.
 */
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
  // Accumulators, one per rule. Explicitly zero-initialized: glm's default
  // constructor does not guarantee zeroed components (that depends on
  // GLM_FORCE_CTOR_INIT), and these are summed into below; this also matches
  // the vec3(0) style used by the neighbor-search kernels in this file.
  glm::vec3 v1(0), v2(0), v3(0);
  // Neighbour count per rule.
  int n1 = 0, n2 = 0, n3 = 0;
  for (int i = 0; i < N; i++) {
    if (i != iSelf) {
      float d = glm::distance(pos[i], pos[iSelf]);
      // Rule 1: boids fly towards their local perceived center of mass,
      // which excludes themselves.
      if (d < rule1Distance) { v1 += pos[i]; ++n1; }
      // Rule 2: boids try to stay a distance d away from each other.
      if (d < rule2Distance) { v2 -= (pos[i] - pos[iSelf]); ++n2; }
      // Rule 3: boids try to match the speed of surrounding boids.
      if (d < rule3Distance) { v3 += vel[i]; ++n3; }
    }
  }
  if (n1) {
    v1 = (v1 / float(n1) - pos[iSelf]) * rule1Scale;
  }
  if (n2) {
    v2 *= rule2Scale;
  }
  if (n3) {
    v3 = (v3 / float(n3) - vel[iSelf]) * rule3Scale;
  }
  return v1 + v2 + v3;
}
/**
 * Renormalize a velocity to exactly maxSpeed, guarding against a zero-length
 * vector: glm::normalize of a zero vector divides by zero and produces NaNs
 * that would then propagate through every later update of that boid.
 * NOTE(review): despite the name this is not a true clamp — it forces the
 * speed to maxSpeed even when the boid was slower; confirm that is intended.
 */
__device__ void clampSpeed(glm::vec3& thisVel) {
  float speed = glm::length(thisVel);
  if (speed > 0.0f) {
    thisVel *= maxSpeed / speed;
  }
}
/**
 * TODO-1.2 implement basic flocking
 * For each of the `N` bodies, compute a new velocity from the flocking rules
 * by scanning all other boids (brute force, O(N) per boid).
 */
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
  glm::vec3 *vel1, glm::vec3 *vel2) {
  int tid = threadIdx.x + (blockIdx.x * blockDim.x);
  if (tid >= N) {
    return;
  }
  // Compute a new velocity based on pos and vel1.
  glm::vec3 newVel = vel1[tid] + computeVelocityChange(N, tid, pos, vel1);
  clampSpeed(newVel);
  // Record the new velocity into vel2, not vel1: other threads are still
  // reading vel1 this step, so writing in place would race (ping-pong).
  vel2[tid] = newVel;
}
/**
 * LOOK-1.2 Since this is pretty trivial, we implemented it for you.
 * For each of the `N` bodies, update its position based on its current velocity.
 */
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
  int tid = threadIdx.x + (blockIdx.x * blockDim.x);
  if (tid >= N) {
    return;
  }
  // Integrate position by velocity over the timestep.
  glm::vec3 p = pos[tid] + vel[tid] * dt;
  // Wrap each axis so boids leaving one side of the scene re-enter on the
  // opposite side (we don't lose them).
  for (int axis = 0; axis < 3; ++axis) {
    if (p[axis] < -scene_scale) {
      p[axis] = scene_scale;
    } else if (p[axis] > scene_scale) {
      p[axis] = -scene_scale;
    }
  }
  pos[tid] = p;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
  // Flatten a 3D cell coordinate (each component in [0, gridResolution-1])
  // into a linear index; x varies fastest, z slowest.
  return x + gridResolution * (y + gridResolution * z);
}
__global__ void kernComputeIndices(int N, int gridResolution,
  glm::vec3 gridMin, float inverseCellWidth,
  glm::vec3 *pos, int *indices, int *gridIndices) {
  // Label each boid with the 1D index of the uniform-grid cell it occupies,
  // and record its own array index so both arrays can be key-sorted together.
  int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (tid >= N) {
    return;
  }
  // Translate world-space position into continuous grid coordinates; the
  // implicit float->int truncation in the call below selects the enclosing cell.
  glm::vec3 cellCoord = (pos[tid] - gridMin) * inverseCellWidth;
  gridIndices[tid] = gridIndex3Dto1D(cellCoord.x, cellCoord.y, cellCoord.z, gridResolution);
  indices[tid] = tid;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
  // Fill the first N entries of intBuffer with `value` (e.g. -1 to mark
  // grid cells that contain no boids).
  int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (tid >= N) {
    return;
  }
  intBuffer[tid] = value;
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
  int *gridCellStartIndices, int *gridCellEndIndices) {
  // Identify the start point of each cell in the sorted gridIndices array.
  // This is basically a parallel unrolling of a loop that goes
  // "this index doesn't match the one before it, must be a new cell!"
  //
  // Preconditions: particleGridIndices is sorted ascending, start indices are
  // pre-filled with -1 (empty-cell marker) and end indices with N-1 by the
  // caller — this kernel only writes an end index when it sees the *next*
  // cell begin, so the final occupied cell relies on the pre-filled value.
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  int thisIndex = particleGridIndices[index];
  // -1 sentinel for element 0 so the first element always starts a new cell.
  int lastIndex = index > 0 ? particleGridIndices[index - 1] : -1;
  if (thisIndex != lastIndex) {
    gridCellStartIndices[thisIndex] = index;
    if (lastIndex != -1)
      gridCellEndIndices[lastIndex] = index - 1;
  }
}
__global__ void kernUpdateVelNeighborSearchScattered(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  int *particleArrayIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  // Update a boid's velocity using the uniform grid so only boids in the
  // surrounding 2x2x2 block of cells are examined instead of all N.
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 thisPos = pos[index];
  glm::vec3 thisVel = vel1[index];
  // round() snaps to the nearest cell corner, so cells [gridIdx-1, gridIdx]
  // on each axis cover the whole search radius (the caller sizes cells at
  // 2x the largest rule distance).
  glm::vec3 gridPos = (thisPos - gridMin) * inverseCellWidth;
  glm::ivec3 gridIdx(round(gridPos.x), round(gridPos.y), round(gridPos.z));
  glm::vec3 v1(0), v2(0), v3(0);
  int n1 = 0, n2 = 0, n3 = 0;
  for (int iZ = gridIdx.z - 1; iZ <= gridIdx.z; iZ++) {
    if (iZ < 0 || iZ >= gridResolution)
      continue;
    for (int iY = gridIdx.y - 1; iY <= gridIdx.y; iY++) {
      if (iY < 0 || iY >= gridResolution)
        continue;
      for (int iX = gridIdx.x - 1; iX <= gridIdx.x; iX++) {
        // Bounds-check each axis separately. A range check on the *linear*
        // index would let an out-of-range coordinate (e.g. iX == -1) alias
        // into a different valid cell, mixing in boids from the far side of
        // the grid.
        if (iX < 0 || iX >= gridResolution)
          continue;
        int cellIdx = gridIndex3Dto1D(iX, iY, iZ, gridResolution);
        // - For each cell, read the start/end indices in the boid pointer
        //   array; start < 0 marks an empty cell.
        int startIdx = gridCellStartIndices[cellIdx];
        int endIdx = gridCellEndIndices[cellIdx];
        if (startIdx < 0)
          continue;
        // - Access each boid in the cell and compute velocity change from
        //   the boids rules, if it is within the neighborhood distance.
        for (int j = startIdx; j <= endIdx; j++) {
          int i = particleArrayIndices[j];
          if (i == index)
            continue;  // a boid never flocks with itself
          float d = glm::distance(pos[i], thisPos);
          // Rule 1: cohesion - steer toward the local perceived center of mass.
          if (d < rule1Distance) { v1 += pos[i]; ++n1; }
          // Rule 2: separation - keep a minimum distance from neighbors.
          if (d < rule2Distance) { v2 -= (pos[i] - thisPos); ++n2; }
          // Rule 3: alignment - match the velocity of surrounding boids.
          if (d < rule3Distance) { v3 += vel1[i]; ++n3; }
        }
      }
    }
  }
  if (n1) {
    v1 = (v1 / float(n1) - thisPos) * rule1Scale;
  }
  if (n2) {
    v2 *= rule2Scale;
  }
  if (n3) {
    v3 = (v3 / float(n3) - thisVel) * rule3Scale;
  }
  thisVel += v1 + v2 + v3;
  // - Clamp the speed change before putting the new speed in vel2.
  clampSpeed(thisVel);
  vel2[index] = thisVel;
}
__global__ void kernUpdateVelNeighborSearchCoherent(
  int N, int gridResolution, glm::vec3 gridMin,
  float inverseCellWidth, float cellWidth,
  int *gridCellStartIndices, int *gridCellEndIndices,
  glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
  // Same neighbor search as the scattered version, but pos/vel1 are already
  // in cell-sorted (coherent) order, so gridCellStart/EndIndices index them
  // directly — no particleArrayIndices indirection.
  int index = threadIdx.x + (blockIdx.x * blockDim.x);
  if (index >= N) {
    return;
  }
  glm::vec3 thisPos = pos[index];
  glm::vec3 thisVel = vel1[index];
  // - Identify the grid cell that this particle is in and the 2x2x2 block of
  //   cells around the nearest cell corner that may contain neighbors.
  glm::vec3 gridPos = (thisPos - gridMin) * inverseCellWidth;
  glm::ivec3 gridIdx(round(gridPos.x), round(gridPos.y), round(gridPos.z));
  glm::vec3 v1(0), v2(0), v3(0);
  int n1 = 0, n2 = 0, n3 = 0;
  for (int iZ = gridIdx.z - 1; iZ <= gridIdx.z; iZ++)
  for (int iY = gridIdx.y - 1; iY <= gridIdx.y; iY++)
  for (int iX = gridIdx.x - 1; iX <= gridIdx.x; iX++) {
    // Per-axis bounds test. The old flat-index test was both off by one
    // (cellIdx == res^3 passed) and wrong for negative axis values, which
    // wrap into a different valid cell instead of being rejected.
    if (iX < 0 || iX >= gridResolution ||
        iY < 0 || iY >= gridResolution ||
        iZ < 0 || iZ >= gridResolution)
      continue;
    int cellIdx = gridIndex3Dto1D(iX, iY, iZ, gridResolution);
    // - For each cell, read the start/end indices in the boid pointer array.
    int startIdx = gridCellStartIndices[cellIdx];
    int endIdx = gridCellEndIndices[cellIdx];
    if (startIdx < 0)  // empty cell
      continue;
    // - Access each boid in the cell and accumulate the rule contributions.
    for (int j = startIdx; j <= endIdx; j++) {
      if (j == index)
        continue;  // exclude self, matching the scattered version
      float d = glm::distance(pos[j], thisPos);
      // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
      if (d < rule1Distance) { v1 += pos[j]; ++n1; }
      // Rule 2: boids try to stay a distance d away from each other
      if (d < rule2Distance) { v2 -= (pos[j] - thisPos); ++n2; }
      // Rule 3: boids try to match the speed of surrounding boids
      if (d < rule3Distance) { v3 += vel1[j]; ++n3; }
    }
  }
  if (n1) {
    v1 = (v1 / float(n1) - thisPos) * rule1Scale;
  }
  if (n2) {
    v2 *= rule2Scale;
  }
  if (n3) {
    v3 = (v3 / float(n3) - thisVel) * rule3Scale;
  }
  thisVel += v1 + v2 + v3;
  // - Clamp the speed change before putting the new speed in vel2.
  //   (The previous normalize(thisVel) * maxSpeed forced *every* boid to
  //   maximum speed and produced NaN for a zero velocity; clamping matches
  //   the naive and scattered paths.)
  clampSpeed(thisVel);
  vel2[index] = thisVel;
}
__global__ void kernRearrangeBoidData(int N, int* arrayIndices, glm::vec3* pos, glm::vec3* coherentPos, glm::vec3* vel, glm::vec3* coherentVel)
{
  // Gather pos/vel into cell-sorted ("coherent") order: entry i of the
  // coherent buffers becomes the boid that arrayIndices[i] points at.
  int i = threadIdx.x + (blockIdx.x * blockDim.x);
  if (i < N) {
    const int src = arrayIndices[i];
    coherentPos[i] = pos[src];
    coherentVel[i] = vel[src];
  }
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
  // Brute-force step: every boid scans every other boid (O(N^2)).
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  kernUpdateVelocityBruteForce << <fullBlocksPerGrid, blockSize >> > (numObjects, dev_pos, dev_vel1, dev_vel2);
  // Launch-error checks added for consistency with the grid-based steps.
  checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!");
  cudaDeviceSynchronize();
  kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("kernUpdatePos failed!");
  cudaDeviceSynchronize();
  // Ping-pong the velocity buffers so vel2 becomes the input next frame.
  glm::vec3 *tmp = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = tmp;
}
void Boids::stepSimulationScatteredGrid(float dt) {
  // Uniform-grid neighbor search (scattered layout) using a Thrust key sort.
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  dim3 gridCellThreadNum((gridCellCount + blockSize - 1) / blockSize);
  // Reset the per-cell bookkeeping: -1 marks an empty cell, and the end
  // index defaults to the last boid so the final run is terminated.
  kernResetIntBuffer<<<gridCellThreadNum, blockSize>>>(gridCellCount, dev_gridCellStartIndices, -1);
  kernResetIntBuffer<<<gridCellThreadNum, blockSize>>>(gridCellCount, dev_gridCellEndIndices, numObjects-1);
  // Label each particle with its array index and its grid-cell index
  // (2x-width cells).
  kernComputeIndices<<<fullBlocksPerGrid, blockSize>>>(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth,
    dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("ComputeIndices failed!");
  cudaDeviceSynchronize();
  // Sort boid pointers by cell key; stability is not required here.
  thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
  cudaDeviceSynchronize();
  // Record where each cell's run of boids starts and ends.
  kernIdentifyCellStartEnd<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("IdentifyCellStartEnd failed!");
  cudaDeviceSynchronize();
  // Velocity update via the grid-accelerated neighbor search.
  kernUpdateVelNeighborSearchScattered<<<fullBlocksPerGrid, blockSize>>>(
    numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices,
    dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
  checkCUDAErrorWithLine("UpdateVelNeighborSearchScattered failed!");
  cudaDeviceSynchronize();
  // Integrate positions with the freshly computed velocities.
  kernUpdatePos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel2);
  checkCUDAErrorWithLine("UpdatePos failed!");
  cudaDeviceSynchronize();
  // Swap velocity buffers for the next frame.
  glm::vec3* prevVel = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = prevVel;
}
void Boids::stepSimulationCoherentGrid(float dt) {
  // Uniform-grid neighbor search on cell-coherent data: after sorting, the
  // boid buffers themselves are reshuffled into cell order so the neighbor
  // search reads pos/vel contiguously.
  dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
  // FIX: size the reset launches by gridCellCount, not numObjects. The old
  // code reused fullBlocksPerGrid, which left the tail of the cell buffers
  // un-reset (stale start/end ranges) whenever gridCellCount > numObjects.
  dim3 gridCellThreadNum((gridCellCount + blockSize - 1) / blockSize);
  kernResetIntBuffer << <gridCellThreadNum, blockSize >> > (gridCellCount, dev_gridCellStartIndices, -1);
  kernResetIntBuffer << <gridCellThreadNum, blockSize >> > (gridCellCount, dev_gridCellEndIndices, numObjects - 1);
  // Label each particle with its array index and its grid-cell index
  // (2x-width cells).
  kernComputeIndices << <fullBlocksPerGrid, blockSize >> > (numObjects, gridSideCount, gridMinimum, gridInverseCellWidth,
    dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
  checkCUDAErrorWithLine("ComputeIndices failed!");
  cudaDeviceSynchronize();
  // Unstable key sort: cell index -> boid pointer.
  thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
  cudaDeviceSynchronize();
  // Find the start/end of each cell's run in the sorted keys.
  kernIdentifyCellStartEnd << < fullBlocksPerGrid, blockSize >> > (numObjects, dev_particleGridIndices,
    dev_gridCellStartIndices, dev_gridCellEndIndices);
  checkCUDAErrorWithLine("IdentifyCellStartEnd failed!");
  // BIG DIFFERENCE: use the sorted index buffer to reshuffle the particle
  // data itself into cell order (same stream, so ordering is guaranteed).
  kernRearrangeBoidData<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_particleArrayIndices, dev_pos, dev_coherentPos, dev_vel1, dev_coherentVel);
  checkCUDAErrorWithLine("rearrange boid data failed!");
  cudaDeviceSynchronize();
  // Velocity update reading the coherent buffers directly.
  kernUpdateVelNeighborSearchCoherent << <fullBlocksPerGrid, blockSize >> > (
    numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices,
    dev_gridCellEndIndices, dev_coherentPos, dev_coherentVel, dev_vel2);
  checkCUDAErrorWithLine("UpdateVelNeighborSearchCoherent failed!");
  cudaDeviceSynchronize();
  // Integrate positions in coherent order.
  kernUpdatePos << <fullBlocksPerGrid, blockSize >> > (numObjects, dt, dev_coherentPos, dev_vel2);
  checkCUDAErrorWithLine("UpdatePos failed!");
  cudaDeviceSynchronize();
  // Ping-pong: dev_vel2 is already coherent (it was written from
  // dev_coherentVel), so it becomes next frame's vel1.
  glm::vec3* tmp = dev_vel1;
  dev_vel1 = dev_vel2;
  dev_vel2 = tmp;
  // Make dev_pos the coherent position buffer for the next iteration.
  tmp = dev_pos;
  dev_pos = dev_coherentPos;
  dev_coherentPos = tmp;
}
void Boids::endSimulation() {
  // Release every device allocation: the core boid buffers first, then the
  // uniform-grid bookkeeping and coherent-ordering scratch buffers.
  void* deviceBuffers[] = {
    dev_vel1,
    dev_vel2,
    dev_pos,
    dev_particleArrayIndices,
    dev_particleGridIndices,
    dev_gridCellStartIndices,
    dev_gridCellEndIndices,
    dev_coherentPos,
    dev_coherentVel,
  };
  for (void* buffer : deviceBuffers) {
    cudaFree(buffer);
  }
}
void Boids::unitTest() {
  // LOOK-1.2 Feel free to write additional tests here.
  // --- Test 1: unstable key/value sort via thrust::sort_by_key -----------
  int* dev_intKeys;
  int* dev_intValues;
  int N = 10;
  std::unique_ptr<int[]>intKeys{ new int[N] };
  std::unique_ptr<int[]>intValues{ new int[N] };
  // Keys deliberately contain duplicates (0, 2) and a gap (no 4) so both the
  // sort and the cell start/end identification below get exercised.
  intKeys[0] = 0; intValues[0] = 0;
  intKeys[1] = 1; intValues[1] = 1;
  intKeys[2] = 0; intValues[2] = 2;
  intKeys[3] = 3; intValues[3] = 3;
  intKeys[4] = 0; intValues[4] = 4;
  intKeys[5] = 2; intValues[5] = 5;
  intKeys[6] = 2; intValues[6] = 6;
  intKeys[7] = 0; intValues[7] = 7;
  intKeys[8] = 5; intValues[8] = 8;
  intKeys[9] = 6; intValues[9] = 9;
  cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
  cudaMalloc((void**)&dev_intValues, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
  dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
  std::cout << "before unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // How to copy data to the GPU
  cudaMemcpy(dev_intKeys, intKeys.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
  cudaMemcpy(dev_intValues, intValues.get(), sizeof(int) * N, cudaMemcpyHostToDevice);
  // Wrap device vectors in thrust iterators for use with thrust.
  thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
  thrust::device_ptr<int> dev_thrust_values(dev_intValues);
  // LOOK-2.1 Example for using thrust::sort_by_key
  thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
  // How to copy data back to the CPU side from the GPU
  // (cudaMemcpy is blocking, so no explicit synchronize is needed here.)
  cudaMemcpy(intKeys.get(), dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
  cudaMemcpy(intValues.get(), dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
  checkCUDAErrorWithLine("memcpy back failed!");
  std::cout << "after unstable sort: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " key: " << intKeys[i];
    std::cout << " value: " << intValues[i] << std::endl;
  }
  // --- Test 2 (2.1): cell start/end identification on the sorted keys ----
  int gridSize = 7;
  int* dev_startIndices;
  int* dev_endIndices;
  cudaMalloc((void**)&dev_startIndices, gridSize * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_startIndices failed!");
  cudaMalloc((void**)&dev_endIndices, gridSize * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_endIndices failed!");
  // -1 marks an empty cell; end indices default to the last boid index so
  // the final run of keys is terminated correctly.
  kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (gridSize, dev_startIndices, -1);
  kernResetIntBuffer << <fullBlocksPerGrid, blockSize >> > (gridSize, dev_endIndices, N - 1);
  cudaDeviceSynchronize();
  std::unique_ptr<int[]>startIndices{ new int[gridSize] };
  std::unique_ptr<int[]>endIndices{ new int[gridSize] };
  kernIdentifyCellStartEnd << <fullBlocksPerGrid, blockSize >> > (N, dev_intKeys, dev_startIndices, dev_endIndices);
  cudaMemcpy(startIndices.get(), dev_startIndices, sizeof(int) * gridSize, cudaMemcpyDeviceToHost);
  cudaMemcpy(endIndices.get(), dev_endIndices, sizeof(int) * gridSize, cudaMemcpyDeviceToHost);
  std::cout << "grid cell: " << std::endl;
  for (int i = 0; i < gridSize; i++) {
    std::cout << " cell: " << i;
    std::cout << " start: " << startIndices[i];
    std::cout << " end: " << endIndices[i] << std::endl;
  }
  // --- Test 3: grid/array index computation on random positions ----------
  int* dev_gridIndices;
  int* dev_arrayIndices;
  cudaMalloc((void**)&dev_gridIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_gridIndices failed!");
  cudaMalloc((void**)&dev_arrayIndices, N * sizeof(int));
  checkCUDAErrorWithLine("cudaMalloc dev_arrayIndices failed!");
  glm::vec3* dev_p;
  std::unique_ptr<glm::vec3[]>pos{ new glm::vec3[N] };
  cudaMalloc((void**)&dev_p, N * sizeof(glm::vec3));
  checkCUDAErrorWithLine("cudaMalloc dev_p failed!");
  kernGenerateRandomPosArray <<<fullBlocksPerGrid, blockSize >>> (1, N, dev_p, 1.0f);
  checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
  // 2x2x2 grid over [-1, 1]^3 with unit cell width.
  float gridCellWidth = 1.0f;
  kernComputeIndices<<<fullBlocksPerGrid, blockSize>>>(N, 2, glm::vec3(-1, -1, -1) * gridCellWidth, 1.0f / gridCellWidth,
    dev_p, dev_arrayIndices, dev_gridIndices);
  std::unique_ptr<int[]>gridIndices{ new int[N] };
  std::unique_ptr<int[]>arrayIndices{ new int[N] };
  cudaMemcpy(gridIndices.get(), dev_gridIndices, sizeof(int) * N, cudaMemcpyDeviceToHost);
  cudaMemcpy(arrayIndices.get(), dev_arrayIndices, sizeof(int) * N, cudaMemcpyDeviceToHost);
  cudaMemcpy(pos.get(), dev_p, sizeof(glm::vec3) * N, cudaMemcpyDeviceToHost);
  std::cout << "compute indices: " << std::endl;
  for (int i = 0; i < N; i++) {
    std::cout << " pos: " << pos[i].x << " " << pos[i].y << " " << pos[i].z;
    std::cout << " grid: " << gridIndices[i];
    std::cout << " array: " << arrayIndices[i] << std::endl;
  }
  // cleanup
  cudaFree(dev_intKeys);
  cudaFree(dev_intValues);
  cudaFree(dev_startIndices);
  cudaFree(dev_endIndices);
  cudaFree(dev_gridIndices);
  cudaFree(dev_arrayIndices);
  cudaFree(dev_p);
  checkCUDAErrorWithLine("cudaFree failed!");
  return;
}
|
5214dbf931a589d75315e0efb30ac48799665348.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "fft_helper.h"
#include <hipfft.h>
namespace SCAMP {
__global__ void elementwise_multiply_inplace(const hipDoubleComplex* A, hipDoubleComplex* B, const int size)
{
    // B[i] <- A[i] * B[i] (complex multiply), one element per thread.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) {
        return;
    }
    B[idx] = cuCmul(A[idx], B[idx]);
}
// A is input unaligned sliding dot products produced by ifft
// out is the computed vector of distances
__global__ void normalized_aligned_dot_products(const double* A, const double divisor,
                                                const unsigned int m, const unsigned int n,
                                                double* QT)
{
    // Pull the n valid sliding-dot-product terms out of the raw ifft output
    // (which is offset by the window length: valid data starts at m - 1) and
    // divide through by `divisor` to undo the unscaled FFT round trip.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) {
        return;
    }
    QT[idx] = A[idx + m - 1] / divisor;
}
__global__ void populate_reverse_pad(const double *Q, double *Q_reverse_pad, const double *mean, const int window_size, const int size)
{
    // Write the mean-centered, reversed query into the first window_size
    // slots of Q_reverse_pad and zero-fill the rest up to `size` (FFT pad).
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) {
        return;
    }
    if (idx < window_size) {
        Q_reverse_pad[idx] = Q[window_size - 1 - idx] - *mean;
    } else {
        Q_reverse_pad[idx] = 0;
    }
}
SCAMPError_t fft_precompute_helper::compute_QT(double* QT, const double* T, const double *Q, const double *qmeans, hipStream_t s)
{
    // Sliding dot products via FFT convolution:
    //   QT = ifft(fft(T) .* fft(reverse(Q - mean(Q)) zero-padded to `size`)),
    // producing QT[i] for all n = size - window_size + 1 alignments.
    // All work runs on the caller's stream `s`; member plans/buffers
    // (fft_plan, ifft_plan, Tc, Qc, Q_reverse_pad) are reused across calls.
    hipfftResult cufftError;
    hipError_t error;
    const int n = size - window_size + 1;
    dim3 block(fft_work_size, 1, 1);
    // Bind both precomputed FFT plans to the caller's stream.
    cufftError = hipfftSetStream(fft_plan, s);
    if (cufftError != HIPFFT_SUCCESS) {
        return SCAMP_CUFFT_ERROR;
    }
    cufftError = hipfftSetStream(ifft_plan,s);
    if (cufftError != HIPFFT_SUCCESS) {
        return SCAMP_CUFFT_ERROR;
    }
    // Compute the FFT of the time series
    cufftError = hipfftExecD2Z(fft_plan, const_cast<double*>(T), Tc);
    if (cufftError != HIPFFT_SUCCESS) {
        return SCAMP_CUFFT_EXEC_ERROR;
    }
    // Reverse and zero pad the query
    hipLaunchKernelGGL(( populate_reverse_pad), dim3(dim3(ceil(size / (float) fft_work_size),1,1)), dim3(block), 0, s, Q, Q_reverse_pad, qmeans, window_size, size);
    error = hipPeekAtLastError();
    if (error != hipSuccess) {
        return SCAMP_CUDA_ERROR;
    }
    // Forward-transform the padded, reversed query.
    cufftError = hipfftExecD2Z(fft_plan, Q_reverse_pad, Qc);
    if (cufftError != HIPFFT_SUCCESS) {
        return SCAMP_CUFFT_EXEC_ERROR;
    }
    // Pointwise frequency-domain product: Qc <- Tc .* Qc.
    hipLaunchKernelGGL(( elementwise_multiply_inplace), dim3(dim3(ceil(cufft_data_size / (float) fft_work_size), 1, 1)), dim3(block), 0, s, Tc, Qc, cufft_data_size);
    error = hipPeekAtLastError();
    if ( error != hipSuccess) {
        return SCAMP_CUDA_ERROR;
    }
    // Inverse transform back into Q_reverse_pad (reused as scratch).
    cufftError = hipfftExecZ2D(ifft_plan, Qc, Q_reverse_pad);
    if (cufftError != HIPFFT_SUCCESS) {
        return SCAMP_CUFFT_EXEC_ERROR;
    }
    // Extract the n aligned products (offset window_size - 1) and divide by
    // `size`, which undoes the unscaled hipfft D2Z/Z2D round trip.
    hipLaunchKernelGGL(( normalized_aligned_dot_products), dim3(dim3(ceil(n / (float) fft_work_size), 1, 1)), dim3(block), 0, s, Q_reverse_pad, size, window_size, n, QT);
    error = hipPeekAtLastError();
    if(error != hipSuccess) {
        return SCAMP_CUDA_ERROR;
    }
    return SCAMP_NO_ERROR;
}
}
| 5214dbf931a589d75315e0efb30ac48799665348.cu | #include "fft_helper.h"
#include <cufft.h>
namespace SCAMP {
__global__ void elementwise_multiply_inplace(const cuDoubleComplex* A, cuDoubleComplex* B, const int size)
{
    // B[i] <- A[i] * B[i] (complex multiply), one element per thread.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) {
        return;
    }
    B[idx] = cuCmul(A[idx], B[idx]);
}
// A is input unaligned sliding dot products produced by ifft
// out is the computed vector of distances
__global__ void normalized_aligned_dot_products(const double* A, const double divisor,
const unsigned int m, const unsigned int n,
double* QT)
{
int a = blockIdx.x * blockDim.x + threadIdx.x;
if (a < n) {
QT[a] = A[a + m - 1] / divisor;
}
}
__global__ void populate_reverse_pad(const double *Q, double *Q_reverse_pad, const double *mean, const int window_size, const int size)
{
    // Write the mean-centered, reversed query into the first window_size
    // slots of Q_reverse_pad and zero-fill the rest up to `size` (FFT pad).
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) {
        return;
    }
    if (idx < window_size) {
        Q_reverse_pad[idx] = Q[window_size - 1 - idx] - *mean;
    } else {
        Q_reverse_pad[idx] = 0;
    }
}
SCAMPError_t fft_precompute_helper::compute_QT(double* QT, const double* T, const double *Q, const double *qmeans, cudaStream_t s)
{
    // Sliding dot products via FFT convolution:
    //   QT = ifft(fft(T) .* fft(reverse(Q - mean(Q)) zero-padded to `size`)),
    // producing QT[i] for all n = size - window_size + 1 alignments.
    // All work runs on the caller's stream `s`; member plans/buffers
    // (fft_plan, ifft_plan, Tc, Qc, Q_reverse_pad) are reused across calls.
    cufftResult cufftError;
    cudaError_t error;
    const int n = size - window_size + 1;
    dim3 block(fft_work_size, 1, 1);
    // Bind both precomputed FFT plans to the caller's stream.
    cufftError = cufftSetStream(fft_plan, s);
    if (cufftError != CUFFT_SUCCESS) {
        return SCAMP_CUFFT_ERROR;
    }
    cufftError = cufftSetStream(ifft_plan,s);
    if (cufftError != CUFFT_SUCCESS) {
        return SCAMP_CUFFT_ERROR;
    }
    // Compute the FFT of the time series
    cufftError = cufftExecD2Z(fft_plan, const_cast<double*>(T), Tc);
    if (cufftError != CUFFT_SUCCESS) {
        return SCAMP_CUFFT_EXEC_ERROR;
    }
    // Reverse and zero pad the query
    populate_reverse_pad<<<dim3(ceil(size / (float) fft_work_size),1,1), block, 0, s>>>(Q, Q_reverse_pad, qmeans, window_size, size);
    error = cudaPeekAtLastError();
    if (error != cudaSuccess) {
        return SCAMP_CUDA_ERROR;
    }
    // Forward-transform the padded, reversed query.
    cufftError = cufftExecD2Z(fft_plan, Q_reverse_pad, Qc);
    if (cufftError != CUFFT_SUCCESS) {
        return SCAMP_CUFFT_EXEC_ERROR;
    }
    // Pointwise frequency-domain product: Qc <- Tc .* Qc.
    elementwise_multiply_inplace<<<dim3(ceil(cufft_data_size / (float) fft_work_size), 1, 1), block, 0, s>>>(Tc, Qc, cufft_data_size);
    error = cudaPeekAtLastError();
    if ( error != cudaSuccess) {
        return SCAMP_CUDA_ERROR;
    }
    // Inverse transform back into Q_reverse_pad (reused as scratch).
    cufftError = cufftExecZ2D(ifft_plan, Qc, Q_reverse_pad);
    if (cufftError != CUFFT_SUCCESS) {
        return SCAMP_CUFFT_EXEC_ERROR;
    }
    // Extract the n aligned products (offset window_size - 1) and divide by
    // `size`, which undoes the unscaled cuFFT D2Z/Z2D round trip.
    normalized_aligned_dot_products<<<dim3(ceil(n / (float) fft_work_size), 1, 1), block, 0, s>>>(Q_reverse_pad, size, window_size, n, QT);
    error = cudaPeekAtLastError();
    if(error != cudaSuccess) {
        return SCAMP_CUDA_ERROR;
    }
    return SCAMP_NO_ERROR;
}
}
|
0321e9cffbffa2b703fad5878e2e52c493ebcbf9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "src/cuda/local/local.cuh"
#include "src/cuda/local/cuda-convnet2/cudaconv2.cuh"
#include "src/cuda/local/cuda-convnet2/nvmatrix.cuh"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
namespace local {
constexpr size_t Ns = 4, ICs = 4;
size_t forward_proxy_default_share_mem_in_bytes(size_t IH, size_t IW) {
    // One (Ns x ICs) tile of float input planes, each IH*IW elements.
    const size_t floats_per_plane = IH * IW;
    return Ns * ICs * floats_per_plane * sizeof(float);
}
// blockIdx.y is OC*OH*OW/1024
// blockIdx.x is N/4
// threadIdx.x is [0, 1024)
// Local (untied-weight) convolution forward kernel.
// Grid mapping: blockIdx.x walks the batch in tiles of Ns images,
// blockIdx.y tiles the OC*OH*OW output elements; each thread owns one
// flattened output element for all Ns images of its tile.
// Requires Ns * ICs * sizeof(float) * IH * IW bytes of dynamic shared memory
// (see forward_proxy_default_share_mem_in_bytes).
template <uint32_t Ns, uint32_t ICs, bool is_xcorr>
__global__ void forward_kernel(
        const float* __restrict__ src, const float* __restrict__ filter,
        float* __restrict__ dst, uint32_t N, uint32_t IC, uint32_t IH, uint32_t IW,
        uint32_t OC, uint32_t OH, uint32_t OW, uint32_t FH, uint32_t FW, uint32_t INs,
        size_t ONs, uint32_t PH, uint32_t PW, uint32_t SH, uint32_t SW) {
    // Ns*ICs*sizeof(float)*IH*IW
    extern __shared__ float shared_mem[];
    float* src_cache = shared_mem;
    uint32_t tid = threadIdx.x;
    uint32_t tstride = blockDim.x;
    // Flattened output id of this thread over (oh, ow, oc).
    uint32_t oid = tid + blockIdx.y * tstride;
    // Advance to this block's batch tile.
    src += blockIdx.x * Ns * INs;
    dst += blockIdx.x * Ns * ONs;
    uint32_t op = oid / OC;  // spatial output position: oh * OW + ow
    uint32_t oc = oid % OC;
    uint32_t oh = op / OW;
    uint32_t ow = op % OW;
    // Per-image accumulators kept in registers.
    float dst_reg[Ns];
    for (uint32_t no = 0; no < Ns; ++no)
        dst_reg[no] = 0.0f;
    // Number of images actually present in this tile (handles the batch tail).
    uint32_t Nb = min(N - blockIdx.x * Ns, Ns);
    for (uint32_t ic = 0; ic < IC; ic += ICs) {
        // read ICs-channel src
        // (Ns, ICs, IHs, IWs)
        uint32_t ICb = min(ICs, IC - ic);
        // Cooperative load of an (Nb, ICs, IH, IW) input slab into shared
        // memory; channels past ICb are zeroed by the (ico < ICb) factor
        // while min() keeps the global read in bounds.
        for (uint32_t i = tid; i < Nb * ICs * IH * IW; i += tstride) {
            uint32_t ip = i % (IH * IW);
            uint32_t ico = i / (IH * IW) % ICs;
            uint32_t no = i / (IH * IW) / ICs;
            src_cache[i] = (ico < ICb) *
                           src[no * INs + min(IC - 1, (ic + ico)) * IH * IW + ip];
        }
        __syncthreads();
        if (oid < OC * OH * OW)
            for (uint32_t fh = 0; fh < FH; ++fh) {
                uint32_t ih;
                if (is_xcorr)
                    ih = oh * SH + fh - PH;
                else
                    ih = oh * SH + (FH - fh - 1) - PH;
                // unsigned wrap-around folds the "negative" padded case into
                // the single ih < IH test
                if (ih < IH)
                    for (uint32_t fw = 0; fw < FW; ++fw) {
                        uint32_t iw;
                        if (is_xcorr)
                            iw = ow * SW + fw - PW;
                        else
                            iw = ow * SW + (FW - fw - 1) - PW;
                        if (iw < IW)
                            for (uint32_t ico = 0; ico < ICb; ++ico) {
                                // Untied weights: the filter is indexed by
                                // output position `op` as well as
                                // (ic, fh, fw, oc).
                                uint32_t fid = op * IC * FH * FW * OC +
                                               (ic + ico) * FH * FW * OC +
                                               fh * FW * OC + fw * OC + oc;
                                float fval = filter[fid];
                                float src_reg[Ns];
#pragma unroll
                                for (uint32_t no = 0; no < Ns; ++no) {
                                    src_reg[no] = src_cache
                                            [no * ICs * IH * IW + ico * IH * IW +
                                             ih * IW + iw];
                                }
#pragma unroll
                                for (uint32_t no = 0; no < Ns; ++no) {
                                    dst_reg[no] += src_reg[no] * fval;
                                }
                            }
                    }
            }
        // Barrier before the next iteration overwrites src_cache; reached by
        // every thread (including oid out of range) so the block converges.
        __syncthreads();
    }
    if (oid < OC * OH * OW) {
        for (uint32_t no = 0; no < Nb; ++no) {
            dst[no * ONs + oc * OH * OW + op] = dst_reg[no];
        }
    }
}
void forward_proxy_default(
        const float* src, const float* filter, float* dst, size_t N, size_t IC,
        size_t IH, size_t IW, size_t OC, size_t OH, size_t OW, size_t FH, size_t FW,
        size_t INs, size_t ONs, size_t PH, size_t PW, size_t SH, size_t SW,
        bool is_xcorr, hipStream_t stream) {
    // Launch the shared-memory local-convolution kernel: grid.x tiles the
    // batch in groups of Ns images, grid.y tiles the OC*OH*OW outputs.
    const size_t threads = 256;
    const size_t smem_bytes = Ns * ICs * sizeof(float) * IH * IW;
    const dim3 blocks(DIVUP(N, Ns), DIVUP(OC * OH * OW, threads));
    if (is_xcorr) {
        hipLaunchKernelGGL(
                (forward_kernel<Ns, ICs, true>), blocks, dim3(threads), smem_bytes,
                stream, src, filter, dst, N, IC, IH, IW, OC, OH, OW, FH, FW,
                INs, ONs, PH, PW, SH, SW);
    } else {
        hipLaunchKernelGGL(
                (forward_kernel<Ns, ICs, false>), blocks, dim3(threads), smem_bytes,
                stream, src, filter, dst, N, IC, IH, IW, OC, OH, OW, FH, FW,
                INs, ONs, PH, PW, SH, SW);
    }
    after_kernel_launch();
}
bool can_forward_proxy_convnet(
        size_t N, size_t IC, size_t /* IH */, size_t /* IW */, size_t /*OC*/,
        size_t /* OH */, size_t /* OW */, size_t FH, size_t FW, size_t /* INs */,
        size_t /* ONs */, size_t PH, size_t PW, size_t SH, size_t SW) {
    // The cuda-convnet2 path only supports square padding/stride/filters, a
    // channel count it can handle (numImgColors <= 3 or divisible by 4),
    // stride no larger than the filter, and a batch divisible by 32.
    return (PH == PW)                   // square padding
        && (SH == SW)                   // square stride
        && (IC <= 3 || IC % 4 == 0)     // numImgColors constraint
        // megdnn_assert(numFilters % (16 * numGroups) == 0);
        // (OC % 16 == 0) intentionally not enforced, as before
        && (FH == FW)                   // square filter
        && (SH <= FH)
        && (N % 32 == 0);
}
size_t get_workspace_in_floats_forward_proxy_convnet(
        size_t N, size_t IC, size_t IH, size_t IW, size_t OC, size_t OH, size_t OW,
        size_t /* FH */, size_t /* FW */, size_t /* INs */, size_t /* ONs */,
        size_t /* PH */, size_t /* PW */, size_t /* SH */, size_t /* SW */) {
    // Room for a transposed copy of the input plus a transposed output
    // (both are needed by forward_proxy_convnet).
    const size_t src_elems = N * IC * IH * IW;
    const size_t dst_elems = N * OC * OH * OW;
    return src_elems + dst_elems;
}
void forward_proxy_convnet(
        const float* src, const float* filter, float* dst, float* workspace, size_t N,
        size_t IC, size_t IH, size_t IW, size_t OC, size_t OH, size_t OW, size_t FH,
        size_t FW, size_t INs, size_t ONs, // IN stride and ON stride
        size_t PH, size_t /* PW */, size_t SH, size_t /* SW */,
        hipblasHandle_t cublas_handle, hipStream_t stream, float* one, float* zero)
    {
    // Local convolution via the cuda-convnet2 kernels, which operate on
    // (C*H*W, N) matrices: transpose the batch-major input, run
    // localFilterActs, then transpose the result back.
    // `workspace` must hold N*IC*IH*IW + N*OC*OH*OW floats (see
    // get_workspace_in_floats_forward_proxy_convnet); `one`/`zero` are the
    // device-side scalars the blas-backed transposes use.
    // NOTE(review): assumes PH == PW and SH == SW, as gated by
    // can_forward_proxy_convnet — only PH/SH are forwarded.
    // *_n views are batch-major over the caller's buffers; *_t views are the
    // transposed copies living in the two halves of the workspace.
    MemorySegment msrc_n(const_cast<float*>(src)), mdst_n(dst),
            mfilter(const_cast<float*>(filter)), msrc_t(workspace + 0),
            mdst_t(workspace + N * IC * IH * IW);
    NVMatrix nvimage_n(&msrc_n, N, IC * IH * IW, INs);
    NVMatrix nvtarget_n(&mdst_n, N, OC * OH * OW, ONs);
    NVMatrix nvimage_t(&msrc_t, IC * IH * IW, N);
    NVMatrix nvfilter(&mfilter, OH * OW * IC * FH * FW, OC);
    NVMatrix nvtarget_t(&mdst_t, OC * OH * OW, N);
    nvimage_t.transpose(nvimage_t, cublas_handle, one, zero);
    // cuda-convnet2 expresses padding as a negative start offset, hence -PH.
    localFilterActs(
            stream, nvimage_t, nvfilter, nvtarget_t, IH, OH, OW, -static_cast<int>(PH),
            SH, IC, 1);
    after_kernel_launch();
    nvtarget_t.transpose(nvtarget_n, cublas_handle, one, zero);
}
} // namespace local
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
| 0321e9cffbffa2b703fad5878e2e52c493ebcbf9.cu | #include "src/cuda/local/local.cuh"
#include "src/cuda/local/cuda-convnet2/cudaconv2.cuh"
#include "src/cuda/local/cuda-convnet2/nvmatrix.cuh"
#include "src/cuda/utils.cuh"
namespace megdnn {
namespace cuda {
namespace local {
constexpr size_t Ns = 4, ICs = 4;
size_t forward_proxy_default_share_mem_in_bytes(size_t IH, size_t IW) {
    // One (Ns x ICs) tile of float input planes, each IH*IW elements.
    const size_t floats_per_plane = IH * IW;
    return Ns * ICs * floats_per_plane * sizeof(float);
}
// blockIdx.y is OC*OH*OW/1024
// blockIdx.x is N/4
// threadIdx.x is [0, 1024)
// Local (untied-weight) convolution forward kernel.
// Grid mapping: blockIdx.x walks the batch in tiles of Ns images,
// blockIdx.y tiles the OC*OH*OW output elements; each thread owns one
// flattened output element for all Ns images of its tile.
// Requires Ns * ICs * sizeof(float) * IH * IW bytes of dynamic shared memory
// (see forward_proxy_default_share_mem_in_bytes).
template <uint32_t Ns, uint32_t ICs, bool is_xcorr>
__global__ void forward_kernel(
        const float* __restrict__ src, const float* __restrict__ filter,
        float* __restrict__ dst, uint32_t N, uint32_t IC, uint32_t IH, uint32_t IW,
        uint32_t OC, uint32_t OH, uint32_t OW, uint32_t FH, uint32_t FW, uint32_t INs,
        size_t ONs, uint32_t PH, uint32_t PW, uint32_t SH, uint32_t SW) {
    // Ns*ICs*sizeof(float)*IH*IW
    extern __shared__ float shared_mem[];
    float* src_cache = shared_mem;
    uint32_t tid = threadIdx.x;
    uint32_t tstride = blockDim.x;
    // Flattened output id of this thread over (oh, ow, oc).
    uint32_t oid = tid + blockIdx.y * tstride;
    // Advance to this block's batch tile.
    src += blockIdx.x * Ns * INs;
    dst += blockIdx.x * Ns * ONs;
    uint32_t op = oid / OC;  // spatial output position: oh * OW + ow
    uint32_t oc = oid % OC;
    uint32_t oh = op / OW;
    uint32_t ow = op % OW;
    // Per-image accumulators kept in registers.
    float dst_reg[Ns];
    for (uint32_t no = 0; no < Ns; ++no)
        dst_reg[no] = 0.0f;
    // Number of images actually present in this tile (handles the batch tail).
    uint32_t Nb = min(N - blockIdx.x * Ns, Ns);
    for (uint32_t ic = 0; ic < IC; ic += ICs) {
        // read ICs-channel src
        // (Ns, ICs, IHs, IWs)
        uint32_t ICb = min(ICs, IC - ic);
        // Cooperative load of an (Nb, ICs, IH, IW) input slab into shared
        // memory; channels past ICb are zeroed by the (ico < ICb) factor
        // while min() keeps the global read in bounds.
        for (uint32_t i = tid; i < Nb * ICs * IH * IW; i += tstride) {
            uint32_t ip = i % (IH * IW);
            uint32_t ico = i / (IH * IW) % ICs;
            uint32_t no = i / (IH * IW) / ICs;
            src_cache[i] = (ico < ICb) *
                           src[no * INs + min(IC - 1, (ic + ico)) * IH * IW + ip];
        }
        __syncthreads();
        if (oid < OC * OH * OW)
            for (uint32_t fh = 0; fh < FH; ++fh) {
                uint32_t ih;
                if (is_xcorr)
                    ih = oh * SH + fh - PH;
                else
                    ih = oh * SH + (FH - fh - 1) - PH;
                // unsigned wrap-around folds the "negative" padded case into
                // the single ih < IH test
                if (ih < IH)
                    for (uint32_t fw = 0; fw < FW; ++fw) {
                        uint32_t iw;
                        if (is_xcorr)
                            iw = ow * SW + fw - PW;
                        else
                            iw = ow * SW + (FW - fw - 1) - PW;
                        if (iw < IW)
                            for (uint32_t ico = 0; ico < ICb; ++ico) {
                                // Untied weights: the filter is indexed by
                                // output position `op` as well as
                                // (ic, fh, fw, oc).
                                uint32_t fid = op * IC * FH * FW * OC +
                                               (ic + ico) * FH * FW * OC +
                                               fh * FW * OC + fw * OC + oc;
                                float fval = filter[fid];
                                float src_reg[Ns];
#pragma unroll
                                for (uint32_t no = 0; no < Ns; ++no) {
                                    src_reg[no] = src_cache
                                            [no * ICs * IH * IW + ico * IH * IW +
                                             ih * IW + iw];
                                }
#pragma unroll
                                for (uint32_t no = 0; no < Ns; ++no) {
                                    dst_reg[no] += src_reg[no] * fval;
                                }
                            }
                    }
            }
        // Barrier before the next iteration overwrites src_cache; reached by
        // every thread (including oid out of range) so the block converges.
        __syncthreads();
    }
    if (oid < OC * OH * OW) {
        for (uint32_t no = 0; no < Nb; ++no) {
            dst[no * ONs + oc * OH * OW + op] = dst_reg[no];
        }
    }
}
void forward_proxy_default(
        const float* src, const float* filter, float* dst, size_t N, size_t IC,
        size_t IH, size_t IW, size_t OC, size_t OH, size_t OW, size_t FH, size_t FW,
        size_t INs, size_t ONs, size_t PH, size_t PW, size_t SH, size_t SW,
        bool is_xcorr, cudaStream_t stream) {
    // Launch the shared-memory local-convolution kernel: grid.x tiles the
    // batch in groups of Ns images, grid.y tiles the OC*OH*OW outputs.
    const size_t threads = 256;
    const size_t smem_bytes = Ns * ICs * sizeof(float) * IH * IW;
    const dim3 blocks(DIVUP(N, Ns), DIVUP(OC * OH * OW, threads));
    if (is_xcorr) {
        forward_kernel<Ns, ICs, true><<<blocks, threads, smem_bytes, stream>>>(
                src, filter, dst, N, IC, IH, IW, OC, OH, OW, FH, FW, INs, ONs,
                PH, PW, SH, SW);
    } else {
        forward_kernel<Ns, ICs, false><<<blocks, threads, smem_bytes, stream>>>(
                src, filter, dst, N, IC, IH, IW, OC, OH, OW, FH, FW, INs, ONs,
                PH, PW, SH, SW);
    }
    after_kernel_launch();
}
bool can_forward_proxy_convnet(
        size_t N, size_t IC, size_t /* IH */, size_t /* IW */, size_t /*OC*/,
        size_t /* OH */, size_t /* OW */, size_t FH, size_t FW, size_t /* INs */,
        size_t /* ONs */, size_t PH, size_t PW, size_t SH, size_t SW) {
    // The cuda-convnet2 path only supports square padding/stride/filters, a
    // channel count it can handle (numImgColors <= 3 or divisible by 4),
    // stride no larger than the filter, and a batch divisible by 32.
    return (PH == PW)                   // square padding
        && (SH == SW)                   // square stride
        && (IC <= 3 || IC % 4 == 0)     // numImgColors constraint
        // megdnn_assert(numFilters % (16 * numGroups) == 0);
        // (OC % 16 == 0) intentionally not enforced, as before
        && (FH == FW)                   // square filter
        && (SH <= FH)
        && (N % 32 == 0);
}
size_t get_workspace_in_floats_forward_proxy_convnet(
        size_t N, size_t IC, size_t IH, size_t IW, size_t OC, size_t OH, size_t OW,
        size_t /* FH */, size_t /* FW */, size_t /* INs */, size_t /* ONs */,
        size_t /* PH */, size_t /* PW */, size_t /* SH */, size_t /* SW */) {
    // Room for a transposed copy of the input plus a transposed output
    // (both are needed by forward_proxy_convnet).
    const size_t src_elems = N * IC * IH * IW;
    const size_t dst_elems = N * OC * OH * OW;
    return src_elems + dst_elems;
}
void forward_proxy_convnet(
        const float* src, const float* filter, float* dst, float* workspace, size_t N,
        size_t IC, size_t IH, size_t IW, size_t OC, size_t OH, size_t OW, size_t FH,
        size_t FW, size_t INs, size_t ONs, // IN stride and ON stride
        size_t PH, size_t /* PW */, size_t SH, size_t /* SW */,
        cublasHandle_t cublas_handle, cudaStream_t stream, float* one, float* zero)
    {
    // Local convolution via the cuda-convnet2 kernels, which operate on
    // (C*H*W, N) matrices: transpose the batch-major input, run
    // localFilterActs, then transpose the result back.
    // `workspace` must hold N*IC*IH*IW + N*OC*OH*OW floats (see
    // get_workspace_in_floats_forward_proxy_convnet); `one`/`zero` are the
    // device-side scalars the cuBLAS-backed transposes use.
    // NOTE(review): assumes PH == PW and SH == SW, as gated by
    // can_forward_proxy_convnet — only PH/SH are forwarded.
    // *_n views are batch-major over the caller's buffers; *_t views are the
    // transposed copies living in the two halves of the workspace.
    MemorySegment msrc_n(const_cast<float*>(src)), mdst_n(dst),
            mfilter(const_cast<float*>(filter)), msrc_t(workspace + 0),
            mdst_t(workspace + N * IC * IH * IW);
    NVMatrix nvimage_n(&msrc_n, N, IC * IH * IW, INs);
    NVMatrix nvtarget_n(&mdst_n, N, OC * OH * OW, ONs);
    NVMatrix nvimage_t(&msrc_t, IC * IH * IW, N);
    NVMatrix nvfilter(&mfilter, OH * OW * IC * FH * FW, OC);
    NVMatrix nvtarget_t(&mdst_t, OC * OH * OW, N);
    nvimage_n.transpose(nvimage_t, cublas_handle, one, zero);
    // cuda-convnet2 expresses padding as a negative start offset, hence -PH.
    localFilterActs(
            stream, nvimage_t, nvfilter, nvtarget_t, IH, OH, OW, -static_cast<int>(PH),
            SH, IC, 1);
    after_kernel_launch();
    nvtarget_t.transpose(nvtarget_n, cublas_handle, one, zero);
}
} // namespace local
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
03d105fa71979771a739128b19c64d146adec107.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
const long int IMAGE_SIZE = 8192;
const int BLOCK_SIZE = 32;
const float alpha = 2.f;
const float beta = 2.f;
// Naive SGEMM: C = alpha * A * B + beta * C for square N x N row-major
// matrices. One thread computes one output element from global memory.
__global__ void sgemmNaive(float* A, float* B, float* C, int N)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // (fix) guard out-of-range threads: without it, any launch where N is
    // not an exact multiple of the block size reads/writes out of bounds.
    if (row >= N || col >= N)
        return;
    float val = 0.f;
    for (int i = 0; i < N; i++)
        val += A[row * N + i] * B[i * N + col];
    C[row * N + col] = alpha * val + beta * C[row * N + col];
}
// Tiled SGEMM: C = alpha * A * B + beta * C using BLOCK_SIZE x BLOCK_SIZE
// shared-memory tiles of A and B.
// Preconditions: blockDim == (BLOCK_SIZE, BLOCK_SIZE) and N is a multiple
// of BLOCK_SIZE -- the loop count N / BLOCK_SIZE truncates otherwise and
// there is no bounds guard. A guard cannot simply be added here because
// every thread in the block must reach the __syncthreads() barriers below.
__global__ void sgemmSHM(float* A, float* B, float* C, int N)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    float val = 0.f;
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    // March the tile window across the K dimension.
    for (int i = 0; i < N / BLOCK_SIZE; i++) {
        // Each thread stages one element of the A tile and one of the B tile.
        As[threadIdx.y][threadIdx.x] = A[row * N + BLOCK_SIZE * i + threadIdx.x];
        Bs[threadIdx.y][threadIdx.x] = B[(threadIdx.y + i * BLOCK_SIZE) * N + col];
        __syncthreads();  // tiles fully populated before use
        for (int j = 0; j < BLOCK_SIZE; j++)
            val += As[threadIdx.y][j] * Bs[j][threadIdx.x];
        __syncthreads();  // done reading before the next iteration overwrites
    }
    C[row * N + col] = alpha * val + beta * C[row * N + col];
}
// Benchmark driver: fills three IMAGE_SIZE x IMAGE_SIZE matrices with ones,
// runs the naive and the shared-memory SGEMM kernels once each, and prints
// runtime plus achieved TFLOPS for both.
int main()
{
    float *A, *A_d, *B, *B_d, *C, *C_d;
    // (fix) size_t keeps the byte count safe from int overflow for larger sizes.
    const size_t data_size = IMAGE_SIZE * IMAGE_SIZE * sizeof(float);
    // Pinned host buffers give full-bandwidth transfers.
    hipHostMalloc(&A, data_size);
    hipHostMalloc(&B, data_size);
    hipHostMalloc(&C, data_size);
    hipMalloc(&A_d, data_size);
    hipMalloc(&B_d, data_size);
    hipMalloc(&C_d, data_size);
    const int grid_size = IMAGE_SIZE / BLOCK_SIZE;	// 8192 / 32 = 256
    dim3 grid(grid_size, grid_size);	// 256 * 256 blocks
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);	// 32 x 32 = 1024 threads
    for (int i = 0; i < IMAGE_SIZE * IMAGE_SIZE; i++) {
        A[i] = 1.f;
        B[i] = 1.f;
        C[i] = 1.f;
    }
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipMemcpy(A_d, A, data_size, hipMemcpyHostToDevice);
    hipMemcpy(B_d, B, data_size, hipMemcpyHostToDevice);
    hipMemcpy(C_d, C, data_size, hipMemcpyHostToDevice);
    hipEventRecord(start);
    hipLaunchKernelGGL(( sgemmNaive), dim3(grid), dim3(block), 0, 0, A_d, B_d, C_d, IMAGE_SIZE);
    // (fix) kernel launches fail silently; surface launch errors explicitly.
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) {
        std::cout << "sgemmNaive launch failed: " << hipGetErrorString(err) << "\n";
        return 1;
    }
    hipEventRecord(stop);
    hipMemcpy(C, C_d, data_size, hipMemcpyDeviceToHost);
    hipEventSynchronize(stop);
    // print runtime and FLOP rate info
    float milliseconds = 0.f;
    hipEventElapsedTime(&milliseconds, start, stop);
    double seconds = static_cast<double>(milliseconds) / 1000.;
    std::cout << "sgemmNaive runtime: " << seconds << "\n";
    std::cout << "Performance (TFLOPS/s): "
              << (IMAGE_SIZE * IMAGE_SIZE * IMAGE_SIZE) * 2.0 / seconds / 1e12 << "\n\n";
    hipMemcpy(A_d, A, data_size, hipMemcpyHostToDevice);
    hipMemcpy(B_d, B, data_size, hipMemcpyHostToDevice);
    hipMemcpy(C_d, C, data_size, hipMemcpyHostToDevice);
    hipEventRecord(start);
    hipLaunchKernelGGL(( sgemmSHM), dim3(grid), dim3(block), 0, 0, A_d, B_d, C_d, IMAGE_SIZE);
    err = hipGetLastError();
    if (err != hipSuccess) {
        std::cout << "sgemmSHM launch failed: " << hipGetErrorString(err) << "\n";
        return 1;
    }
    hipEventRecord(stop);
    hipMemcpy(C, C_d, data_size, hipMemcpyDeviceToHost);
    hipEventSynchronize(stop);
    // print runtime and FLOP rate
    milliseconds = 0.f;
    hipEventElapsedTime(&milliseconds, start, stop);
    seconds = static_cast<double>(milliseconds) / 1000.;
    std::cout << "sgemmSHM runtime: " << seconds << "\n";
    std::cout << "Performance (TFLOPS/s): "
              << (IMAGE_SIZE * IMAGE_SIZE * IMAGE_SIZE) * 2.0 / seconds / 1e12 << "\n\n";
    hipFree(A_d);
    hipFree(B_d);
    hipFree(C_d);
    hipHostFree(A);
    hipHostFree(B);
    hipHostFree(C);
    // (fix) destroy the timing events; the original leaked them.
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return 0;
}
| 03d105fa71979771a739128b19c64d146adec107.cu | #include <iostream>
const long int IMAGE_SIZE = 8192;
const int BLOCK_SIZE = 32;
const float alpha = 2.f;
const float beta = 2.f;
// Naive SGEMM: C = alpha * A * B + beta * C for square N x N row-major
// matrices. One thread computes one output element from global memory.
__global__ void sgemmNaive(float* A, float* B, float* C, int N)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // (fix) guard out-of-range threads: without it, any launch where N is
    // not an exact multiple of the block size reads/writes out of bounds.
    if (row >= N || col >= N)
        return;
    float val = 0.f;
    for (int i = 0; i < N; i++)
        val += A[row * N + i] * B[i * N + col];
    C[row * N + col] = alpha * val + beta * C[row * N + col];
}
// Tiled SGEMM: C = alpha * A * B + beta * C using BLOCK_SIZE x BLOCK_SIZE
// shared-memory tiles of A and B.
// Preconditions: blockDim == (BLOCK_SIZE, BLOCK_SIZE) and N is a multiple
// of BLOCK_SIZE -- the loop count N / BLOCK_SIZE truncates otherwise and
// there is no bounds guard. A guard cannot simply be added here because
// every thread in the block must reach the __syncthreads() barriers below.
__global__ void sgemmSHM(float* A, float* B, float* C, int N)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    float val = 0.f;
    __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    // March the tile window across the K dimension.
    for (int i = 0; i < N / BLOCK_SIZE; i++) {
        // Each thread stages one element of the A tile and one of the B tile.
        As[threadIdx.y][threadIdx.x] = A[row * N + BLOCK_SIZE * i + threadIdx.x];
        Bs[threadIdx.y][threadIdx.x] = B[(threadIdx.y + i * BLOCK_SIZE) * N + col];
        __syncthreads();  // tiles fully populated before use
        for (int j = 0; j < BLOCK_SIZE; j++)
            val += As[threadIdx.y][j] * Bs[j][threadIdx.x];
        __syncthreads();  // done reading before the next iteration overwrites
    }
    C[row * N + col] = alpha * val + beta * C[row * N + col];
}
// Benchmark driver: fills three IMAGE_SIZE x IMAGE_SIZE matrices with ones,
// runs the naive and the shared-memory SGEMM kernels once each, and prints
// runtime plus achieved TFLOPS for both.
int main()
{
    float *A, *A_d, *B, *B_d, *C, *C_d;
    // (fix) size_t keeps the byte count safe from int overflow for larger sizes.
    const size_t data_size = IMAGE_SIZE * IMAGE_SIZE * sizeof(float);
    // Pinned host buffers give full-bandwidth transfers.
    cudaMallocHost(&A, data_size);
    cudaMallocHost(&B, data_size);
    cudaMallocHost(&C, data_size);
    cudaMalloc(&A_d, data_size);
    cudaMalloc(&B_d, data_size);
    cudaMalloc(&C_d, data_size);
    const int grid_size = IMAGE_SIZE / BLOCK_SIZE;	// 8192 / 32 = 256
    dim3 grid(grid_size, grid_size);	// 256 * 256 blocks
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);	// 32 x 32 = 1024 threads
    for (int i = 0; i < IMAGE_SIZE * IMAGE_SIZE; i++) {
        A[i] = 1.f;
        B[i] = 1.f;
        C[i] = 1.f;
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMemcpy(A_d, A, data_size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B, data_size, cudaMemcpyHostToDevice);
    cudaMemcpy(C_d, C, data_size, cudaMemcpyHostToDevice);
    cudaEventRecord(start);
    sgemmNaive<<<grid, block>>>(A_d, B_d, C_d, IMAGE_SIZE);
    // (fix) kernel launches fail silently; surface launch errors explicitly.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cout << "sgemmNaive launch failed: " << cudaGetErrorString(err) << "\n";
        return 1;
    }
    cudaEventRecord(stop);
    cudaMemcpy(C, C_d, data_size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    // print runtime and FLOP rate info
    float milliseconds = 0.f;
    cudaEventElapsedTime(&milliseconds, start, stop);
    double seconds = static_cast<double>(milliseconds) / 1000.;
    std::cout << "sgemmNaive runtime: " << seconds << "\n";
    std::cout << "Performance (TFLOPS/s): "
              << (IMAGE_SIZE * IMAGE_SIZE * IMAGE_SIZE) * 2.0 / seconds / 1e12 << "\n\n";
    cudaMemcpy(A_d, A, data_size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B, data_size, cudaMemcpyHostToDevice);
    cudaMemcpy(C_d, C, data_size, cudaMemcpyHostToDevice);
    cudaEventRecord(start);
    sgemmSHM<<<grid, block>>>(A_d, B_d, C_d, IMAGE_SIZE);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cout << "sgemmSHM launch failed: " << cudaGetErrorString(err) << "\n";
        return 1;
    }
    cudaEventRecord(stop);
    cudaMemcpy(C, C_d, data_size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    // print runtime and FLOP rate
    milliseconds = 0.f;
    cudaEventElapsedTime(&milliseconds, start, stop);
    seconds = static_cast<double>(milliseconds) / 1000.;
    std::cout << "sgemmSHM runtime: " << seconds << "\n";
    std::cout << "Performance (TFLOPS/s): "
              << (IMAGE_SIZE * IMAGE_SIZE * IMAGE_SIZE) * 2.0 / seconds / 1e12 << "\n\n";
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    cudaFreeHost(A);
    cudaFreeHost(B);
    cudaFreeHost(C);
    // (fix) destroy the timing events; the original leaked them.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
a19c361f847572c3ca64e75904ab52dfecbb01e2.hip | // !!! This is a file automatically generated by hipify!!!
/**
* hist.cu
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <time.h>
#include <limits.h>
#include "bmpUtil.h"
#include "common.h"
/*
* Kernel 1D that computes histogram on GPU
*/
/*
 * Kernel 1D that computes histogram on GPU.
 * One thread per pixel: bins[0..255] count R values, bins[256..511] G,
 * bins[512..767] B. W = image width in pixels, N = total pixel count
 * (W*H), M = padded row size in bytes (BMP rows are 4-byte aligned).
 * `bins` must be zero-initialized before launch; updates are atomic.
 */
__global__ void histogramBMP(uint *bins, const pel *imgSrc, const uint W, const uint N, const uint M) {
    // ** pixel granularity **
    uint x = blockDim.x * blockIdx.x + threadIdx.x; // 1D pixel linear index over [0:W*H)
    uint nrows = x / W; // num of rows to skip
    uint off = x % W; // offset (= col) within current row
    if (x >= N) // pixel out of range
        return;
    // ** byte granularity **
    uint p = M * nrows + 3*off; // src byte position of the pixel (3 bytes/pixel)
    pel R = imgSrc[p];
    pel G = imgSrc[p+1];
    pel B = imgSrc[p+2];
    atomicAdd(&bins[R], 1);
    atomicAdd(&bins[G+256], 1);
    atomicAdd(&bins[B+512], 1);
}
/*
* Function that computes histogram on CPU
*/
/*
 * CPU reference histogram: accumulates per-channel counts into three
 * consecutive 256-entry bin groups (R at [0..255], G at [256..511],
 * B at [512..767]). W/H are the image dimensions in pixels and M is the
 * padded row size in bytes.
 */
void hist_CPU(uint *bins, const pel *imgSrc, const uint W, const uint H, const uint M) {
    for (uint row = 0; row < H; ++row) {
        for (uint col = 0; col < W; ++col) {
            const uint base = M * row + 3 * col;   // byte offset of this pixel
            bins[imgSrc[base]] += 1;               // red channel
            bins[imgSrc[base + 1] + 256] += 1;     // green channel
            bins[imgSrc[base + 2] + 512] += 1;     // blue channel
        }
    }
}
// Usage: hist InputFilename [dimBlock]. Reads a 24-bit BMP, computes the
// per-channel histogram on CPU and GPU, and prints both for comparison.
int main(int argc, char **argv) {
    uint dimBlock = 1024;                 // threads per block (CLI-overridable)
    pel *imgBMP_CPU; // Where images are stored in CPU
    pel *imgBMP_GPU; // Where images are stored in GPU
    uint *binsRGB_CPU, *binsRGB_GPU, *binsRGB_GPU2CPU;
    uint N_bins = 3*256;                  // 256 bins per channel (R, G, B)
    uint bin_size = N_bins*sizeof(uint);
    if (argc > 2)
        dimBlock = atoi(argv[2]);
    else if (argc < 2) {
        printf("\n\nUsage: hist InputFilename dimBlock\n");
        exit(EXIT_FAILURE);
    }
    // bins for CPU & GPU (CPU bins are zeroed by calloc)
    binsRGB_CPU = (uint*) calloc(N_bins, sizeof(uint));
    binsRGB_GPU2CPU = (uint*) malloc(bin_size);
    CHECK(hipMalloc((void**) &binsRGB_GPU, bin_size));
    // (fix) device memory is NOT zero-initialized by hipMalloc; the kernel
    // only performs atomic increments, so the GPU bins must start at zero.
    CHECK(hipMemset(binsRGB_GPU, 0, bin_size));
    // Create CPU memory to store the input image
    imgBMP_CPU = ReadBMPlin(argv[1]);
    if (imgBMP_CPU == NULL) {
        printf("Cannot allocate memory for the input image...\n");
        exit(EXIT_FAILURE);
    }
    // Allocate GPU buffer for image and bins
    CHECK(hipMalloc((void**) &imgBMP_GPU, IMAGESIZE));
    // Copy input vectors from host memory to GPU buffers.
    CHECK(hipMemcpy(imgBMP_GPU, imgBMP_CPU, IMAGESIZE, hipMemcpyHostToDevice));
    // CPU histogram
    double start = seconds(); // start time
    hist_CPU(binsRGB_CPU, imgBMP_CPU, WIDTH, HEIGHT, WIDTHB);
    double stop = seconds(); // elapsed time
    printf("\nCPU elapsed time %f sec \n\n", stop - start);
    // invoke kernels (define grid and block sizes)
    uint nPixels = WIDTH*HEIGHT;
    int dimGrid = (nPixels + dimBlock - 1) / dimBlock;   // ceil-div
    printf("\ndimGrid = %d dimBlock = %d\n",dimGrid,dimBlock);
    start = seconds(); // start time
    hipLaunchKernelGGL(( histogramBMP), dim3(dimGrid), dim3(dimBlock), 0, 0, binsRGB_GPU, imgBMP_GPU, WIDTH, nPixels, WIDTHB);
    // (fix) surface launch-configuration errors (e.g. dimBlock too large).
    CHECK(hipGetLastError());
    CHECK(hipDeviceSynchronize());
    stop = seconds(); // elapsed time
    printf("\nGPU elapsed time %f sec \n\n", stop - start);
    // Copy output (results) from GPU buffer to host (CPU) memory.
    CHECK(hipMemcpy(binsRGB_GPU2CPU, binsRGB_GPU, bin_size, hipMemcpyDeviceToHost));
    for (int i = 0; i < N_bins/3; i++)
        printf("bin_GPU[%d] = \t%d\t%d\t%d\t -- bin_CPU[%d] = \t%d\t%d\t%d\n", i,
               binsRGB_GPU2CPU[i],binsRGB_GPU2CPU[i+256],binsRGB_GPU2CPU[i+512],
               i,binsRGB_CPU[i],binsRGB_CPU[i+256],binsRGB_CPU[i+512]);
    // Deallocate GPU memory
    hipFree(imgBMP_GPU);
    hipFree(binsRGB_GPU);
    // (fix) release host allocations as well
    free(binsRGB_CPU);
    free(binsRGB_GPU2CPU);
    free(imgBMP_CPU);
    // tracing tools spel as Parallel Nsight and Visual Profiler to show complete traces.
    CHECK(hipDeviceReset());
    return (EXIT_SUCCESS);
}
/*
* Read a 24-bit/pixel BMP file into a 1D linear array.
* Allocate memory to store the 1D image and return its pointer
*/
/*
 * Read a 24-bit/pixel BMP file into a 1D linear array.
 * Allocate memory to store the 1D image and return its pointer.
 * Side effect: fills the global image descriptor `img` (width, height,
 * padded row size, raw header) -- presumably declared in bmpUtil.h; confirm.
 * Returns NULL when the pixel buffer cannot be allocated; exits the process
 * when the file cannot be opened.
 * NOTE(review): the fread return values are not checked and the bit depth
 * is not validated -- confirm inputs are trusted 24-bit BMPs.
 */
pel *ReadBMPlin(char* fn) {
    static pel *Img;
    FILE* f = fopen(fn, "rb");
    if (f == NULL) {
        printf("\n\n%s NOT FOUND\n\n", fn);
        exit(EXIT_FAILURE);
    }
    pel HeaderInfo[54];
    size_t nByte = fread(HeaderInfo, sizeof(pel), 54, f); // read the 54-byte header
    // extract image height and width from header (fixed BMP header offsets)
    int width = *(int*) &HeaderInfo[18];
    img.width = width;
    int height = *(int*) &HeaderInfo[22];
    img.height = height;
    int RowBytes = (width * 3 + 3) & (~3); // row size rounded up to a multiple of 4 bytes
    img.rowByte = RowBytes;
    //save header for re-use
    memcpy(img.headInfo, HeaderInfo, 54);
    printf("\n Input File name: %5s (%d x %d) File Size=%lu", fn, img.width, img.height, IMAGESIZE);
    // allocate memory to store the main image (1 Dimensional array)
    Img = (pel *) malloc(IMAGESIZE);
    if (Img == NULL)
        return Img; // Cannot allocate memory
    // read the image from disk
    size_t out = fread(Img, sizeof(pel), IMAGESIZE, f);
    fclose(f);
    return Img;
}
| a19c361f847572c3ca64e75904ab52dfecbb01e2.cu | /**
* hist.cu
*/
#include <cuda_runtime.h>
#include <stdio.h>
#include <time.h>
#include <limits.h>
#include "bmpUtil.h"
#include "common.h"
/*
* Kernel 1D that computes histogram on GPU
*/
/*
 * Kernel 1D that computes histogram on GPU.
 * One thread per pixel: bins[0..255] count R values, bins[256..511] G,
 * bins[512..767] B. W = image width in pixels, N = total pixel count
 * (W*H), M = padded row size in bytes (BMP rows are 4-byte aligned).
 * `bins` must be zero-initialized before launch; updates are atomic.
 */
__global__ void histogramBMP(uint *bins, const pel *imgSrc, const uint W, const uint N, const uint M) {
    // ** pixel granularity **
    uint x = blockDim.x * blockIdx.x + threadIdx.x; // 1D pixel linear index over [0:W*H)
    uint nrows = x / W; // num of rows to skip
    uint off = x % W; // offset (= col) within current row
    if (x >= N) // pixel out of range
        return;
    // ** byte granularity **
    uint p = M * nrows + 3*off; // src byte position of the pixel (3 bytes/pixel)
    pel R = imgSrc[p];
    pel G = imgSrc[p+1];
    pel B = imgSrc[p+2];
    atomicAdd(&bins[R], 1);
    atomicAdd(&bins[G+256], 1);
    atomicAdd(&bins[B+512], 1);
}
/*
* Function that computes histogram on CPU
*/
/*
 * CPU reference histogram: accumulates per-channel counts into three
 * consecutive 256-entry bin groups (R at [0..255], G at [256..511],
 * B at [512..767]). W/H are the image dimensions in pixels and M is the
 * padded row size in bytes.
 */
void hist_CPU(uint *bins, const pel *imgSrc, const uint W, const uint H, const uint M) {
    for (uint row = 0; row < H; ++row) {
        for (uint col = 0; col < W; ++col) {
            const uint base = M * row + 3 * col;   // byte offset of this pixel
            bins[imgSrc[base]] += 1;               // red channel
            bins[imgSrc[base + 1] + 256] += 1;     // green channel
            bins[imgSrc[base + 2] + 512] += 1;     // blue channel
        }
    }
}
// Usage: hist InputFilename [dimBlock]. Reads a 24-bit BMP, computes the
// per-channel histogram on CPU and GPU, and prints both for comparison.
int main(int argc, char **argv) {
    uint dimBlock = 1024;                 // threads per block (CLI-overridable)
    pel *imgBMP_CPU; // Where images are stored in CPU
    pel *imgBMP_GPU; // Where images are stored in GPU
    uint *binsRGB_CPU, *binsRGB_GPU, *binsRGB_GPU2CPU;
    uint N_bins = 3*256;                  // 256 bins per channel (R, G, B)
    uint bin_size = N_bins*sizeof(uint);
    if (argc > 2)
        dimBlock = atoi(argv[2]);
    else if (argc < 2) {
        printf("\n\nUsage: hist InputFilename dimBlock\n");
        exit(EXIT_FAILURE);
    }
    // bins for CPU & GPU (CPU bins are zeroed by calloc)
    binsRGB_CPU = (uint*) calloc(N_bins, sizeof(uint));
    binsRGB_GPU2CPU = (uint*) malloc(bin_size);
    CHECK(cudaMalloc((void**) &binsRGB_GPU, bin_size));
    // (fix) device memory is NOT zero-initialized by cudaMalloc; the kernel
    // only performs atomic increments, so the GPU bins must start at zero.
    CHECK(cudaMemset(binsRGB_GPU, 0, bin_size));
    // Create CPU memory to store the input image
    imgBMP_CPU = ReadBMPlin(argv[1]);
    if (imgBMP_CPU == NULL) {
        printf("Cannot allocate memory for the input image...\n");
        exit(EXIT_FAILURE);
    }
    // Allocate GPU buffer for image and bins
    CHECK(cudaMalloc((void**) &imgBMP_GPU, IMAGESIZE));
    // Copy input vectors from host memory to GPU buffers.
    CHECK(cudaMemcpy(imgBMP_GPU, imgBMP_CPU, IMAGESIZE, cudaMemcpyHostToDevice));
    // CPU histogram
    double start = seconds(); // start time
    hist_CPU(binsRGB_CPU, imgBMP_CPU, WIDTH, HEIGHT, WIDTHB);
    double stop = seconds(); // elapsed time
    printf("\nCPU elapsed time %f sec \n\n", stop - start);
    // invoke kernels (define grid and block sizes)
    uint nPixels = WIDTH*HEIGHT;
    int dimGrid = (nPixels + dimBlock - 1) / dimBlock;   // ceil-div
    printf("\ndimGrid = %d dimBlock = %d\n",dimGrid,dimBlock);
    start = seconds(); // start time
    histogramBMP<<<dimGrid, dimBlock>>>(binsRGB_GPU, imgBMP_GPU, WIDTH, nPixels, WIDTHB);
    // (fix) surface launch-configuration errors (e.g. dimBlock too large).
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    stop = seconds(); // elapsed time
    printf("\nGPU elapsed time %f sec \n\n", stop - start);
    // Copy output (results) from GPU buffer to host (CPU) memory.
    CHECK(cudaMemcpy(binsRGB_GPU2CPU, binsRGB_GPU, bin_size, cudaMemcpyDeviceToHost));
    for (int i = 0; i < N_bins/3; i++)
        printf("bin_GPU[%d] = \t%d\t%d\t%d\t -- bin_CPU[%d] = \t%d\t%d\t%d\n", i,
               binsRGB_GPU2CPU[i],binsRGB_GPU2CPU[i+256],binsRGB_GPU2CPU[i+512],
               i,binsRGB_CPU[i],binsRGB_CPU[i+256],binsRGB_CPU[i+512]);
    // Deallocate GPU memory
    cudaFree(imgBMP_GPU);
    cudaFree(binsRGB_GPU);
    // (fix) release host allocations as well
    free(binsRGB_CPU);
    free(binsRGB_GPU2CPU);
    free(imgBMP_CPU);
    // tracing tools spel as Parallel Nsight and Visual Profiler to show complete traces.
    CHECK(cudaDeviceReset());
    return (EXIT_SUCCESS);
}
/*
* Read a 24-bit/pixel BMP file into a 1D linear array.
* Allocate memory to store the 1D image and return its pointer
*/
/*
 * Read a 24-bit/pixel BMP file into a 1D linear array.
 * Allocate memory to store the 1D image and return its pointer.
 * Side effect: fills the global image descriptor `img` (width, height,
 * padded row size, raw header) -- presumably declared in bmpUtil.h; confirm.
 * Returns NULL when the pixel buffer cannot be allocated; exits the process
 * when the file cannot be opened.
 * NOTE(review): the fread return values are not checked and the bit depth
 * is not validated -- confirm inputs are trusted 24-bit BMPs.
 */
pel *ReadBMPlin(char* fn) {
    static pel *Img;
    FILE* f = fopen(fn, "rb");
    if (f == NULL) {
        printf("\n\n%s NOT FOUND\n\n", fn);
        exit(EXIT_FAILURE);
    }
    pel HeaderInfo[54];
    size_t nByte = fread(HeaderInfo, sizeof(pel), 54, f); // read the 54-byte header
    // extract image height and width from header (fixed BMP header offsets)
    int width = *(int*) &HeaderInfo[18];
    img.width = width;
    int height = *(int*) &HeaderInfo[22];
    img.height = height;
    int RowBytes = (width * 3 + 3) & (~3); // row size rounded up to a multiple of 4 bytes
    img.rowByte = RowBytes;
    //save header for re-use
    memcpy(img.headInfo, HeaderInfo, 54);
    printf("\n Input File name: %5s (%d x %d) File Size=%lu", fn, img.width, img.height, IMAGESIZE);
    // allocate memory to store the main image (1 Dimensional array)
    Img = (pel *) malloc(IMAGESIZE);
    if (Img == NULL)
        return Img; // Cannot allocate memory
    // read the image from disk
    size_t out = fread(Img, sizeof(pel), IMAGESIZE, f);
    fclose(f);
    return Img;
}
|
58c83bf30db439b5511004bf8ed8c36b55ed1738.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2018,2019,2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/function/min.hpp>
#include <nbla/cuda/utils/device_reduce.cuh>
#include <nbla/cuda/utils/reduce_ops/min.cuh>
namespace nbla {
namespace {
// Converts the flat argmin indices produced by the reduction (indices into
// the whole flattened input) into per-row indices by subtracting each row's
// starting offset (row i starts at i * reduction_size).
template <typename T>
__global__ void adjust_index(const int size, T *data,
                             const int reduction_size) {
  NBLA_CUDA_KERNEL_LOOP(i, size) { data[i] -= i * reduction_size; }
}
} // namespace

// Forward pass of Min on CUDA: delegates the reduction to the base class,
// then, when indices are requested, rewrites the flat indices stored in the
// index output so they are relative to each reduced row.
template <typename T>
void MinCuda<T>::forward_impl(const Variables &inputs,
                              const Variables &outputs) {
  Min<T>::forward_impl(inputs, outputs);
  if (this->with_index_ || this->only_index_) {
    // The index tensor is outputs[0] when only indices are produced,
    // otherwise it is the second output.
    Variable *idx_var = this->only_index_ ? outputs[0] : outputs[1];
    auto idx_ptr = idx_var->cast_data_and_get_pointer<size_t>(this->ctx_);
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(adjust_index, idx_var->size(), idx_ptr,
                                   this->reduction_size_);
  }
}

// Row-wise min reduction: reduces `reduction_size` elements for each of
// `outer_size` rows, writing min values to y and argmin positions to the
// internal index buffer (consumed by the backward pass).
template <typename T>
void MinCuda<T>::forward_impl_reduce(const T *x_, T *y_, int outer_size,
                                     int reduction_size) {
  const Tc *x = reinterpret_cast<const Tc *>(x_);
  Tc *y = reinterpret_cast<Tc *>(y_);
  cuda_set_device(this->device_);
  VariablePtr vind = this->index_buff_;
  int *ind = vind->cast_data_and_get_pointer<int>(this->ctx_, true);
  // TODO: Auto tune.
  // Short rows relative to the row count: single-pass kernel.
  if (reduction_size / outer_size < 32) {
    reduce_2d_mixed_parallel(outer_size, reduction_size,
                             MinPreOp<Tc>(x, y, ind));
    return;
  }
  // Long rows: two-pass parallel reduction through per-block partials.
  // Get block reduce buffer
  auto fbuff = cuda_get_reduction_buffer<Tc>(reduction_size, this->ctx_);
  auto ibuff = cuda_get_reduction_buffer<int>(reduction_size, this->ctx_);
  MinPreOp<Tc> pre_op(x, fbuff.second, ibuff.second);
  MinPostOp<Tc> post_op(fbuff.second, ibuff.second, y, ind);
  reduce_2d_parallel_reduction(outer_size, reduction_size, pre_op, post_op);
}

// Scatters each output gradient back to the input position that produced
// the minimum (one thread per reduced row via the kernel-loop macro).
template <typename T>
__global__ void kernel_reduce_index_backward(const int num, T *dx,
                                             const int *ind, const T *dy) {
  NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[ind[idx]] += dy[idx]; }
}

// Backward pass: when not accumulating, zero-fills dx first, then scatters
// dy through the argmin indices recorded by forward_impl_reduce.
template <typename T>
void MinCuda<T>::backward_impl_reduce(const T *dy_, T *dx_, int outer_size,
                                      int reduction_size, bool accum) {
  const Tc *dy = reinterpret_cast<const Tc *>(dy_);
  Tc *dx = reinterpret_cast<Tc *>(dx_);
  cuda_set_device(this->device_);
  if (!accum) {
    // NOTE(review): this memset runs on the legacy default stream -- confirm
    // this matches the stream the surrounding framework launches kernels on.
    hipMemsetAsync(dx, 0, sizeof(*dx) * outer_size * reduction_size);
  }
  VariablePtr vind = this->index_buff_;
  const int *ind = vind->get_data_pointer<int>(this->ctx_);
  NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_reduce_index_backward, outer_size, dx,
                                 ind, dy);
}
} // namespace nbla
| 58c83bf30db439b5511004bf8ed8c36b55ed1738.cu | // Copyright 2018,2019,2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/cuda/array/cuda_array.hpp>
#include <nbla/cuda/function/min.hpp>
#include <nbla/cuda/utils/device_reduce.cuh>
#include <nbla/cuda/utils/reduce_ops/min.cuh>
namespace nbla {
namespace {
// Converts the flat argmin indices produced by the reduction (indices into
// the whole flattened input) into per-row indices by subtracting each row's
// starting offset (row i starts at i * reduction_size).
template <typename T>
__global__ void adjust_index(const int size, T *data,
                             const int reduction_size) {
  NBLA_CUDA_KERNEL_LOOP(i, size) { data[i] -= i * reduction_size; }
}
} // namespace

// Forward pass of Min on CUDA: delegates the reduction to the base class,
// then, when indices are requested, rewrites the flat indices stored in the
// index output so they are relative to each reduced row.
template <typename T>
void MinCuda<T>::forward_impl(const Variables &inputs,
                              const Variables &outputs) {
  Min<T>::forward_impl(inputs, outputs);
  if (this->with_index_ || this->only_index_) {
    // The index tensor is outputs[0] when only indices are produced,
    // otherwise it is the second output.
    Variable *idx_var = this->only_index_ ? outputs[0] : outputs[1];
    auto idx_ptr = idx_var->cast_data_and_get_pointer<size_t>(this->ctx_);
    NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(adjust_index, idx_var->size(), idx_ptr,
                                   this->reduction_size_);
  }
}

// Row-wise min reduction: reduces `reduction_size` elements for each of
// `outer_size` rows, writing min values to y and argmin positions to the
// internal index buffer (consumed by the backward pass).
template <typename T>
void MinCuda<T>::forward_impl_reduce(const T *x_, T *y_, int outer_size,
                                     int reduction_size) {
  const Tc *x = reinterpret_cast<const Tc *>(x_);
  Tc *y = reinterpret_cast<Tc *>(y_);
  cuda_set_device(this->device_);
  VariablePtr vind = this->index_buff_;
  int *ind = vind->cast_data_and_get_pointer<int>(this->ctx_, true);
  // TODO: Auto tune.
  // Short rows relative to the row count: single-pass kernel.
  if (reduction_size / outer_size < 32) {
    reduce_2d_mixed_parallel(outer_size, reduction_size,
                             MinPreOp<Tc>(x, y, ind));
    return;
  }
  // Long rows: two-pass parallel reduction through per-block partials.
  // Get block reduce buffer
  auto fbuff = cuda_get_reduction_buffer<Tc>(reduction_size, this->ctx_);
  auto ibuff = cuda_get_reduction_buffer<int>(reduction_size, this->ctx_);
  MinPreOp<Tc> pre_op(x, fbuff.second, ibuff.second);
  MinPostOp<Tc> post_op(fbuff.second, ibuff.second, y, ind);
  reduce_2d_parallel_reduction(outer_size, reduction_size, pre_op, post_op);
}

// Scatters each output gradient back to the input position that produced
// the minimum (one thread per reduced row via the kernel-loop macro).
template <typename T>
__global__ void kernel_reduce_index_backward(const int num, T *dx,
                                             const int *ind, const T *dy) {
  NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[ind[idx]] += dy[idx]; }
}

// Backward pass: when not accumulating, zero-fills dx first, then scatters
// dy through the argmin indices recorded by forward_impl_reduce.
template <typename T>
void MinCuda<T>::backward_impl_reduce(const T *dy_, T *dx_, int outer_size,
                                      int reduction_size, bool accum) {
  const Tc *dy = reinterpret_cast<const Tc *>(dy_);
  Tc *dx = reinterpret_cast<Tc *>(dx_);
  cuda_set_device(this->device_);
  if (!accum) {
    // NOTE(review): this memset runs on the legacy default stream -- confirm
    // this matches the stream the surrounding framework launches kernels on.
    cudaMemsetAsync(dx, 0, sizeof(*dx) * outer_size * reduction_size);
  }
  VariablePtr vind = this->index_buff_;
  const int *ind = vind->get_data_pointer<int>(this->ctx_);
  NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_reduce_index_backward, outer_size, dx,
                                 ind, dy);
}
} // namespace nbla
|
bf48b0807de46f9b602692f72caaa604b6ee34e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@precisions normal z -> s d c
*/
#include "common_magma.h"
// Row-sum kernel for the special case M % 64 == 0 and N % 64 == 0.
// Each of the 64 threads per block (16x4 layout) owns one row of A and
// accumulates sum(|A(row, :)|) with a 4-column software pipeline, writing
// the row sum to C[row]. The final max over rows (infinity norm) is taken
// on the CPU afterwards (see magmablas_zlange).
extern "C" __global__ void magma_zlange_special(const magmaDoubleComplex *A, double *C, int M, int N, int lda) {
    int ibx = blockIdx.x * 64;              // first row handled by this block
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int idt = ty * 16 + tx;                 // thread's row offset within the block
    double Cb[4] = {0., 0., 0., 0.};        // column-unrolled partial sums
    A+= ibx+idt ;
    const magmaDoubleComplex * Aend = A+lda*N;
    // Prefetch the first 4 columns to prime the pipeline.
    magmaDoubleComplex Ap[4]={A[0],A[lda],A[2*lda],A[3*lda]};
    C+=ibx+idt;
    // NOTE(review): Cbb is written and read back by the same thread only --
    // the shared buffer provides no inter-thread communication here.
    __shared__ double Cbb[64];
    A += 4*lda;
    do {
        // Accumulate the current 4 columns while prefetching the next 4.
        Cb[0]+=cuCabs(Ap[0]);
        Ap[0]=A[0];
        Cb[1]+=cuCabs(Ap[1]);
        Ap[1]=A[lda];
        Cb[2]+=cuCabs(Ap[2]);
        Ap[2]=A[2*lda];
        Cb[3]+=cuCabs(Ap[3]);
        Ap[3]=A[3*lda];
        A+=4*lda;
    } while (A < Aend);
    // Drain the pipeline: accumulate the last prefetched columns.
    Cb[0]+=cuCabs(Ap[0]);
    Cb[1]+=cuCabs(Ap[1]);
    Cb[2]+=cuCabs(Ap[2]);
    Cb[3]+=cuCabs(Ap[3]);
    Cbb[idt]=Cb[0]+Cb[1]+Cb[2]+Cb[3];
    C[0]= Cbb[idt];
}
/*
Now do the rest of the parts in CPU ( getting the maximum ) Hybrid .. wow
*/
/*
This Kernel Will be called when
M,N %64 != 0
*/
/*
 * Generic-size variant: one thread per row (64 rows per 16x4 block), each
 * thread accumulates the absolute-value sum of its row of A into C[row].
 * Handles M, N not divisible by 64; the caller strips N down to a multiple
 * of 4 and passes the remainder in N_mod_4.
 */
extern "C" __global__ void
magma_zlange_generic(const magmaDoubleComplex *A, double *C, int M, int N, int lda , int N_mod_4)
{
    int ibx = blockIdx.x * 64;              // first row handled by this block
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int idt = ty * 16 + tx;                 // thread's row offset within the block
    double Cb[4] = {0,0,0,0};               // column-unrolled partial sums
    /*
        Rouding up along row.
    */
    // Out-of-range threads are clamped to the last row: they redundantly
    // recompute row M-1's sum instead of reading out of bounds.
    if( (ibx + idt) >= M )
        A+= (M-1);
    else
        A+= ibx+idt ;
    double Cbb;
    C+=ibx+idt;
    /*
        Where to update. In generic case one place will be update more than once.
        What about skipping it ?
        -- Another level of optimization is required.
    */
    if( N >= 8 ) {
        // Software-pipelined main loop: prefetch 4 columns ahead.
        const magmaDoubleComplex * Aend = A+lda*N ;
        magmaDoubleComplex Ap[4]={A[0],A[lda],A[2*lda],A[3*lda]};
        A+=4*lda;
        do {
            Cb[0]+=cuCabs(Ap[0]);
            Ap[0]=A[0];
            Cb[1]+=cuCabs(Ap[1]);
            Ap[1]=A[lda];
            Cb[2]+=cuCabs(Ap[2]);
            Ap[2]=A[2*lda];
            Cb[3]+=cuCabs(Ap[3]);
            Ap[3]=A[3*lda];
            A+=4*lda;
        }while (A < Aend);
        // Drain the pipeline.
        Cb[0]+=cuCabs(Ap[0]);
        Cb[1]+=cuCabs(Ap[1]);
        Cb[2]+=cuCabs(Ap[2]);
        Cb[3]+=cuCabs(Ap[3]);
    }
    else{
        // Small N: one unrolled pass over 4 columns (when available).
        if(N >= 4){
            Cb[0]+=cuCabs(A[0]);
            Cb[1]+=cuCabs(A[lda]);
            Cb[2]+=cuCabs(A[2*lda]);
            Cb[3]+=cuCabs(A[3*lda]);
            A+= 4*lda ;
        }
    }
    /*
        Clean up Code .......................... e.g. N = 1,2,3, 513, 514, 515 etc.
    */
    // Handle the trailing N % 4 columns.
    switch(N_mod_4){
        case 0:
            break;
        case 1:
            Cb[0]+=cuCabs(A[0]);
            break;
        case 2:
            Cb[0]+=cuCabs(A[0]);
            Cb[1]+=cuCabs(A[lda]);
            break;
        case 3:
            Cb[0]+=cuCabs(A[0]);
            Cb[1]+=cuCabs(A[lda]);
            Cb[2]+=cuCabs(A[2*lda]);
            break;
    }
    /*Computing Final Result*/
    Cbb=Cb[0]+Cb[1]+Cb[2]+Cb[3];
    C[0]= Cbb;
}
// Host launcher: fills C with one absolute row sum per row of A.
// Picks the fast kernel when both dimensions are multiples of 64,
// otherwise the generic kernel with the N % 4 remainder split off.
// `tree_depth` is accepted but not used by this implementation.
extern "C" void
magmablas_zlange_64_64_16_4(const magmaDoubleComplex *A, double *C,
                            int M, int N, int lda,int tree_depth)
{
    dim3 threads( 16, 4 );
    dim3 grid(M/64+(M%64!=0),1);   // one block per 64 rows (rounded up)
    if( M %64 == 0 && N %64 == 0 ){
        hipLaunchKernelGGL(( magma_zlange_special), dim3(grid), dim3(threads), 0, magma_stream , A, C , M , N , lda);
    }
    else{
        int N_mod_4 = N % 4 ;
        N = N - N_mod_4 ;
        hipLaunchKernelGGL(( magma_zlange_generic), dim3(grid), dim3(threads), 0, magma_stream , A, C , M , N , lda , N_mod_4);
    }
}
extern "C" double
magmablas_zlange(
    char norm, magma_int_t M, magma_int_t N,
    const magmaDoubleComplex *A, magma_int_t LDA , double *WORK)
{
/*  Purpose
    =======
    ZLANGE returns the value of a matrix norm of A. Only NORM = 'I'/'i'
    (infinity norm: maximum absolute row sum) is implemented in this
    release; any other norm prints a message and aborts the process.

    Arguments
    =========
    NORM    (input) CHARACTER*1. Must be 'I' or 'i'.
    M       (input) INTEGER. Number of rows of A, M >= 0.
    N       (input) INTEGER. Number of columns of A, N >= 0.
    A       (input) COMPLEX*16 array in GPU memory, dimension (LDA,N).
    LDA     (input) INTEGER. Leading dimension of A, LDA >= max(M,1).
    WORK    (workspace) DOUBLE PRECISION array in GPU memory; must hold at
            least M doubles (one absolute row sum per row).
    ===================================================================== */
    if( norm !='I' && norm!='i') {
        printf("Only normI(A) is provided in this release!");
        exit(-1);
    }
    // Fill WORK[0..M-1] with the absolute row sums of A.
    magmablas_zlange_64_64_16_4( A, WORK , M , N , LDA , 6 );
    // NOTE(review): WORK holds M row sums, yet the max is searched over N
    // entries -- this looks wrong when M != N; confirm against the LAPACK
    // ZLANGE specification before changing the search length.
    int val = hipblasIdamax(N, WORK, 1);   // 1-based index of the max entry
    double retVal[1];
    // (fix) WORK is an array of doubles; the original requested
    // sizeof(magmaDoubleComplex) (16 bytes) per element, copying 16 bytes
    // into the 8-byte retVal buffer and overflowing the stack array.
    hipblasGetMatrix( 1, 1, sizeof( double ), WORK+val-1, 1, retVal, 1 ) ;
    return retVal[0];
}
| bf48b0807de46f9b602692f72caaa604b6ee34e5.cu | /*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@precisions normal z -> s d c
*/
#include "common_magma.h"
// Row-sum kernel for the special case M % 64 == 0 and N % 64 == 0.
// Each of the 64 threads per block (16x4 layout) owns one row of A and
// accumulates sum(|A(row, :)|) with a 4-column software pipeline, writing
// the row sum to C[row]. The final max over rows (infinity norm) is taken
// on the CPU afterwards (see magmablas_zlange).
extern "C" __global__ void magma_zlange_special(const magmaDoubleComplex *A, double *C, int M, int N, int lda) {
    int ibx = blockIdx.x * 64;              // first row handled by this block
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int idt = ty * 16 + tx;                 // thread's row offset within the block
    double Cb[4] = {0., 0., 0., 0.};        // column-unrolled partial sums
    A+= ibx+idt ;
    const magmaDoubleComplex * Aend = A+lda*N;
    // Prefetch the first 4 columns to prime the pipeline.
    magmaDoubleComplex Ap[4]={A[0],A[lda],A[2*lda],A[3*lda]};
    C+=ibx+idt;
    // NOTE(review): Cbb is written and read back by the same thread only --
    // the shared buffer provides no inter-thread communication here.
    __shared__ double Cbb[64];
    A += 4*lda;
    do {
        // Accumulate the current 4 columns while prefetching the next 4.
        Cb[0]+=cuCabs(Ap[0]);
        Ap[0]=A[0];
        Cb[1]+=cuCabs(Ap[1]);
        Ap[1]=A[lda];
        Cb[2]+=cuCabs(Ap[2]);
        Ap[2]=A[2*lda];
        Cb[3]+=cuCabs(Ap[3]);
        Ap[3]=A[3*lda];
        A+=4*lda;
    } while (A < Aend);
    // Drain the pipeline: accumulate the last prefetched columns.
    Cb[0]+=cuCabs(Ap[0]);
    Cb[1]+=cuCabs(Ap[1]);
    Cb[2]+=cuCabs(Ap[2]);
    Cb[3]+=cuCabs(Ap[3]);
    Cbb[idt]=Cb[0]+Cb[1]+Cb[2]+Cb[3];
    C[0]= Cbb[idt];
}
/*
Now do the rest of the parts in CPU ( getting the maximum ) Hybrid .. wow
*/
/*
This Kernel Will be called when
M,N %64 != 0
*/
/*
 * Generic-size variant: one thread per row (64 rows per 16x4 block), each
 * thread accumulates the absolute-value sum of its row of A into C[row].
 * Handles M, N not divisible by 64; the caller strips N down to a multiple
 * of 4 and passes the remainder in N_mod_4.
 */
extern "C" __global__ void
magma_zlange_generic(const magmaDoubleComplex *A, double *C, int M, int N, int lda , int N_mod_4)
{
    int ibx = blockIdx.x * 64;              // first row handled by this block
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int idt = ty * 16 + tx;                 // thread's row offset within the block
    double Cb[4] = {0,0,0,0};               // column-unrolled partial sums
    /*
        Rouding up along row.
    */
    // Out-of-range threads are clamped to the last row: they redundantly
    // recompute row M-1's sum instead of reading out of bounds.
    if( (ibx + idt) >= M )
        A+= (M-1);
    else
        A+= ibx+idt ;
    double Cbb;
    C+=ibx+idt;
    /*
        Where to update. In generic case one place will be update more than once.
        What about skipping it ?
        -- Another level of optimization is required.
    */
    if( N >= 8 ) {
        // Software-pipelined main loop: prefetch 4 columns ahead.
        const magmaDoubleComplex * Aend = A+lda*N ;
        magmaDoubleComplex Ap[4]={A[0],A[lda],A[2*lda],A[3*lda]};
        A+=4*lda;
        do {
            Cb[0]+=cuCabs(Ap[0]);
            Ap[0]=A[0];
            Cb[1]+=cuCabs(Ap[1]);
            Ap[1]=A[lda];
            Cb[2]+=cuCabs(Ap[2]);
            Ap[2]=A[2*lda];
            Cb[3]+=cuCabs(Ap[3]);
            Ap[3]=A[3*lda];
            A+=4*lda;
        }while (A < Aend);
        // Drain the pipeline.
        Cb[0]+=cuCabs(Ap[0]);
        Cb[1]+=cuCabs(Ap[1]);
        Cb[2]+=cuCabs(Ap[2]);
        Cb[3]+=cuCabs(Ap[3]);
    }
    else{
        // Small N: one unrolled pass over 4 columns (when available).
        if(N >= 4){
            Cb[0]+=cuCabs(A[0]);
            Cb[1]+=cuCabs(A[lda]);
            Cb[2]+=cuCabs(A[2*lda]);
            Cb[3]+=cuCabs(A[3*lda]);
            A+= 4*lda ;
        }
    }
    /*
        Clean up Code .......................... e.g. N = 1,2,3, 513, 514, 515 etc.
    */
    // Handle the trailing N % 4 columns.
    switch(N_mod_4){
        case 0:
            break;
        case 1:
            Cb[0]+=cuCabs(A[0]);
            break;
        case 2:
            Cb[0]+=cuCabs(A[0]);
            Cb[1]+=cuCabs(A[lda]);
            break;
        case 3:
            Cb[0]+=cuCabs(A[0]);
            Cb[1]+=cuCabs(A[lda]);
            Cb[2]+=cuCabs(A[2*lda]);
            break;
    }
    /*Computing Final Result*/
    Cbb=Cb[0]+Cb[1]+Cb[2]+Cb[3];
    C[0]= Cbb;
}
// Host launcher: fills C with one absolute row sum per row of A.
// Picks the fast kernel when both dimensions are multiples of 64,
// otherwise the generic kernel with the N % 4 remainder split off.
// `tree_depth` is accepted but not used by this implementation.
extern "C" void
magmablas_zlange_64_64_16_4(const magmaDoubleComplex *A, double *C,
                            int M, int N, int lda,int tree_depth)
{
    dim3 threads( 16, 4 );
    dim3 grid(M/64+(M%64!=0),1);   // one block per 64 rows (rounded up)
    if( M %64 == 0 && N %64 == 0 ){
        magma_zlange_special<<< grid, threads, 0, magma_stream >>> ( A, C , M , N , lda);
    }
    else{
        int N_mod_4 = N % 4 ;
        N = N - N_mod_4 ;
        magma_zlange_generic<<< grid, threads, 0, magma_stream >>> ( A, C , M , N , lda , N_mod_4);
    }
}
extern "C" double
magmablas_zlange(
    char norm, magma_int_t M, magma_int_t N,
    const magmaDoubleComplex *A, magma_int_t LDA , double *WORK)
{
/*  Purpose
    =======
    ZLANGE returns the value of a matrix norm of A. Only NORM = 'I'/'i'
    (infinity norm: maximum absolute row sum) is implemented in this
    release; any other norm prints a message and aborts the process.

    Arguments
    =========
    NORM    (input) CHARACTER*1. Must be 'I' or 'i'.
    M       (input) INTEGER. Number of rows of A, M >= 0.
    N       (input) INTEGER. Number of columns of A, N >= 0.
    A       (input) COMPLEX*16 array in GPU memory, dimension (LDA,N).
    LDA     (input) INTEGER. Leading dimension of A, LDA >= max(M,1).
    WORK    (workspace) DOUBLE PRECISION array in GPU memory; must hold at
            least M doubles (one absolute row sum per row).
    ===================================================================== */
    if( norm !='I' && norm!='i') {
        printf("Only normI(A) is provided in this release!");
        exit(-1);
    }
    // Fill WORK[0..M-1] with the absolute row sums of A.
    magmablas_zlange_64_64_16_4( A, WORK , M , N , LDA , 6 );
    // NOTE(review): WORK holds M row sums, yet the max is searched over N
    // entries -- this looks wrong when M != N; confirm against the LAPACK
    // ZLANGE specification before changing the search length.
    int val = cublasIdamax(N, WORK, 1);   // 1-based index of the max entry
    double retVal[1];
    // (fix) WORK is an array of doubles; the original requested
    // sizeof(magmaDoubleComplex) (16 bytes) per element, copying 16 bytes
    // into the 8-byte retVal buffer and overflowing the stack array.
    cublasGetMatrix( 1, 1, sizeof( double ), WORK+val-1, 1, retVal, 1 ) ;
    return retVal[0];
}
|
bda76442dc79283a2a554008c8789119da2d6671.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "fewMeansKernels.cuh"
// One thread per data point: find the nearest mean (squared Euclidean
// distance), record the assignment, and fold the point into the
// threadIdx.y-th global copy of the cluster sums/counts using atomics.
// Expected launch: 2-D blocks; the flat point index spans blockDim.x * blockDim.y.
__global__ void findNearestClusterFewMeansKernel(const my_size_t meansSize, const value_t* __restrict__ means, value_t *measnSums, const value_t* __restrict__ data, uint32_t* counts, uint32_t* __restrict__ assignedClusters, const my_size_t dimension)
{
    const unsigned int point = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
    const value_t* sample = data + point * dimension;

    // Scan every mean, remembering the closest one.
    int best = -1;
    value_t bestDistance = LLONG_MAX; // initial "infinity"; LLONG_MAX converts to value_t
    for (my_size_t mean = 0; mean < meansSize; ++mean)
    {
        value_t dist = 0;
        for (my_size_t dim = 0; dim < dimension; ++dim)
        {
            const value_t delta = means[mean * dimension + dim] - sample[dim];
            dist += delta * delta;
        }
        if (dist < bestDistance)
        {
            bestDistance = dist;
            best = mean;
        }
    }

    if (best == -1)
        return; // only possible when meansSize == 0

    // Accumulate into copy threadIdx.y of the partial statistics.
    atomicInc(&counts[threadIdx.y * meansSize + best], INT_MAX);
    assignedClusters[point] = best;
    for (my_size_t dim = 0; dim < dimension; ++dim)
    {
        atomicAdd(&measnSums[threadIdx.y * meansSize * dimension + best * dimension + dim], sample[dim]);
    }
}
// Folds the `cellsCount` partial copies of the cluster sums/counts (as produced
// by findNearestClusterFewMeansKernel) and writes the final means:
// means[id] = total_sum / total_count.
// One thread per (mean, dimension) element; threadIdx.y selects the mean within
// the block, so blockIdx.x * blockDim.y + threadIdx.y is the global mean index.
// NOTE(review): assumes meansSums holds cellsCount consecutive copies of a
// (meansSize x dimension) matrix and counts holds cellsCount copies of a
// meansSize vector -- confirm against the launch configuration.
__global__ void countDivFewMeansKernel(const my_size_t meansSize, uint32_t* __restrict__ counts, value_t* __restrict__ means, const value_t* __restrict__ meansSums, const my_size_t dimension, const uint32_t cellsCount)
{
    // Flat index of this thread's (mean, dimension) element in copy 0.
    int id = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
    uint32_t count = 0;
    // Start from copy 0, then accumulate the remaining cellsCount - 1 copies.
    value_t sum = meansSums[id];
    count = counts[blockIdx.x * blockDim.y + threadIdx.y];
    for (my_size_t i = 1; i < cellsCount; i++)
    {
        sum += meansSums[i * dimension * meansSize + id];
        count += counts[i * meansSize + blockIdx.x * blockDim.y + threadIdx.y];
    }
    // Final mean component.
    // NOTE(review): an empty cluster (count == 0) divides by zero here --
    // confirm callers guarantee every cluster receives at least one point.
    means[id] = sum / count;
    // counts are not needed
    // Collapse the reduced count into copy 0 (one lane per mean writes it).
    if (threadIdx.x == 0)
    {
        counts[blockIdx.x * blockDim.y + threadIdx.y] = count;
    }
}
// each thread has own copy...delete?
// Per-thread-copy variant: each data point owns a private slice of counts and
// measnSums (indexed by the point id), so no atomics are required. One thread
// per data point, 1-D launch.
__global__ void findNearestClusterFewMeansKernelV2(const my_size_t meansSize, const value_t* __restrict__ means, value_t* __restrict__ measnSums, const value_t* __restrict__ data, uint32_t* __restrict__ counts, uint32_t* __restrict__ assignedClusters, const my_size_t dimension)
{
    const unsigned int point = threadIdx.x + blockDim.x * blockIdx.x;
    const value_t* sample = data + point * dimension;

    // Track the nearest mean by squared Euclidean distance.
    int best = -1;
    value_t bestDistance = LLONG_MAX; // initial "infinity"; LLONG_MAX converts to value_t
    for (my_size_t mean = 0; mean < meansSize; ++mean)
    {
        value_t dist = 0;
        for (my_size_t dim = 0; dim < dimension; ++dim)
        {
            const value_t delta = means[mean * dimension + dim] - sample[dim];
            dist += delta * delta;
        }
        if (dist < bestDistance)
        {
            bestDistance = dist;
            best = mean;
        }
    }

    if (best != -1)
    {
        // Record the assignment and fold the sample into this point's private
        // copy of the cluster statistics.
        ++counts[meansSize * point + best];
        assignedClusters[point] = best;
        for (my_size_t dim = 0; dim < dimension; ++dim)
        {
            measnSums[dimension * (point * meansSize + best) + dim] += sample[dim];
            //atomicAdd(&measnSums[blockIdx.y * meansSize * dimension + clusterID * dimension + j], data[id * dimension + j]);
        }
    }
}
// Tree-reduces the per-data-point private copies of meansSums/counts written by
// findNearestClusterFewMeansKernelV2, then divides to obtain the means.
// NOTE(review): __syncthreads() only synchronizes within one block, but the
// reduction pairs indices id and id + i that can live in different blocks
// whenever dataSize / 2 > blockDim.x -- confirm this kernel is launched with a
// single block, otherwise the reduction races.
// NOTE(review): the j-loop only reduces the first mean's slice of each copy
// (no mean offset is applied), and the final division uses
// counts[id % dimension] where the means layout (meanID * dimension + d)
// suggests counts[id / dimension]. The "delete?" comment on the matching find
// kernel hints this is an experimental/dead variant -- verify intent before use.
__global__ void countDivFewMeansKernelV2(const my_size_t dataSize, const my_size_t meansSize, uint32_t* __restrict__ counts, value_t* __restrict__ means, value_t* __restrict__ meansSums, const my_size_t dimension)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    // Halving tree reduction over the dataSize private copies.
    for (my_size_t i = dataSize / 2; i > 0; i >>= 1)
    {
        if (id < i)
        {
            for (my_size_t j = 0; j < dimension; ++j)
            {
                meansSums[id * dimension * meansSize + j] += meansSums[(id + i) * dimension * meansSize + j];
            }
            counts[id * meansSize] += counts[(id + i) * meansSize];
        }
        __syncthreads();
    }
    means[id] /= counts[id % dimension];
}
// One thread per data point: find the nearest mean (squared Euclidean
// distance), store the assignment, and accumulate the point into a
// per-threadIdx.y shared-memory copy of the cluster sums/counts. The copies
// are tree-reduced (blockDim.y assumed a power of two) and the block's partial
// result is flushed into slot blockIdx.x of measnSums/counts with atomics.
// Dynamic shared memory must hold blockDim.y * meansSize * dimension value_t's
// followed by blockDim.y * meansSize uint32_t's.
__global__ void findNearestClusterFewMeansKernelV3(const my_size_t meansSize, const value_t* __restrict__ means, value_t *measnSums, const my_size_t dataSize, const value_t* __restrict__ data, uint32_t* counts, uint32_t* __restrict__ assignedClusters, const my_size_t dimension)
{
    unsigned int id = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
    extern __shared__ value_t sharedArray[];
    value_t* localSums = (value_t*)&sharedArray[0];
    uint32_t* localCounts = (uint32_t *)&sharedArray[blockDim.y * meansSize * dimension];

    // Zero this y-row's private copy of the partial sums and counts.
    // Thread x covers means x, x + blockDim.x, ...
    for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
    {
        for (my_size_t d = 0; d < dimension; ++d)
        {
            localSums[threadIdx.y * meansSize * dimension + m * dimension + d] = 0;
        }
        localCounts[threadIdx.y * meansSize + m] = 0;
    }
    // BUGFIX: a row's initializers and accumulators are the same y-group of
    // threads but can span several warps, so a barrier is required before the
    // atomic accumulation below reads/updates the freshly zeroed memory.
    __syncthreads();

    int clusterID = -1;
    // BUGFIX: guard the distance scan so tail threads (id >= dataSize) do not
    // read past the end of data[].
    if (id < dataSize)
    {
        value_t minDistance = LLONG_MAX, distance = 0, difference = 0;
        for (my_size_t m = 0; m < meansSize; ++m)
        {
            distance = 0;
            for (my_size_t d = 0; d < dimension; ++d)
            {
                difference = means[m * dimension + d] - data[id * dimension + d];
                distance += difference * difference;
            }
            if (minDistance > distance)
            {
                minDistance = distance;
                clusterID = m;
            }
        }
        // add data to shared memory
        assignedClusters[id] = clusterID;
        for (my_size_t d = 0; d < dimension; ++d)
        {
            atomicAdd(&localSums[threadIdx.y * dimension * meansSize + clusterID * dimension + d], data[id * dimension + d]);
        }
        atomicInc(&localCounts[threadIdx.y * meansSize + clusterID], INT_MAX);
    }
    __syncthreads();

    // Tree-reduce the blockDim.y private copies into copy 0.
    for (my_size_t r = blockDim.y / 2; r > 0; r >>= 1)
    {
        // Thread x covers means x, x + blockDim.x, ...
        for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
        {
            // Rows with y >= r help: y % r selects the destination row and
            // y / r the starting dimension (step blockDim.y / r), so every
            // (row, dimension) pair is handled exactly once.
            for (my_size_t d = threadIdx.y / r; d < dimension; d += blockDim.y / r)
            {
                localSums[(threadIdx.y % r) * dimension * meansSize + m * dimension + d] += localSums[((threadIdx.y % r) + r) * dimension * meansSize + m * dimension + d];
            }
            // BUGFIX: this add previously ran on every thread, so each count
            // was folded blockDim.y / r times per step; only rows below r may
            // add, exactly once.
            if (threadIdx.y < r)
            {
                localCounts[threadIdx.y * meansSize + m] += localCounts[(threadIdx.y + r) * meansSize + m];
            }
        }
        __syncthreads();
    }

    // Flush this block's partial result to its global slot.
    for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
    {
        // thread.y is the offset, step is blockDim.y
        for (my_size_t d = threadIdx.y; d < dimension; d += blockDim.y)
        {
            atomicAdd(&measnSums[(blockIdx.x * meansSize + m) * dimension + d], localSums[m * dimension + d]);
        }
        // BUGFIX: the count flush also ran once per threadIdx.y, inflating the
        // global counts by a factor of blockDim.y.
        if (threadIdx.y == 0)
        {
            atomicAdd(&counts[blockIdx.x * meansSize + m], localCounts[m]);
        }
    }
}
// Transposed-layout variant of findNearestClusterFewMeansKernelV3: the shared
// partial-sum matrix is stored dimension-major
// (copy * meansSize * dimension + d * meansSize + m) so consecutive x threads
// touch consecutive shared-memory words. The global measnSums layout is
// unchanged (mean-major). Same shared-memory requirement and launch shape as
// the non-transposed kernel; blockDim.y assumed a power of two.
__global__ void findNearestClusterFewMeansSharedTransposedKernelV3(const my_size_t meansSize, const value_t* __restrict__ means, value_t *measnSums, const my_size_t dataSize, const value_t* __restrict__ data, uint32_t* counts, uint32_t* __restrict__ assignedClusters, const my_size_t dimension)
{
    unsigned int id = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
    extern __shared__ value_t sharedArray[];
    value_t* localSums = (value_t*)&sharedArray[0];
    uint32_t* localCounts = (uint32_t *)&sharedArray[blockDim.y * meansSize * dimension];

    // BUGFIX: the previous initialization wrote entry (m + meansInitOffset),
    // skipping means 0..meansInitOffset-1 and running past the copy, and its
    // localCounts loop ignored threadIdx.x so most counters stayed
    // uninitialized. Initialize exactly like the non-transposed kernel:
    // thread x covers means x, x + blockDim.x, ...
    for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
    {
        for (my_size_t d = 0; d < dimension; ++d)
        {
            localSums[threadIdx.y * meansSize * dimension + d * meansSize + m] = 0;
        }
        localCounts[threadIdx.y * meansSize + m] = 0;
    }
    // BUGFIX: barrier between initialization and accumulation -- the writers
    // and accumulators of a row can live in different warps.
    __syncthreads();

    int clusterID = -1;
    // BUGFIX: bounds guard so tail threads do not read past data[].
    if (id < dataSize)
    {
        value_t minDistance = LLONG_MAX, distance = 0, difference = 0;
        for (my_size_t m = 0; m < meansSize; ++m)
        {
            distance = 0;
            for (my_size_t d = 0; d < dimension; ++d)
            {
                difference = means[m * dimension + d] - data[id * dimension + d];
                distance += difference * difference;
            }
            if (minDistance > distance)
            {
                minDistance = distance;
                clusterID = m;
            }
        }
        // add data to shared memory (dimension-major layout)
        assignedClusters[id] = clusterID;
        for (my_size_t d = 0; d < dimension; ++d)
        {
            atomicAdd(&localSums[threadIdx.y * dimension * meansSize + d * meansSize + clusterID], data[id * dimension + d]);
        }
        atomicInc(&localCounts[threadIdx.y * meansSize + clusterID], INT_MAX);
    }
    __syncthreads();

    // Tree-reduce the blockDim.y private copies into copy 0.
    for (my_size_t r = blockDim.y / 2; r > 0; r >>= 1)
    {
        for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
        {
            // Rows with y >= r help: y % r picks the destination row, y / r the
            // starting dimension (step blockDim.y / r).
            for (my_size_t d = threadIdx.y / r; d < dimension; d += blockDim.y / r)
            {
                localSums[(threadIdx.y % r) * dimension * meansSize + d * meansSize + m] += localSums[((threadIdx.y % r) + r) * dimension * meansSize + d * meansSize + m];
            }
            // BUGFIX: the old `threadIdx.y == 0` guard folded only copies
            // r, r/2, ..., 1 into copy 0 and dropped the rest; use the
            // standard tree-reduction guard instead.
            if (threadIdx.y < r)
            {
                localCounts[threadIdx.y * meansSize + m] += localCounts[(threadIdx.y + r) * meansSize + m];
            }
        }
        __syncthreads();
    }

    // Flush this block's partial result to its global slot (mean-major).
    for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
    {
        for (my_size_t d = threadIdx.y; d < dimension; d += blockDim.y)
        {
            atomicAdd(&measnSums[(blockIdx.x * meansSize + m) * dimension + d], localSums[d * meansSize + m]);
        }
        // BUGFIX: the count flush previously ran once per threadIdx.y,
        // inflating the global counts by a factor of blockDim.y.
        if (threadIdx.y == 0)
        {
            atomicAdd(&counts[blockIdx.x * meansSize + m], localCounts[m]);
        }
    }
}
// Reduces the per-block copies of meansSums/counts produced by the V3 find
// kernels and writes the final means.
// NOTE(review): the reduction starts at r = blockDim.y, so the first fold
// reads copies blockDim.y .. 2*blockDim.y - 1 -- this is only in bounds when
// the kernel is launched with blockDim.y equal to HALF the number of copies
// (and blockDim.y a power of two); confirm against the launch site.
// NOTE(review): an empty cluster (counts[meansID] == 0) divides by zero in the
// final statement -- confirm callers guarantee non-empty clusters.
__global__ void countDivFewMeansKernelV3(const my_size_t meansSize, uint32_t* __restrict__ counts, value_t* __restrict__ means, value_t* __restrict__ meansSums, const my_size_t dimension)
{
    //threadID.z - meansID
    //threadID.y - meansCopyID
    //threadID.x - dimension
    int meansID = threadIdx.z + blockDim.z * blockIdx.x;
    // Halving tree reduction across the copies; copy 0 ends with the totals.
    for (my_size_t r = blockDim.y; r > 0; r >>= 1)
    {
        if (threadIdx.y < r)
        {
            meansSums[threadIdx.y * dimension * meansSize + meansID * dimension + threadIdx.x] += meansSums[(threadIdx.y + r) * dimension * meansSize + meansID * dimension + threadIdx.x];
            if (threadIdx.x == 0)
            {
                counts[threadIdx.y * meansSize + meansID] += counts[(threadIdx.y + r) * meansSize + meansID];
            }
        }
        __syncthreads();
    }
    // Copy 0 now holds the totals; divide to obtain the mean component.
    means[meansID * dimension + threadIdx.x] = meansSums[meansID * dimension + threadIdx.x] / (value_t)counts[meansID];
}
| bda76442dc79283a2a554008c8789119da2d6671.cu | #include "fewMeansKernels.cuh"
// One thread per data point: find the nearest mean (squared Euclidean
// distance), record the assignment, and fold the point into the
// threadIdx.y-th global copy of the cluster sums/counts using atomics.
// Expected launch: 2-D blocks; the flat point index spans blockDim.x * blockDim.y.
__global__ void findNearestClusterFewMeansKernel(const my_size_t meansSize, const value_t* __restrict__ means, value_t *measnSums, const value_t* __restrict__ data, uint32_t* counts, uint32_t* __restrict__ assignedClusters, const my_size_t dimension)
{
    const unsigned int point = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
    const value_t* sample = data + point * dimension;

    // Scan every mean, remembering the closest one.
    int best = -1;
    value_t bestDistance = LLONG_MAX; // initial "infinity"; LLONG_MAX converts to value_t
    for (my_size_t mean = 0; mean < meansSize; ++mean)
    {
        value_t dist = 0;
        for (my_size_t dim = 0; dim < dimension; ++dim)
        {
            const value_t delta = means[mean * dimension + dim] - sample[dim];
            dist += delta * delta;
        }
        if (dist < bestDistance)
        {
            bestDistance = dist;
            best = mean;
        }
    }

    if (best == -1)
        return; // only possible when meansSize == 0

    // Accumulate into copy threadIdx.y of the partial statistics.
    atomicInc(&counts[threadIdx.y * meansSize + best], INT_MAX);
    assignedClusters[point] = best;
    for (my_size_t dim = 0; dim < dimension; ++dim)
    {
        atomicAdd(&measnSums[threadIdx.y * meansSize * dimension + best * dimension + dim], sample[dim]);
    }
}
// Folds the `cellsCount` partial copies of the cluster sums/counts (as produced
// by findNearestClusterFewMeansKernel) and writes the final means:
// means[id] = total_sum / total_count.
// One thread per (mean, dimension) element; threadIdx.y selects the mean within
// the block, so blockIdx.x * blockDim.y + threadIdx.y is the global mean index.
// NOTE(review): assumes meansSums holds cellsCount consecutive copies of a
// (meansSize x dimension) matrix and counts holds cellsCount copies of a
// meansSize vector -- confirm against the launch configuration.
__global__ void countDivFewMeansKernel(const my_size_t meansSize, uint32_t* __restrict__ counts, value_t* __restrict__ means, const value_t* __restrict__ meansSums, const my_size_t dimension, const uint32_t cellsCount)
{
    // Flat index of this thread's (mean, dimension) element in copy 0.
    int id = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
    uint32_t count = 0;
    // Start from copy 0, then accumulate the remaining cellsCount - 1 copies.
    value_t sum = meansSums[id];
    count = counts[blockIdx.x * blockDim.y + threadIdx.y];
    for (my_size_t i = 1; i < cellsCount; i++)
    {
        sum += meansSums[i * dimension * meansSize + id];
        count += counts[i * meansSize + blockIdx.x * blockDim.y + threadIdx.y];
    }
    // Final mean component.
    // NOTE(review): an empty cluster (count == 0) divides by zero here --
    // confirm callers guarantee every cluster receives at least one point.
    means[id] = sum / count;
    // counts are not needed
    // Collapse the reduced count into copy 0 (one lane per mean writes it).
    if (threadIdx.x == 0)
    {
        counts[blockIdx.x * blockDim.y + threadIdx.y] = count;
    }
}
// each thread has own copy...delete?
// Per-thread-copy variant: each data point owns a private slice of counts and
// measnSums (indexed by the point id), so no atomics are required. One thread
// per data point, 1-D launch.
__global__ void findNearestClusterFewMeansKernelV2(const my_size_t meansSize, const value_t* __restrict__ means, value_t* __restrict__ measnSums, const value_t* __restrict__ data, uint32_t* __restrict__ counts, uint32_t* __restrict__ assignedClusters, const my_size_t dimension)
{
    const unsigned int point = threadIdx.x + blockDim.x * blockIdx.x;
    const value_t* sample = data + point * dimension;

    // Track the nearest mean by squared Euclidean distance.
    int best = -1;
    value_t bestDistance = LLONG_MAX; // initial "infinity"; LLONG_MAX converts to value_t
    for (my_size_t mean = 0; mean < meansSize; ++mean)
    {
        value_t dist = 0;
        for (my_size_t dim = 0; dim < dimension; ++dim)
        {
            const value_t delta = means[mean * dimension + dim] - sample[dim];
            dist += delta * delta;
        }
        if (dist < bestDistance)
        {
            bestDistance = dist;
            best = mean;
        }
    }

    if (best != -1)
    {
        // Record the assignment and fold the sample into this point's private
        // copy of the cluster statistics.
        ++counts[meansSize * point + best];
        assignedClusters[point] = best;
        for (my_size_t dim = 0; dim < dimension; ++dim)
        {
            measnSums[dimension * (point * meansSize + best) + dim] += sample[dim];
            //atomicAdd(&measnSums[blockIdx.y * meansSize * dimension + clusterID * dimension + j], data[id * dimension + j]);
        }
    }
}
// Tree-reduces the per-data-point private copies of meansSums/counts written by
// findNearestClusterFewMeansKernelV2, then divides to obtain the means.
// NOTE(review): __syncthreads() only synchronizes within one block, but the
// reduction pairs indices id and id + i that can live in different blocks
// whenever dataSize / 2 > blockDim.x -- confirm this kernel is launched with a
// single block, otherwise the reduction races.
// NOTE(review): the j-loop only reduces the first mean's slice of each copy
// (no mean offset is applied), and the final division uses
// counts[id % dimension] where the means layout (meanID * dimension + d)
// suggests counts[id / dimension]. The "delete?" comment on the matching find
// kernel hints this is an experimental/dead variant -- verify intent before use.
__global__ void countDivFewMeansKernelV2(const my_size_t dataSize, const my_size_t meansSize, uint32_t* __restrict__ counts, value_t* __restrict__ means, value_t* __restrict__ meansSums, const my_size_t dimension)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    // Halving tree reduction over the dataSize private copies.
    for (my_size_t i = dataSize / 2; i > 0; i >>= 1)
    {
        if (id < i)
        {
            for (my_size_t j = 0; j < dimension; ++j)
            {
                meansSums[id * dimension * meansSize + j] += meansSums[(id + i) * dimension * meansSize + j];
            }
            counts[id * meansSize] += counts[(id + i) * meansSize];
        }
        __syncthreads();
    }
    means[id] /= counts[id % dimension];
}
// One thread per data point: find the nearest mean (squared Euclidean
// distance), store the assignment, and accumulate the point into a
// per-threadIdx.y shared-memory copy of the cluster sums/counts. The copies
// are tree-reduced (blockDim.y assumed a power of two) and the block's partial
// result is flushed into slot blockIdx.x of measnSums/counts with atomics.
// Dynamic shared memory must hold blockDim.y * meansSize * dimension value_t's
// followed by blockDim.y * meansSize uint32_t's.
__global__ void findNearestClusterFewMeansKernelV3(const my_size_t meansSize, const value_t* __restrict__ means, value_t *measnSums, const my_size_t dataSize, const value_t* __restrict__ data, uint32_t* counts, uint32_t* __restrict__ assignedClusters, const my_size_t dimension)
{
    unsigned int id = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
    extern __shared__ value_t sharedArray[];
    value_t* localSums = (value_t*)&sharedArray[0];
    uint32_t* localCounts = (uint32_t *)&sharedArray[blockDim.y * meansSize * dimension];

    // Zero this y-row's private copy of the partial sums and counts.
    // Thread x covers means x, x + blockDim.x, ...
    for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
    {
        for (my_size_t d = 0; d < dimension; ++d)
        {
            localSums[threadIdx.y * meansSize * dimension + m * dimension + d] = 0;
        }
        localCounts[threadIdx.y * meansSize + m] = 0;
    }
    // BUGFIX: a row's initializers and accumulators are the same y-group of
    // threads but can span several warps, so a barrier is required before the
    // atomic accumulation below reads/updates the freshly zeroed memory.
    __syncthreads();

    int clusterID = -1;
    // BUGFIX: guard the distance scan so tail threads (id >= dataSize) do not
    // read past the end of data[].
    if (id < dataSize)
    {
        value_t minDistance = LLONG_MAX, distance = 0, difference = 0;
        for (my_size_t m = 0; m < meansSize; ++m)
        {
            distance = 0;
            for (my_size_t d = 0; d < dimension; ++d)
            {
                difference = means[m * dimension + d] - data[id * dimension + d];
                distance += difference * difference;
            }
            if (minDistance > distance)
            {
                minDistance = distance;
                clusterID = m;
            }
        }
        // add data to shared memory
        assignedClusters[id] = clusterID;
        for (my_size_t d = 0; d < dimension; ++d)
        {
            atomicAdd(&localSums[threadIdx.y * dimension * meansSize + clusterID * dimension + d], data[id * dimension + d]);
        }
        atomicInc(&localCounts[threadIdx.y * meansSize + clusterID], INT_MAX);
    }
    __syncthreads();

    // Tree-reduce the blockDim.y private copies into copy 0.
    for (my_size_t r = blockDim.y / 2; r > 0; r >>= 1)
    {
        // Thread x covers means x, x + blockDim.x, ...
        for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
        {
            // Rows with y >= r help: y % r selects the destination row and
            // y / r the starting dimension (step blockDim.y / r), so every
            // (row, dimension) pair is handled exactly once.
            for (my_size_t d = threadIdx.y / r; d < dimension; d += blockDim.y / r)
            {
                localSums[(threadIdx.y % r) * dimension * meansSize + m * dimension + d] += localSums[((threadIdx.y % r) + r) * dimension * meansSize + m * dimension + d];
            }
            // BUGFIX: this add previously ran on every thread, so each count
            // was folded blockDim.y / r times per step; only rows below r may
            // add, exactly once.
            if (threadIdx.y < r)
            {
                localCounts[threadIdx.y * meansSize + m] += localCounts[(threadIdx.y + r) * meansSize + m];
            }
        }
        __syncthreads();
    }

    // Flush this block's partial result to its global slot.
    for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
    {
        // thread.y is the offset, step is blockDim.y
        for (my_size_t d = threadIdx.y; d < dimension; d += blockDim.y)
        {
            atomicAdd(&measnSums[(blockIdx.x * meansSize + m) * dimension + d], localSums[m * dimension + d]);
        }
        // BUGFIX: the count flush also ran once per threadIdx.y, inflating the
        // global counts by a factor of blockDim.y.
        if (threadIdx.y == 0)
        {
            atomicAdd(&counts[blockIdx.x * meansSize + m], localCounts[m]);
        }
    }
}
// Transposed-layout variant of findNearestClusterFewMeansKernelV3: the shared
// partial-sum matrix is stored dimension-major
// (copy * meansSize * dimension + d * meansSize + m) so consecutive x threads
// touch consecutive shared-memory words. The global measnSums layout is
// unchanged (mean-major). Same shared-memory requirement and launch shape as
// the non-transposed kernel; blockDim.y assumed a power of two.
__global__ void findNearestClusterFewMeansSharedTransposedKernelV3(const my_size_t meansSize, const value_t* __restrict__ means, value_t *measnSums, const my_size_t dataSize, const value_t* __restrict__ data, uint32_t* counts, uint32_t* __restrict__ assignedClusters, const my_size_t dimension)
{
    unsigned int id = threadIdx.x + threadIdx.y * blockDim.x + blockIdx.x * blockDim.x * blockDim.y;
    extern __shared__ value_t sharedArray[];
    value_t* localSums = (value_t*)&sharedArray[0];
    uint32_t* localCounts = (uint32_t *)&sharedArray[blockDim.y * meansSize * dimension];

    // BUGFIX: the previous initialization wrote entry (m + meansInitOffset),
    // skipping means 0..meansInitOffset-1 and running past the copy, and its
    // localCounts loop ignored threadIdx.x so most counters stayed
    // uninitialized. Initialize exactly like the non-transposed kernel:
    // thread x covers means x, x + blockDim.x, ...
    for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
    {
        for (my_size_t d = 0; d < dimension; ++d)
        {
            localSums[threadIdx.y * meansSize * dimension + d * meansSize + m] = 0;
        }
        localCounts[threadIdx.y * meansSize + m] = 0;
    }
    // BUGFIX: barrier between initialization and accumulation -- the writers
    // and accumulators of a row can live in different warps.
    __syncthreads();

    int clusterID = -1;
    // BUGFIX: bounds guard so tail threads do not read past data[].
    if (id < dataSize)
    {
        value_t minDistance = LLONG_MAX, distance = 0, difference = 0;
        for (my_size_t m = 0; m < meansSize; ++m)
        {
            distance = 0;
            for (my_size_t d = 0; d < dimension; ++d)
            {
                difference = means[m * dimension + d] - data[id * dimension + d];
                distance += difference * difference;
            }
            if (minDistance > distance)
            {
                minDistance = distance;
                clusterID = m;
            }
        }
        // add data to shared memory (dimension-major layout)
        assignedClusters[id] = clusterID;
        for (my_size_t d = 0; d < dimension; ++d)
        {
            atomicAdd(&localSums[threadIdx.y * dimension * meansSize + d * meansSize + clusterID], data[id * dimension + d]);
        }
        atomicInc(&localCounts[threadIdx.y * meansSize + clusterID], INT_MAX);
    }
    __syncthreads();

    // Tree-reduce the blockDim.y private copies into copy 0.
    for (my_size_t r = blockDim.y / 2; r > 0; r >>= 1)
    {
        for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
        {
            // Rows with y >= r help: y % r picks the destination row, y / r the
            // starting dimension (step blockDim.y / r).
            for (my_size_t d = threadIdx.y / r; d < dimension; d += blockDim.y / r)
            {
                localSums[(threadIdx.y % r) * dimension * meansSize + d * meansSize + m] += localSums[((threadIdx.y % r) + r) * dimension * meansSize + d * meansSize + m];
            }
            // BUGFIX: the old `threadIdx.y == 0` guard folded only copies
            // r, r/2, ..., 1 into copy 0 and dropped the rest; use the
            // standard tree-reduction guard instead.
            if (threadIdx.y < r)
            {
                localCounts[threadIdx.y * meansSize + m] += localCounts[(threadIdx.y + r) * meansSize + m];
            }
        }
        __syncthreads();
    }

    // Flush this block's partial result to its global slot (mean-major).
    for (my_size_t m = threadIdx.x; m < meansSize; m += blockDim.x)
    {
        for (my_size_t d = threadIdx.y; d < dimension; d += blockDim.y)
        {
            atomicAdd(&measnSums[(blockIdx.x * meansSize + m) * dimension + d], localSums[d * meansSize + m]);
        }
        // BUGFIX: the count flush previously ran once per threadIdx.y,
        // inflating the global counts by a factor of blockDim.y.
        if (threadIdx.y == 0)
        {
            atomicAdd(&counts[blockIdx.x * meansSize + m], localCounts[m]);
        }
    }
}
// Reduces the per-block copies of meansSums/counts produced by the V3 find
// kernels and writes the final means.
// NOTE(review): the reduction starts at r = blockDim.y, so the first fold
// reads copies blockDim.y .. 2*blockDim.y - 1 -- this is only in bounds when
// the kernel is launched with blockDim.y equal to HALF the number of copies
// (and blockDim.y a power of two); confirm against the launch site.
// NOTE(review): an empty cluster (counts[meansID] == 0) divides by zero in the
// final statement -- confirm callers guarantee non-empty clusters.
__global__ void countDivFewMeansKernelV3(const my_size_t meansSize, uint32_t* __restrict__ counts, value_t* __restrict__ means, value_t* __restrict__ meansSums, const my_size_t dimension)
{
    //threadID.z - meansID
    //threadID.y - meansCopyID
    //threadID.x - dimension
    int meansID = threadIdx.z + blockDim.z * blockIdx.x;
    // Halving tree reduction across the copies; copy 0 ends with the totals.
    for (my_size_t r = blockDim.y; r > 0; r >>= 1)
    {
        if (threadIdx.y < r)
        {
            meansSums[threadIdx.y * dimension * meansSize + meansID * dimension + threadIdx.x] += meansSums[(threadIdx.y + r) * dimension * meansSize + meansID * dimension + threadIdx.x];
            if (threadIdx.x == 0)
            {
                counts[threadIdx.y * meansSize + meansID] += counts[(threadIdx.y + r) * meansSize + meansID];
            }
        }
        __syncthreads();
    }
    // Copy 0 now holds the totals; divide to obtain the mean component.
    means[meansID * dimension + threadIdx.x] = meansSums[meansID * dimension + threadIdx.x] / (value_t)counts[meansID];
}
|
b54fd4dd6586f9caf47c8c88fa1e9292d2d6f5a4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <hip/hip_runtime_api.h>
#include </u/local/cuda/5.0/include/cuda.h>
#include </u/local/cuda/5.0/include/cuda_runtime.h>
#include </u/local/cuda/5.0/include/hiprand/hiprand_kernel.h>
#include "./NDR_Solver.cu"
using namespace std;
#define VARIATION
//#define OUTPUT_DETAIL
#define shaodi_pi 3.1415926
// Abort with file/line context when a HIP runtime call fails.
// BUGFIX: the comparison was written "! =" (a space splitting the operator),
// which does not compile; it must be the single token "!=".
#define CUDA_CALL(x) do { if( (x) != hipSuccess ){\
printf("Error at %s:%d\n",__FILE__,__LINE__ );\
exit(1);} } while(0)
#define CURAND_CALL(x) do { if((x)!=HIPRAND_STATUS_SUCCESS) {\
printf("Error at %s:%d\n",__FILE__,__LINE__);\
exit(1);}} while(0)
#ifndef OUTPUT_DETAIL
__global__ void LLG(double* g_v_para, double* writeSuccess, int initialstate, double t_step,
int trials_p_thread, bool isPS, double ori_length, double sigma_l, double ori_width, double sigma_w,
double ori_tfl, double sigma_tfl, double sigma_mgo, double ori_Nx, double ori_Ny, double ori_Nz,
double* g_lin_dep_factor, int isNDR, const double* VIGndr, const double* VIGmos, double* g_Energy,
double* g_SwitchingTime, double* g_EndVndr, double Cload ){
#endif
#ifdef OUTPUT_DETAIL
__global__ void LLG(double* g_v_para, double* writeSuccess, int initialstate, double t_step,
int trials_p_thread, bool isPS, double ori_length, double sigma_l, double ori_width,
double sigma_w, double ori_tfl, double sigma_tfl, double sigma_mgo, double ori_Nx,
double ori_Ny, double ori_Nz, double* g_lin_dep_factor, int isNDR, const double* VIGndr,
const double* VIGmos, double* g_Energy, double* g_SwitchingTime, double* g_EndVndr, double Cload,
double* g_NDRturn, double* g_initialR, double* g_NDRoff){
#endif
/* -------------------------------------------
Input Parameters From User
-------------------------------------------*/
int this_id = (blockIdx.x * blockDim.x + threadIdx.x) ;
//initiate state for following random generation
hiprandState_t localState;
hiprand_init(this_id, this_id, 0, &localState);
double Nx = ori_Nx;//origin: // x Demagnetization factor
double Ny = ori_Ny; // y Demagnetization factor
double Nz = ori_Nz; // z Demagnetization factor
//Parameter calculation
double length = ori_length; //length of MTJ
double width = ori_width; //width of MTJ
double Rp0 = 2e3;
double dMgO_a = 1.54e-3, dMgO_b = 1.1537278e10;//origin:9.24e9;
double Area = shaodi_pi*length*width/4; // Area without variation
double dMgO_base = (log(Rp0 * Area * 10e12) - log(dMgO_a)) / dMgO_b; // MgO thickness [m]
double TMR = 1.5; // TMR at zero bias voltage
double Rap0 = Rp0 *( 1+TMR);
double Temperature = g_v_para[11];//27+273; // Temperature
double pulse_width = g_v_para[1];
double V_p = g_v_para[2], V_ap = g_v_para[3];
double sigma_V_p = g_v_para[4], sigma_V_ap = g_v_para[5];
if( sigma_V_p == 0){
sigma_V_p = 1e-9;
}
double mean_tr = g_v_para[6], sigma_tr = g_v_para[7], mean_tf = g_v_para[8], sigma_tf= g_v_para[9], delay_time = 0e-9, sense_time = g_v_para[10];
int n_sim = (pulse_width+delay_time + sense_time)/t_step ;//Simulation time
#ifdef OUTPUT_DETAIL
double peak_voltage = Peak_voltage(VIGndr);
double peak_current = IG_V(peak_voltage,VIGndr,1);
#endif
/* -------------------------------------------
Constants
-------------------------------------------*/
double hbar = 1.05457173e-34; // Reduced Planck constant, [J*s]
double k = 1.3806488e-23; // Boltzmann constant, [J/K]
double u0 = 4e-7*shaodi_pi; // Vacuum permeability, [Vs/(Am)]
double q = 1.60217657e-19; // Electron charge, [C]
double alphac = 0.02; // LLGE damping factor
double gammap = (221276/(1+pow(alphac,2))); // Gyromagnetic ratio [m/(A x s)]
double T0 = 1120;
double Ms0 = 1393128.323;//origin:1.44e6;
double Ki0 =1.479036e-3;//origin:1.46e-3;
double Xi0 = 0; //53.39247e-15; //origin:58.9e-15;
if(isPS) Xi0 = 53.39247e-15;
double P_tunnel = 0.2; // the polarization of the tunnel currentdouble
double Pol = 1; // Polarization for Spin Torque
/******************simulation trials *************/
for( int i_trial = 0; i_trial < trials_p_thread; i_trial++){
length = ori_length; //length of MTJ
width = ori_width; //width of MTJ
double tfl = ori_tfl; //thickness of free layer
double rise_time = mean_tr;
double fall_time = mean_tf;
#ifdef VARIATION
//Dimention variation
rise_time += sigma_tr*hiprand_normal_double(&localState);
fall_time += sigma_tf*hiprand_normal_double(&localState);
double v_variation = sigma_V_p * hiprand_normal_double(&localState);
length = ori_length + sigma_l*hiprand_normal_double(&localState);
width = ori_width + sigma_w*hiprand_normal_double(&localState);
tfl = ori_tfl + sigma_tfl*hiprand_normal_double(&localState);
double dMgO = dMgO_base + sigma_mgo*hiprand_normal_double(&localState);
double temp_Nx = ori_Nx + g_lin_dep_factor[0]*(length-ori_length) + g_lin_dep_factor[1] * ( width - ori_width) + g_lin_dep_factor[2] * (tfl - ori_tfl) ;
double temp_Ny = ori_Ny + g_lin_dep_factor[3]*(length-ori_length) + g_lin_dep_factor[4] * ( width - ori_width) + g_lin_dep_factor[5] * (tfl - ori_tfl) ;
double temp_Nz = ori_Nz + g_lin_dep_factor[6]*(length-ori_length) + g_lin_dep_factor[7] * ( width - ori_width) + g_lin_dep_factor[8] * (tfl - ori_tfl) ;
Nx = temp_Nx / (temp_Nx + temp_Ny + temp_Nz);
Ny = temp_Ny / (temp_Nx + temp_Ny + temp_Nz);
Nz = temp_Nz / (temp_Nx + temp_Ny + temp_Nz);
#endif
Area = shaodi_pi*length*width/4; // Area without variation
double areamtj = Area ; // MTJ area [m^2]
double Rp = exp(dMgO * dMgO_b)*dMgO_a / (Area * 10e12);
double Rap = (1+TMR)*Rp; // Anti-parallel resistance [Ohms]
double B1 = 0;//origin 0.2 // Field-like torque linear parameter [unitless]
double B2 = 0;//origin 0.02; // Field-like torque quadratic parameter [1/A]
int sim_per_trial = 1;
if(isNDR >=2){ // read mode, simulate the read margin by read both ap and p MTJ
sim_per_trial = 2;
}
for (int i_read = 0; i_read < sim_per_trial; i_read ++){
int initial_state = initialstate; // Inital state [0 = parallel, 1 = anti-parallel]
if ( i_read == 1){ // simulate read for the other state
initial_state = 1 - initial_state;
}
double P [3] = {0, 0, -1}; // Direction of polarization
double Ext [3] = {0, 0, 0}; // External magnetic field [A/m] - 1 oersted [Oe] = 79.5774715459424 ampere/meter [A/m]
double Ms = Ms0 * ( 1 - pow(Temperature/T0,1.5)); // Saturation magnetization [A/m] - 1e6 A/m = 1000 emu/cc
double dstray = 20e-9, tstray = 1.164656e-9;
//double Ext[3] = {-Ms*length*width/4/shaodi_pi*((dstray+tstray)/(pow(length/2,2)*sqrt(pow(length/2,2)+pow(dstray+tstray,2)))-(dstray-tstray)/(pow(length/2,2)*sqrt(pow(length/2,2)+pow(dstray-tstray,2)))),0,0};
double Ki = Ki0 * pow(Ms/Ms0, 2.18); // Anisotropy field constant [J/m^2]
double Xi = Xi0* pow(Ms/Ms0, 2.83); // VCMA field constant [J/(V x m)]
double Gt = 1/(Rp*(1+(TMR/(TMR+2)))); // Direct elastic tunneling conductance [S]
double KiPF = (2*Ki)/(tfl*u0*Ms); // Prefactor for interface anisotropy effective field
double VCMAPF = (2*Xi)/(u0*Ms*dMgO*tfl); // Prefactor for VCMA effective field
double Gsi = 0; // Conductance due to imperfections in Mgo [S]
//double Jc0 = (2*Ms*tfl*q*u0)/(hbar*Pol); // Normalization Constant for Current Density
double volume = areamtj*tfl; // MTJ volume [m^3]
double Hth = sqrt((2*k*Temperature*alphac)/(u0*gammap*Ms*volume*t_step)); // Amplitude of Thermal Field
/* -------------------------------------------
Internal Variables
-------------------------------------------*/
double costheta = 0; // the angle between the magnization of free and reference layers
double g_sv = 0; // the polarization efficiency in spin valve
double g_tunnel = 0; // the polarization efficiency in tunnel current
//double m_old [3] = {0, 0, 0}; // Normalized previous magnetization
double Heff_old [3] = {0, 0, 0}; // Previous Heff components [A/m]
double m_int [3] = {0, 0, 0}; // Intermediate normalized magnetization
double dm_int [3] = {0, 0, 0}; // Intermediate derivative of magnetization
double M_int [3] = {0, 0, 0}; // Intermediate denormalized magnetization
//double Heff_int [3] = {0, 0, 0}; // Intermediate Heff components [A/m]
double dm [3] = {0, 0, 0}; // Time derivative of magnetization [1/s]
double M [3] = {0, 0, 0}; // Denormalized magnetization
double mcrossp_int [3] = {0, 0, 0}; // Intermediate cross product components (m x p)
double mcrossHeff_int [3] = {0, 0, 0}; // Intermediate cross product components (m x Heff)
double mcrossHth_int [3] = {0, 0, 0}; // Intermediate cross product components (m x Hth)
double mcrossmcrossp_int [3] = {0, 0, 0}; // Intermediate double cross product components (m x m x p)
double mcrossmcrossHeff_int [3] = {0, 0, 0}; // Intermediate double cross product components (m x m x Heff)
double mcrossp [3] = {0, 0, 0}; // Cross product components (m x p)
double mcrossHeff [3] = {0, 0, 0}; // Cross product components (m x Heff)
double mcrossHth [3] = {0, 0, 0}; // Cross product components (m x Hth)
double mcrossmcrossp [3] = {0, 0, 0}; // Cross product components (m x m x p)
double mcrossmcrossHeff [3] = {0, 0, 0}; // Cross product components (m x m x Heff)
double randomHth [3] = {0, 0, 0}; // Vector of random variables
double STT = 0; // Strenght of STT term
double FLT = 0; // Strenght of FLT term
// -------------------------------------------
// Initialize Variables
// -------------------------------------------
double m [3] = {0, 0, 1}; // Normalized mangetization
double R = Rap; // MTJ resistance [Ohms]
if(initial_state != 1){
R = Rp; // MTJ resistance [Ohms]
m[2] = -1; // Normalized mangetization
}
double J = 0; // Current density [A/m^2]
double V = 0; // MTJ Voltage [V]
double V_offset = 0;
/*********** edition for NDR starts here ***********/
//The parameters for calculating NDR
double Vndr = 0;
double Imtj = 0; //current through MTJ and nmos
double Vmos = 0;
double d_Rmtj = 0; // delta Rmtj
double d_Imtj = 0; // delta Imtj
double Indr = 0; //current through NDR
double d_Vndr = 0; //
double Csline = Cload;
double Cbline = Cload;
double d_vdd =0, new_vdd =0, vdd =0;
V = 0; // mtj voltage
#ifdef OUTPUT_DETAIL
bool isNDRturn = false, isNDRoff = false;
g_NDRturn [this_id*trials_p_thread + i_trial] = 0;
g_NDRoff [this_id*trials_p_thread + i_trial] = 0;
g_initialR [this_id*trials_p_thread + i_trial] = R;
#endif
if(isNDR == 1){ //NDR write
Vndr = Solve_stable_vndr(VIGndr, VIGmos, R, V_ap);
Indr = IG_V(Vndr, VIGndr,1);
Imtj = Indr;
Vmos = V_I(Imtj,VIGmos);
vdd = V_ap;
}
double energy = V_ap*V_ap*Cload; // Pre-charge energy
bool isSwitched = false;
g_SwitchingTime[this_id * trials_p_thread+i_trial] = pulse_width;
/*********** edition for NDR ends here ***********/
for(int i=1;i<=n_sim;i++){
// Update values
double m_old [3] = {m[0], m[1], m[2]};
// Update voltage/current density
double V_ub = V_ap + V_offset;
#ifdef VARIATION
V_ub += v_variation * ( 1 + (sigma_V_ap/sigma_V_p - 1) * (R - Rp0)/(Rap0 - Rp0) ) ;
#endif
double curr_time = i * t_step;
if(curr_time < delay_time || curr_time > delay_time + pulse_width){
new_vdd = 0;
}
else{
if(curr_time < delay_time + rise_time){
new_vdd = (curr_time - delay_time)/rise_time * V_ub;
}
else{
if(curr_time <= delay_time + pulse_width - fall_time){
new_vdd = V_ub;
}
else{
new_vdd = V_ub * ( delay_time + pulse_width - curr_time) / fall_time;
}
}
}
d_vdd = new_vdd - vdd;
vdd = new_vdd;
//NDR calculation
/*********** edition for NDR starts here ***********/
#ifdef OUTPUT_DETAIL
if(isNDR==1){
if( !isNDRturn && abs(Vndr) > abs(peak_voltage)){
g_NDRturn [this_id*trials_p_thread + i_trial] = R;
isNDRturn = true;
}
if( isNDRturn && !isNDRoff && abs(Indr) < 0.25*abs(peak_current)){
g_NDRoff [this_id*trials_p_thread + i_trial] = R;
isNDRoff = true;
}
} else if(isNDR>=2){
if( !isNDRturn && abs(Vndr) > abs(peak_voltage)){
g_NDRturn [this_id*trials_p_thread + i_trial] = curr_time;
isNDRturn = true;
}
if (!isNDRoff && curr_time >= delay_time + pulse_width+sense_time){
// if (!isNDRoff && i>=1){
isNDRoff = true;
// g_initialR [this_id*trials_p_thread + i_trial] = d_Vndr;
// g_NDRturn [this_id*trials_p_thread + i_trial] = Imtj;
g_NDRoff [this_id*trials_p_thread + i_trial] = Vndr + V + Vmos;
}
}
#endif
if(isNDR==1 ){ //NDR write for AP-MTJ
//Solve the series of one NDR, one MTJ and one MOS with cap at MTJ
d_Imtj = ( t_step*Indr - t_step*Imtj - Cload*Imtj*d_Rmtj) / ( Cload/IG_V(Vmos,VIGmos,2) + Cload*R);
Imtj += d_Imtj;
Vmos = V_I(Imtj, VIGmos);
Vndr = vdd - Imtj*R - Vmos;
Indr = IG_V(Vndr,VIGndr,1);
V = Imtj*R;
energy += vdd * Indr * t_step;
}
else if(isNDR == 3){ // NDR read
if(curr_time < delay_time+pulse_width){ // precharging
//solve equation: Imtj = (vdd - Vmos(Imtj) - Vndr)/Rmtj = d(Csline*Vndr)/dt + Indr(Vndr)
Vmos = V_I(Imtj,VIGmos);
d_Imtj = (d_vdd - d_Vndr - d_Rmtj*Imtj)/(R+1/IG_V(Vmos,VIGmos,2));
Imtj += d_Imtj;
Indr = IG_V(Vndr,VIGndr,1);
d_Vndr = ( Imtj - Indr ) * t_step / Csline;
Vndr += d_Vndr;
V = Imtj*R;
energy += vdd * Imtj * t_step;
}
else{ // discharging state
//Solve equation: - d ((Imtj*R + Vmos(Imtj) + Vndr)*Cbline )/dt = Imtj = d(Vndr*Csline)/dt + Indr(Vndr)
d_Imtj = (-Imtj*t_step - Cbline*Imtj*d_Rmtj -d_Vndr*Cbline) / ( Cbline/IG_V(Vmos,VIGmos,2) + Cbline*R);
Imtj += d_Imtj;
Vmos = V_I(Imtj,VIGmos);
d_Vndr = (Imtj - Indr) * t_step / Csline;
Vndr += d_Vndr;
Indr = IG_V(Vndr,VIGndr,1);
V = Imtj*R;
}
}
else if (isNDR == 2 || isNDR == 0){
if(curr_time < delay_time+pulse_width){ // precharging
//solve equation: Imtj = (vdd - Vmos(Imtj) )/Rmtj = 0
d_Imtj = (d_vdd-d_Rmtj*Imtj)/(R+1/IG_V(Vmos,VIGmos,2));
Imtj += d_Imtj;
V = Imtj*R;
Vmos = vdd - V;
Indr = Imtj + R*Cbline*d_Imtj/t_step + Imtj*Cbline*d_Rmtj/t_step + Cbline/IG_V(Vmos,VIGmos,2)*d_Imtj/t_step;//The total I but not current of ndr, because there is no ndr
energy += vdd * Indr * t_step;
}
else{ // discharging state
//Solve equation: - d ((Imtj*R + Vmos(Imtj) )*Cbline )/dt = Imtj
d_Imtj = (-Imtj*t_step - Cbline*Imtj*d_Rmtj ) / ( Cbline/IG_V(Vmos,VIGmos,2) + Cbline*R);
Imtj += d_Imtj;
Vmos = V_I(Imtj,VIGmos);
V = Imtj*R;
}
}
// else{
//Solve the series of one MTJ and one MOS with cap at MTJ
// if(curr_time < delay_time+pulse_width){
// d_Imtj = (d_vdd-d_Rmtj*Imtj)/(R+1/IG_V(Vmos,VIGmos,2));
// Imtj += d_Imtj;
// V = Imtj*R;
// Vmos = vdd - V;
// Indr = Imtj + R*Cload*d_Imtj/t_step + Imtj*Cload*d_Rmtj/t_step + Cload/IG_V(Vmos,VIGmos,2)*d_Imtj/t_step;//The total I but not current of ndr, because there is no ndr
// energy += vdd * Indr * t_step;
// }
//Test whether switched
if(!isSwitched){
if( (initial_state ==0 && R >= Rp*(1+TMR/2)) || ( initial_state ==1 && R <= Rp*(1+TMR/2)) ){
isSwitched = true;
g_SwitchingTime[this_id * trials_p_thread+i_trial] = curr_time;
}
}
/*********** edition for NDR ends here ***********/
// Update effective magnetic field Heff_old
Heff_old[0] = Ext[0]-Ms*Nx*m_old[0];
Heff_old[1] = Ext[1]-Ms*Ny*m_old[1];
Heff_old[2] = Ext[2]-Ms*Nz*m_old[2]+(KiPF*m_old[2]-VCMAPF*m_old[2]*V);
//Calculate STT factor
J = V/(R*areamtj);
// costheta = m_old[0]*P[0] + m_old[1]*P[1] + m_old[2]*P[2];
// g_tunnel = 1/2 * P_tunnel / ( 1 + pow(P_tunnel,2)*costheta);
// g_sv = 1 / ( -4 + pow(( 1 / sqrt(Pol) + sqrt(Pol) ), 3) * (3 + costheta) / 4);
// STT = gammap*J* hbar*(g_tunnel+g_sv)/(2*Ms*tfl*q*u0);
STT = Pol*gammap*J* hbar/(2*Ms*tfl*q*u0);
//STT = gammap*J/Jc0;
FLT = STT*B1+STT*B2*areamtj*J;
// Calculate m x Hth
mcrossHth_int[0]=m_old[1]*randomHth[2]-m_old[2]*randomHth[1];
mcrossHth_int[1]=m_old[2]*randomHth[0]-m_old[0]*randomHth[2];
mcrossHth_int[2]=m_old[0]*randomHth[1]-m_old[1]*randomHth[0];
// Calculate m x p and m x m x p
mcrossp_int[0]=m_old[1]*P[2]-m_old[2]*P[1];
mcrossp_int[1]=m_old[2]*P[0]-m_old[0]*P[2];
mcrossp_int[2]=m_old[0]*P[1]-m_old[1]*P[0];
mcrossmcrossp_int[0]=m_old[1]*mcrossp_int[2]-m_old[2]*mcrossp_int[1];
mcrossmcrossp_int[1]=m_old[2]*mcrossp_int[0]-m_old[0]*mcrossp_int[2];
mcrossmcrossp_int[2]=m_old[0]*mcrossp_int[1]-m_old[1]*mcrossp_int[0];
// Calculate m x Heff and m x m x Heff
mcrossHeff_int[0]=m_old[1]*Heff_old[2]-m_old[2]*Heff_old[1];
mcrossHeff_int[1]=m_old[2]*Heff_old[0]-m_old[0]*Heff_old[2];
mcrossHeff_int[2]=m_old[0]*Heff_old[1]-m_old[1]*Heff_old[0];
mcrossmcrossHeff_int[0]=m_old[1]*mcrossHeff_int[2]-m_old[2]*mcrossHeff_int[1];
mcrossmcrossHeff_int[1]=m_old[2]*mcrossHeff_int[0]-m_old[0]*mcrossHeff_int[2];
mcrossmcrossHeff_int[2]=m_old[0]*mcrossHeff_int[1]-m_old[1]*mcrossHeff_int[0];
// Use the LLG equation w/ Heun's Method to update the magnetization
dm_int[0] = -gammap*(mcrossHeff_int[0]+mcrossHth_int[0]) - gammap*alphac*mcrossmcrossHeff_int[0] + STT*mcrossmcrossp_int[0] + FLT*mcrossp_int[0];
dm_int[1] = -gammap*(mcrossHeff_int[1]+mcrossHth_int[1]) - gammap*alphac*mcrossmcrossHeff_int[1] + STT*mcrossmcrossp_int[1] + FLT*mcrossp_int[1];
dm_int[2] = -gammap*(mcrossHeff_int[2]+mcrossHth_int[2]) - gammap*alphac*mcrossmcrossHeff_int[2] + STT*mcrossmcrossp_int[2] + FLT*mcrossp_int[2];
M_int[0] = m_old[0] + (dm_int[0]*t_step);
M_int[1] = m_old[1] + (dm_int[1]*t_step);
M_int[2] = m_old[2] + (dm_int[2]*t_step);
m_int[0] = M_int[0]/sqrt(M_int[0]*M_int[0]+M_int[1]*M_int[1]+M_int[2]*M_int[2]);
m_int[1] = M_int[1]/sqrt(M_int[0]*M_int[0]+M_int[1]*M_int[1]+M_int[2]*M_int[2]);
m_int[2] = M_int[2]/sqrt(M_int[0]*M_int[0]+M_int[1]*M_int[1]+M_int[2]*M_int[2]);
// Update the thermal field and current values (time evolves)
double2 gen_x12;
double gen_x3;
gen_x12 = hiprand_normal2_double(&localState);
randomHth[0] = Hth*gen_x12.x;
randomHth[1] = Hth*gen_x12.y;
gen_x3 = hiprand_normal_double(&localState);
randomHth[2] = Hth*gen_x3;
//STT calculation
// costheta = m_int[0]*P[0] + m_int[1]*P[1] + m_int[2]*P[2];
// g_tunnel = 1/2 * P_tunnel / ( 1 + pow(P_tunnel,2)*costheta);
// g_sv = 1 / ( -4 + pow(( 1 / sqrt(Pol) + sqrt(Pol) ), 3) * (3 + costheta) / 4);
// STT = gammap*J* hbar*(g_tunnel+g_sv)/(2*Ms*tfl*q*u0);
STT = Pol*gammap*J* hbar/(2*Ms*tfl*q*u0);
//STT = gammap*J/Jc0;
FLT = STT*B1+STT*B2*areamtj*J;
// Update intermediate effective magnetic field Heff
double Heff_int [3] = {Ext[0]-Ms*Nx*m_int[0], Ext[1]-Ms*Ny*m_int[1], Ext[2]-Ms*Nz*m_int[2]+(KiPF*m_int[2]-VCMAPF*m_int[2]*V)};
// Calculate m x Hth
mcrossHth[0]=m_int[1]*randomHth[2]-m_int[2]*randomHth[1];
mcrossHth[1]=m_int[2]*randomHth[0]-m_int[0]*randomHth[2];
mcrossHth[2]=m_int[0]*randomHth[1]-m_int[1]*randomHth[0];
// Calculate m x p and m x m x p
mcrossp[0]=m_int[1]*P[2]-m_int[2]*P[1];
mcrossp[1]=m_int[2]*P[0]-m_int[0]*P[2];
mcrossp[2]=m_int[0]*P[1]-m_int[1]*P[0];
mcrossmcrossp[0]=m_int[1]*mcrossp[2]-m_int[2]*mcrossp[1];
mcrossmcrossp[1]=m_int[2]*mcrossp[0]-m_int[0]*mcrossp[2];
mcrossmcrossp[2]=m_int[0]*mcrossp[1]-m_int[1]*mcrossp[0];
// Calculate m x Heff and m x m x Heff
mcrossHeff[0]=m_int[1]*Heff_int[2]-m_int[2]*Heff_int[1];
mcrossHeff[1]=m_int[2]*Heff_int[0]-m_int[0]*Heff_int[2];
mcrossHeff[2]=m_int[0]*Heff_int[1]-m_int[1]*Heff_int[0];
mcrossmcrossHeff[0]=m_int[1]*mcrossHeff[2]-m_int[2]*mcrossHeff[1];
mcrossmcrossHeff[1]=m_int[2]*mcrossHeff[0]-m_int[0]*mcrossHeff[2];
mcrossmcrossHeff[2]=m_int[0]*mcrossHeff[1]-m_int[1]*mcrossHeff[0];
// Now use intermediate value in final value computation
dm[0] = -gammap*(mcrossHeff[0]+mcrossHth[0]) - gammap*alphac*mcrossmcrossHeff[0] + STT*mcrossmcrossp[0] + FLT*mcrossp[0];
dm[1] = -gammap*(mcrossHeff[1]+mcrossHth[1]) - gammap*alphac*mcrossmcrossHeff[1] + STT*mcrossmcrossp[1] + FLT*mcrossp[1];
dm[2] = -gammap*(mcrossHeff[2]+mcrossHth[2]) - gammap*alphac*mcrossmcrossHeff[2] + STT*mcrossmcrossp[2] + FLT*mcrossp[2];
M[0] = m_old[0] + (t_step/2)*(dm[0] + dm_int[0]);
M[1] = m_old[1] + (t_step/2)*(dm[1] + dm_int[1]);
M[2] = m_old[2] + (t_step/2)*(dm[2] + dm_int[2]);
m[0] = M[0]/sqrt(M[0]*M[0]+M[1]*M[1]+M[2]*M[2]);
m[1] = M[1]/sqrt(M[0]*M[0]+M[1]*M[1]+M[2]*M[2]);
m[2] = M[2]/sqrt(M[0]*M[0]+M[1]*M[1]+M[2]*M[2]);
// Update final values for next step
/*********** edition for NDR starts here ***********/
d_Rmtj = 1/(Gt*(1+(TMR/(TMR+2))*(m[0]*P[0]+m[1]*P[1]+m[2]*P[2]))+Gsi) - R;
/*********** edition for NDR ends here ***********/
R = 1/(Gt*(1+(TMR/(TMR+2))*(m[0]*P[0]+m[1]*P[1]+m[2]*P[2]))+Gsi);
}
if( initial_state ==0){
if( R >= Rp*(1+TMR/2)){
writeSuccess[this_id*trials_p_thread + i_trial]=1;
}
else{
writeSuccess[this_id*trials_p_thread + i_trial]=0;
}
}
else {
if( R <= Rp*(1+TMR/2)){
writeSuccess[this_id*trials_p_thread + i_trial]=1;
}
else{
writeSuccess[this_id*trials_p_thread + i_trial]=0;
}
}
//Recording switching energy and voltage after switching
g_Energy[this_id * trials_p_thread+i_trial] = energy;
if(isNDR <=1){
g_EndVndr[this_id * trials_p_thread+i_trial] = Vndr;
}
else if(isNDR == 2){
if(i_read == 1){
if(initial_state == 1){
g_EndVndr[this_id * trials_p_thread+i_trial] = Vndr+Vmos+V -g_EndVndr[this_id * trials_p_thread+i_trial];
}
else{
g_EndVndr[this_id * trials_p_thread+i_trial] -= Vndr+Vmos+V;
}
}
else{
g_EndVndr[this_id * trials_p_thread+i_trial] = Vndr+Vmos+V;
}
}
else if (isNDR == 3){
if(i_read == 1){
if(initial_state == 0){
g_EndVndr[this_id * trials_p_thread+i_trial] = Vndr - g_EndVndr[this_id * trials_p_thread+i_trial];
}
else{
g_EndVndr[this_id * trials_p_thread+i_trial] -= Vndr;
}
}
else{
g_EndVndr[this_id * trials_p_thread+i_trial] = Vndr;
}
}
}// end for i_read for
}// end i_trial for
}//end function
| b54fd4dd6586f9caf47c8c88fa1e9292d2d6f5a4.cu | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <cuda_runtime_api.h>
#include </u/local/cuda/5.0/include/cuda.h>
#include </u/local/cuda/5.0/include/cuda_runtime.h>
#include </u/local/cuda/5.0/include/curand_kernel.h>
#include "./NDR_Solver.cu"
using namespace std;
#define VARIATION
//#define OUTPUT_DETAIL
#define shaodi_pi 3.1415926
// Abort with file/line context when a CUDA runtime call does not return
// cudaSuccess. Evaluate (x) exactly once; wrap in do/while(0) so the macro
// behaves like a single statement after `if`/`else`.
// Fix: the original read `(x) ! = cudaSuccess` — the space between `!` and `=`
// makes it an invalid token sequence, so any expansion of this macro fails to
// compile.
#define CUDA_CALL(x) do { if( (x) != cudaSuccess ){\
printf("Error at %s:%d\n",__FILE__,__LINE__ );\
exit(1);} } while(0)
// Abort with file/line context when a cuRAND host-API call does not return
// CURAND_STATUS_SUCCESS (same error-exit pattern as CUDA_CALL above).
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) {\
printf("Error at %s:%d\n",__FILE__,__LINE__);\
exit(1);}} while(0)
// ----------------------------------------------------------------------------
// LLG: Monte-Carlo kernel integrating the stochastic Landau-Lifshitz-Gilbert
// equation (fixed-step Heun predictor-corrector, step t_step) for an MTJ free
// layer electrically in series with an access transistor (lookup table VIGmos)
// and, depending on `isNDR`, an NDR device (lookup table VIGndr).
// Each thread runs `trials_p_thread` independent trials; per-trial device
// variation is sampled with cuRAND when VARIATION is defined.
// Per-trial outputs, all indexed by this_id*trials_p_thread + i_trial:
//   writeSuccess    - 1 if the free layer switched, 0 otherwise
//   g_Energy        - accumulated supply energy (plus a precharge term)
//   g_SwitchingTime - time at which R crossed the mid-TMR threshold
//   g_EndVndr       - end-of-simulation sense voltage (meaning depends on isNDR)
// isNDR modes seen below: 1 = NDR write, 3 = NDR read, 2 or 0 = path without
// an NDR element (isNDR>=2 simulates both initial states for read margin).
// NOTE(review): there is no bounds guard on this_id — the launch must size the
// grid so every thread owns a valid slice of the output arrays; confirm at the
// call site.
// The two signatures below differ only in the extra diagnostic arrays compiled
// in when OUTPUT_DETAIL is defined.
#ifndef OUTPUT_DETAIL
__global__ void LLG(double* g_v_para, double* writeSuccess, int initialstate, double t_step,
int trials_p_thread, bool isPS, double ori_length, double sigma_l, double ori_width, double sigma_w,
double ori_tfl, double sigma_tfl, double sigma_mgo, double ori_Nx, double ori_Ny, double ori_Nz,
double* g_lin_dep_factor, int isNDR, const double* VIGndr, const double* VIGmos, double* g_Energy,
double* g_SwitchingTime, double* g_EndVndr, double Cload ){
#endif
#ifdef OUTPUT_DETAIL
__global__ void LLG(double* g_v_para, double* writeSuccess, int initialstate, double t_step,
int trials_p_thread, bool isPS, double ori_length, double sigma_l, double ori_width,
double sigma_w, double ori_tfl, double sigma_tfl, double sigma_mgo, double ori_Nx,
double ori_Ny, double ori_Nz, double* g_lin_dep_factor, int isNDR, const double* VIGndr,
const double* VIGmos, double* g_Energy, double* g_SwitchingTime, double* g_EndVndr, double Cload,
double* g_NDRturn, double* g_initialR, double* g_NDRoff){
#endif
/* -------------------------------------------
Input Parameters From User
-------------------------------------------*/
int this_id = (blockIdx.x * blockDim.x + threadIdx.x) ;
//initiate state for following random generation
// NOTE(review): seed == sequence == this_id makes each thread's stream
// distinct, but every launch reproduces the same noise; pass a host-side seed
// if statistically independent runs are required.
curandState_t localState;
curand_init(this_id, this_id, 0, &localState);
double Nx = ori_Nx;//origin: // x Demagnetization factor
double Ny = ori_Ny; // y Demagnetization factor
double Nz = ori_Nz; // z Demagnetization factor
//Parameter calculation
double length = ori_length; //length of MTJ
double width = ori_width; //width of MTJ
double Rp0 = 2e3;
double dMgO_a = 1.54e-3, dMgO_b = 1.1537278e10;//origin:9.24e9;
double Area = shaodi_pi*length*width/4; // Area without variation
// NOTE(review): 10e12 equals 1e13 — confirm the intended resistance-area unit
// conversion factor (the same constant is reused for Rp below, so the two are
// at least mutually consistent).
double dMgO_base = (log(Rp0 * Area * 10e12) - log(dMgO_a)) / dMgO_b; // MgO thickness [m]
double TMR = 1.5; // TMR at zero bias voltage
double Rap0 = Rp0 *( 1+TMR);
double Temperature = g_v_para[11];//27+273; // Temperature
double pulse_width = g_v_para[1];
double V_p = g_v_para[2], V_ap = g_v_para[3];
// Guard against a zero sigma so later divisions by sigma_V_p stay finite.
double sigma_V_p = g_v_para[4], sigma_V_ap = g_v_para[5];
if( sigma_V_p == 0){
sigma_V_p = 1e-9;
}
double mean_tr = g_v_para[6], sigma_tr = g_v_para[7], mean_tf = g_v_para[8], sigma_tf= g_v_para[9], delay_time = 0e-9, sense_time = g_v_para[10];
int n_sim = (pulse_width+delay_time + sense_time)/t_step ;//Simulation time
#ifdef OUTPUT_DETAIL
double peak_voltage = Peak_voltage(VIGndr);
double peak_current = IG_V(peak_voltage,VIGndr,1);
#endif
/* -------------------------------------------
Constants
-------------------------------------------*/
double hbar = 1.05457173e-34; // Reduced Planck constant, [J*s]
double k = 1.3806488e-23; // Boltzmann constant, [J/K]
double u0 = 4e-7*shaodi_pi; // Vacuum permeability, [V*s/(A*m)]
double q = 1.60217657e-19; // Electron charge, [C]
double alphac = 0.02; // LLGE damping factor
double gammap = (221276/(1+pow(alphac,2))); // Gyromagnetic ratio [m/(A x s)]
double T0 = 1120;
double Ms0 = 1393128.323;//origin:1.44e6;
double Ki0 =1.479036e-3;//origin:1.46e-3;
double Xi0 = 0; //53.39247e-15; //origin:58.9e-15;
if(isPS) Xi0 = 53.39247e-15;
double P_tunnel = 0.2; // the polarization of the tunnel current (only used by the commented-out angular STT model below)
double Pol = 1; // Polarization for Spin Torque
/******************simulation trials *************/
for( int i_trial = 0; i_trial < trials_p_thread; i_trial++){
// Reset nominal geometry/timing at the start of every trial before sampling
// this trial's variations.
length = ori_length; //length of MTJ
width = ori_width; //width of MTJ
double tfl = ori_tfl; //thickness of free layer
double rise_time = mean_tr;
double fall_time = mean_tf;
#ifdef VARIATION
//Dimention variation
rise_time += sigma_tr*curand_normal_double(&localState);
fall_time += sigma_tf*curand_normal_double(&localState);
double v_variation = sigma_V_p * curand_normal_double(&localState);
length = ori_length + sigma_l*curand_normal_double(&localState);
width = ori_width + sigma_w*curand_normal_double(&localState);
tfl = ori_tfl + sigma_tfl*curand_normal_double(&localState);
double dMgO = dMgO_base + sigma_mgo*curand_normal_double(&localState);
// Demag factors track the sampled geometry via host-fitted linear
// sensitivities in g_lin_dep_factor, then are renormalized to sum to 1.
double temp_Nx = ori_Nx + g_lin_dep_factor[0]*(length-ori_length) + g_lin_dep_factor[1] * ( width - ori_width) + g_lin_dep_factor[2] * (tfl - ori_tfl) ;
double temp_Ny = ori_Ny + g_lin_dep_factor[3]*(length-ori_length) + g_lin_dep_factor[4] * ( width - ori_width) + g_lin_dep_factor[5] * (tfl - ori_tfl) ;
double temp_Nz = ori_Nz + g_lin_dep_factor[6]*(length-ori_length) + g_lin_dep_factor[7] * ( width - ori_width) + g_lin_dep_factor[8] * (tfl - ori_tfl) ;
Nx = temp_Nx / (temp_Nx + temp_Ny + temp_Nz);
Ny = temp_Ny / (temp_Nx + temp_Ny + temp_Nz);
Nz = temp_Nz / (temp_Nx + temp_Ny + temp_Nz);
#endif
// NOTE(review): `dMgO` and `v_variation` are only declared inside the
// VARIATION block above, yet are used unconditionally below — this kernel
// compiles only with VARIATION defined (it is, at the top of this file).
Area = shaodi_pi*length*width/4; // Area without variation
double areamtj = Area ; // MTJ area [m^2]
double Rp = exp(dMgO * dMgO_b)*dMgO_a / (Area * 10e12);
double Rap = (1+TMR)*Rp; // Anti-parallel resistance [Ohms]
double B1 = 0;//origin 0.2 // Field-like torque linear parameter [unitless]
double B2 = 0;//origin 0.02; // Field-like torque quadratic parameter [1/A]
int sim_per_trial = 1;
if(isNDR >=2){ // read mode, simulate the read margin by read both ap and p MTJ
sim_per_trial = 2;
}
for (int i_read = 0; i_read < sim_per_trial; i_read ++){
int initial_state = initialstate; // Inital state [0 = parallel, 1 = anti-parallel]
if ( i_read == 1){ // simulate read for the other state
initial_state = 1 - initial_state;
}
double P [3] = {0, 0, -1}; // Direction of polarization
double Ext [3] = {0, 0, 0}; // External magnetic field [A/m] - 1 oersted [Oe] = 79.5774715459424 ampere/meter [A/m]
double Ms = Ms0 * ( 1 - pow(Temperature/T0,1.5)); // Saturation magnetization [A/m] - 1e6 A/m = 1000 emu/cc
double dstray = 20e-9, tstray = 1.164656e-9;
//double Ext[3] = {-Ms*length*width/4/shaodi_pi*((dstray+tstray)/(pow(length/2,2)*sqrt(pow(length/2,2)+pow(dstray+tstray,2)))-(dstray-tstray)/(pow(length/2,2)*sqrt(pow(length/2,2)+pow(dstray-tstray,2)))),0,0};
double Ki = Ki0 * pow(Ms/Ms0, 2.18); // Anisotropy field constant [J/m^2]
double Xi = Xi0* pow(Ms/Ms0, 2.83); // VCMA field constant [J/(V x m)]
double Gt = 1/(Rp*(1+(TMR/(TMR+2)))); // Direct elastic tunneling conductance [S]
double KiPF = (2*Ki)/(tfl*u0*Ms); // Prefactor for interface anisotropy effective field
double VCMAPF = (2*Xi)/(u0*Ms*dMgO*tfl); // Prefactor for VCMA effective field
double Gsi = 0; // Conductance due to imperfections in Mgo [S]
//double Jc0 = (2*Ms*tfl*q*u0)/(hbar*Pol); // Normalization Constant for Current Density
double volume = areamtj*tfl; // MTJ volume [m^3]
double Hth = sqrt((2*k*Temperature*alphac)/(u0*gammap*Ms*volume*t_step)); // Amplitude of Thermal Field
/* -------------------------------------------
Internal Variables
-------------------------------------------*/
double costheta = 0; // the angle between the magnization of free and reference layers
double g_sv = 0; // the polarization efficiency in spin valve
double g_tunnel = 0; // the polarization efficiency in tunnel current
//double m_old [3] = {0, 0, 0}; // Normalized previous magnetization
double Heff_old [3] = {0, 0, 0}; // Previous Heff components [A/m]
double m_int [3] = {0, 0, 0}; // Intermediate normalized magnetization
double dm_int [3] = {0, 0, 0}; // Intermediate derivative of magnetization
double M_int [3] = {0, 0, 0}; // Intermediate denormalized magnetization
//double Heff_int [3] = {0, 0, 0}; // Intermediate Heff components [A/m]
double dm [3] = {0, 0, 0}; // Time derivative of magnetization [1/s]
double M [3] = {0, 0, 0}; // Denormalized magnetization
double mcrossp_int [3] = {0, 0, 0}; // Intermediate cross product components (m x p)
double mcrossHeff_int [3] = {0, 0, 0}; // Intermediate cross product components (m x Heff)
double mcrossHth_int [3] = {0, 0, 0}; // Intermediate cross product components (m x Hth)
double mcrossmcrossp_int [3] = {0, 0, 0}; // Intermediate double cross product components (m x m x p)
double mcrossmcrossHeff_int [3] = {0, 0, 0}; // Intermediate double cross product components (m x m x Heff)
double mcrossp [3] = {0, 0, 0}; // Cross product components (m x p)
double mcrossHeff [3] = {0, 0, 0}; // Cross product components (m x Heff)
double mcrossHth [3] = {0, 0, 0}; // Cross product components (m x Hth)
double mcrossmcrossp [3] = {0, 0, 0}; // Cross product components (m x m x p)
double mcrossmcrossHeff [3] = {0, 0, 0}; // Cross product components (m x m x Heff)
double randomHth [3] = {0, 0, 0}; // Vector of random variables
double STT = 0; // Strenght of STT term
double FLT = 0; // Strenght of FLT term
// -------------------------------------------
// Initialize Variables
// -------------------------------------------
double m [3] = {0, 0, 1}; // Normalized mangetization
double R = Rap; // MTJ resistance [Ohms]
if(initial_state != 1){
R = Rp; // MTJ resistance [Ohms]
m[2] = -1; // Normalized mangetization
}
double J = 0; // Current density [A/m^2]
double V = 0; // MTJ Voltage [V]
double V_offset = 0;
/*********** edition for NDR starts here ***********/
//The parameters for calculating NDR
double Vndr = 0;
double Imtj = 0; //current through MTJ and nmos
double Vmos = 0;
double d_Rmtj = 0; // delta Rmtj
double d_Imtj = 0; // delta Imtj
double Indr = 0; //current through NDR
double d_Vndr = 0; //
double Csline = Cload;
double Cbline = Cload;
double d_vdd =0, new_vdd =0, vdd =0;
V = 0; // mtj voltage
#ifdef OUTPUT_DETAIL
bool isNDRturn = false, isNDRoff = false;
g_NDRturn [this_id*trials_p_thread + i_trial] = 0;
g_NDRoff [this_id*trials_p_thread + i_trial] = 0;
g_initialR [this_id*trials_p_thread + i_trial] = R;
#endif
// NDR write starts from the DC operating point of the NDR/MTJ/MOS series
// stack (helpers from NDR_Solver.cu, included at the top of this file).
if(isNDR == 1){ //NDR write
Vndr = Solve_stable_vndr(VIGndr, VIGmos, R, V_ap);
Indr = IG_V(Vndr, VIGndr,1);
Imtj = Indr;
Vmos = V_I(Imtj,VIGmos);
vdd = V_ap;
}
double energy = V_ap*V_ap*Cload; // Pre-charge energy
bool isSwitched = false;
// Default switching time = full pulse width; overwritten on first threshold
// crossing inside the time loop.
g_SwitchingTime[this_id * trials_p_thread+i_trial] = pulse_width;
/*********** edition for NDR ends here ***********/
// ---- Main time loop: one Heun step of the LLG equation per iteration,
// ---- interleaved with an explicit-Euler update of the circuit state.
for(int i=1;i<=n_sim;i++){
// Update values
double m_old [3] = {m[0], m[1], m[2]};
// Update voltage/current density
double V_ub = V_ap + V_offset;
#ifdef VARIATION
// Supply variation interpolated between the P-state and AP-state sigmas
// according to the instantaneous resistance.
V_ub += v_variation * ( 1 + (sigma_V_ap/sigma_V_p - 1) * (R - Rp0)/(Rap0 - Rp0) ) ;
#endif
double curr_time = i * t_step;
// Trapezoidal drive pulse: 0 outside the pulse, linear rise, flat top,
// linear fall.
if(curr_time < delay_time || curr_time > delay_time + pulse_width){
new_vdd = 0;
}
else{
if(curr_time < delay_time + rise_time){
new_vdd = (curr_time - delay_time)/rise_time * V_ub;
}
else{
if(curr_time <= delay_time + pulse_width - fall_time){
new_vdd = V_ub;
}
else{
new_vdd = V_ub * ( delay_time + pulse_width - curr_time) / fall_time;
}
}
}
d_vdd = new_vdd - vdd;
vdd = new_vdd;
//NDR calculation
/*********** edition for NDR starts here ***********/
#ifdef OUTPUT_DETAIL
if(isNDR==1){
if( !isNDRturn && abs(Vndr) > abs(peak_voltage)){
g_NDRturn [this_id*trials_p_thread + i_trial] = R;
isNDRturn = true;
}
if( isNDRturn && !isNDRoff && abs(Indr) < 0.25*abs(peak_current)){
g_NDRoff [this_id*trials_p_thread + i_trial] = R;
isNDRoff = true;
}
} else if(isNDR>=2){
if( !isNDRturn && abs(Vndr) > abs(peak_voltage)){
g_NDRturn [this_id*trials_p_thread + i_trial] = curr_time;
isNDRturn = true;
}
if (!isNDRoff && curr_time >= delay_time + pulse_width+sense_time){
// if (!isNDRoff && i>=1){
isNDRoff = true;
// g_initialR [this_id*trials_p_thread + i_trial] = d_Vndr;
// g_NDRturn [this_id*trials_p_thread + i_trial] = Imtj;
g_NDRoff [this_id*trials_p_thread + i_trial] = Vndr + V + Vmos;
}
}
#endif
// Circuit update for this time step. IG_V(...,2) is used as the MOS
// small-signal conductance dI/dV throughout.
if(isNDR==1 ){ //NDR write for AP-MTJ
//Solve the series of one NDR, one MTJ and one MOS with cap at MTJ
d_Imtj = ( t_step*Indr - t_step*Imtj - Cload*Imtj*d_Rmtj) / ( Cload/IG_V(Vmos,VIGmos,2) + Cload*R);
Imtj += d_Imtj;
Vmos = V_I(Imtj, VIGmos);
Vndr = vdd - Imtj*R - Vmos;
Indr = IG_V(Vndr,VIGndr,1);
V = Imtj*R;
energy += vdd * Indr * t_step;
}
else if(isNDR == 3){ // NDR read
if(curr_time < delay_time+pulse_width){ // precharging
//solve equation: Imtj = (vdd - Vmos(Imtj) - Vndr)/Rmtj = d(Csline*Vndr)/dt + Indr(Vndr)
Vmos = V_I(Imtj,VIGmos);
d_Imtj = (d_vdd - d_Vndr - d_Rmtj*Imtj)/(R+1/IG_V(Vmos,VIGmos,2));
Imtj += d_Imtj;
Indr = IG_V(Vndr,VIGndr,1);
d_Vndr = ( Imtj - Indr ) * t_step / Csline;
Vndr += d_Vndr;
V = Imtj*R;
energy += vdd * Imtj * t_step;
}
else{ // discharging state
//Solve equation: - d ((Imtj*R + Vmos(Imtj) + Vndr)*Cbline )/dt = Imtj = d(Vndr*Csline)/dt + Indr(Vndr)
d_Imtj = (-Imtj*t_step - Cbline*Imtj*d_Rmtj -d_Vndr*Cbline) / ( Cbline/IG_V(Vmos,VIGmos,2) + Cbline*R);
Imtj += d_Imtj;
Vmos = V_I(Imtj,VIGmos);
d_Vndr = (Imtj - Indr) * t_step / Csline;
Vndr += d_Vndr;
Indr = IG_V(Vndr,VIGndr,1);
V = Imtj*R;
}
}
else if (isNDR == 2 || isNDR == 0){
if(curr_time < delay_time+pulse_width){ // precharging
//solve equation: Imtj = (vdd - Vmos(Imtj) )/Rmtj = 0
d_Imtj = (d_vdd-d_Rmtj*Imtj)/(R+1/IG_V(Vmos,VIGmos,2));
Imtj += d_Imtj;
V = Imtj*R;
Vmos = vdd - V;
Indr = Imtj + R*Cbline*d_Imtj/t_step + Imtj*Cbline*d_Rmtj/t_step + Cbline/IG_V(Vmos,VIGmos,2)*d_Imtj/t_step;//The total I but not current of ndr, because there is no ndr
energy += vdd * Indr * t_step;
}
else{ // discharging state
//Solve equation: - d ((Imtj*R + Vmos(Imtj) )*Cbline )/dt = Imtj
d_Imtj = (-Imtj*t_step - Cbline*Imtj*d_Rmtj ) / ( Cbline/IG_V(Vmos,VIGmos,2) + Cbline*R);
Imtj += d_Imtj;
Vmos = V_I(Imtj,VIGmos);
V = Imtj*R;
}
}
// else{
//Solve the series of one MTJ and one MOS with cap at MTJ
// if(curr_time < delay_time+pulse_width){
// d_Imtj = (d_vdd-d_Rmtj*Imtj)/(R+1/IG_V(Vmos,VIGmos,2));
// Imtj += d_Imtj;
// V = Imtj*R;
// Vmos = vdd - V;
// Indr = Imtj + R*Cload*d_Imtj/t_step + Imtj*Cload*d_Rmtj/t_step + Cload/IG_V(Vmos,VIGmos,2)*d_Imtj/t_step;//The total I but not current of ndr, because there is no ndr
// energy += vdd * Indr * t_step;
// }
//Test whether switched
// Mid-TMR resistance threshold: Rp*(1+TMR/2) lies halfway between Rp and Rap.
if(!isSwitched){
if( (initial_state ==0 && R >= Rp*(1+TMR/2)) || ( initial_state ==1 && R <= Rp*(1+TMR/2)) ){
isSwitched = true;
g_SwitchingTime[this_id * trials_p_thread+i_trial] = curr_time;
}
}
/*********** edition for NDR ends here ***********/
// ---- Heun predictor: evaluate torques at m_old and take an Euler step.
// Update effective magnetic field Heff_old
Heff_old[0] = Ext[0]-Ms*Nx*m_old[0];
Heff_old[1] = Ext[1]-Ms*Ny*m_old[1];
Heff_old[2] = Ext[2]-Ms*Nz*m_old[2]+(KiPF*m_old[2]-VCMAPF*m_old[2]*V);
//Calculate STT factor
J = V/(R*areamtj);
// costheta = m_old[0]*P[0] + m_old[1]*P[1] + m_old[2]*P[2];
// g_tunnel = 1/2 * P_tunnel / ( 1 + pow(P_tunnel,2)*costheta);
// g_sv = 1 / ( -4 + pow(( 1 / sqrt(Pol) + sqrt(Pol) ), 3) * (3 + costheta) / 4);
// STT = gammap*J* hbar*(g_tunnel+g_sv)/(2*Ms*tfl*q*u0);
STT = Pol*gammap*J* hbar/(2*Ms*tfl*q*u0);
//STT = gammap*J/Jc0;
FLT = STT*B1+STT*B2*areamtj*J;
// Calculate m x Hth
mcrossHth_int[0]=m_old[1]*randomHth[2]-m_old[2]*randomHth[1];
mcrossHth_int[1]=m_old[2]*randomHth[0]-m_old[0]*randomHth[2];
mcrossHth_int[2]=m_old[0]*randomHth[1]-m_old[1]*randomHth[0];
// Calculate m x p and m x m x p
mcrossp_int[0]=m_old[1]*P[2]-m_old[2]*P[1];
mcrossp_int[1]=m_old[2]*P[0]-m_old[0]*P[2];
mcrossp_int[2]=m_old[0]*P[1]-m_old[1]*P[0];
mcrossmcrossp_int[0]=m_old[1]*mcrossp_int[2]-m_old[2]*mcrossp_int[1];
mcrossmcrossp_int[1]=m_old[2]*mcrossp_int[0]-m_old[0]*mcrossp_int[2];
mcrossmcrossp_int[2]=m_old[0]*mcrossp_int[1]-m_old[1]*mcrossp_int[0];
// Calculate m x Heff and m x m x Heff
mcrossHeff_int[0]=m_old[1]*Heff_old[2]-m_old[2]*Heff_old[1];
mcrossHeff_int[1]=m_old[2]*Heff_old[0]-m_old[0]*Heff_old[2];
mcrossHeff_int[2]=m_old[0]*Heff_old[1]-m_old[1]*Heff_old[0];
mcrossmcrossHeff_int[0]=m_old[1]*mcrossHeff_int[2]-m_old[2]*mcrossHeff_int[1];
mcrossmcrossHeff_int[1]=m_old[2]*mcrossHeff_int[0]-m_old[0]*mcrossHeff_int[2];
mcrossmcrossHeff_int[2]=m_old[0]*mcrossHeff_int[1]-m_old[1]*mcrossHeff_int[0];
// Use the LLG equation w/ Heun's Method to update the magnetization
dm_int[0] = -gammap*(mcrossHeff_int[0]+mcrossHth_int[0]) - gammap*alphac*mcrossmcrossHeff_int[0] + STT*mcrossmcrossp_int[0] + FLT*mcrossp_int[0];
dm_int[1] = -gammap*(mcrossHeff_int[1]+mcrossHth_int[1]) - gammap*alphac*mcrossmcrossHeff_int[1] + STT*mcrossmcrossp_int[1] + FLT*mcrossp_int[1];
dm_int[2] = -gammap*(mcrossHeff_int[2]+mcrossHth_int[2]) - gammap*alphac*mcrossmcrossHeff_int[2] + STT*mcrossmcrossp_int[2] + FLT*mcrossp_int[2];
M_int[0] = m_old[0] + (dm_int[0]*t_step);
M_int[1] = m_old[1] + (dm_int[1]*t_step);
M_int[2] = m_old[2] + (dm_int[2]*t_step);
// Renormalize the predictor magnetization to unit length.
m_int[0] = M_int[0]/sqrt(M_int[0]*M_int[0]+M_int[1]*M_int[1]+M_int[2]*M_int[2]);
m_int[1] = M_int[1]/sqrt(M_int[0]*M_int[0]+M_int[1]*M_int[1]+M_int[2]*M_int[2]);
m_int[2] = M_int[2]/sqrt(M_int[0]*M_int[0]+M_int[1]*M_int[1]+M_int[2]*M_int[2]);
// Update the thermal field and current values (time evolves)
double2 gen_x12;
double gen_x3;
gen_x12 = curand_normal2_double(&localState);
randomHth[0] = Hth*gen_x12.x;
randomHth[1] = Hth*gen_x12.y;
gen_x3 = curand_normal_double(&localState);
randomHth[2] = Hth*gen_x3;
//STT calculation
// costheta = m_int[0]*P[0] + m_int[1]*P[1] + m_int[2]*P[2];
// g_tunnel = 1/2 * P_tunnel / ( 1 + pow(P_tunnel,2)*costheta);
// g_sv = 1 / ( -4 + pow(( 1 / sqrt(Pol) + sqrt(Pol) ), 3) * (3 + costheta) / 4);
// STT = gammap*J* hbar*(g_tunnel+g_sv)/(2*Ms*tfl*q*u0);
STT = Pol*gammap*J* hbar/(2*Ms*tfl*q*u0);
//STT = gammap*J/Jc0;
FLT = STT*B1+STT*B2*areamtj*J;
// ---- Heun corrector: re-evaluate torques at the predicted m_int and
// ---- average the two slopes.
// Update intermediate effective magnetic field Heff
double Heff_int [3] = {Ext[0]-Ms*Nx*m_int[0], Ext[1]-Ms*Ny*m_int[1], Ext[2]-Ms*Nz*m_int[2]+(KiPF*m_int[2]-VCMAPF*m_int[2]*V)};
// Calculate m x Hth
mcrossHth[0]=m_int[1]*randomHth[2]-m_int[2]*randomHth[1];
mcrossHth[1]=m_int[2]*randomHth[0]-m_int[0]*randomHth[2];
mcrossHth[2]=m_int[0]*randomHth[1]-m_int[1]*randomHth[0];
// Calculate m x p and m x m x p
mcrossp[0]=m_int[1]*P[2]-m_int[2]*P[1];
mcrossp[1]=m_int[2]*P[0]-m_int[0]*P[2];
mcrossp[2]=m_int[0]*P[1]-m_int[1]*P[0];
mcrossmcrossp[0]=m_int[1]*mcrossp[2]-m_int[2]*mcrossp[1];
mcrossmcrossp[1]=m_int[2]*mcrossp[0]-m_int[0]*mcrossp[2];
mcrossmcrossp[2]=m_int[0]*mcrossp[1]-m_int[1]*mcrossp[0];
// Calculate m x Heff and m x m x Heff
mcrossHeff[0]=m_int[1]*Heff_int[2]-m_int[2]*Heff_int[1];
mcrossHeff[1]=m_int[2]*Heff_int[0]-m_int[0]*Heff_int[2];
mcrossHeff[2]=m_int[0]*Heff_int[1]-m_int[1]*Heff_int[0];
mcrossmcrossHeff[0]=m_int[1]*mcrossHeff[2]-m_int[2]*mcrossHeff[1];
mcrossmcrossHeff[1]=m_int[2]*mcrossHeff[0]-m_int[0]*mcrossHeff[2];
mcrossmcrossHeff[2]=m_int[0]*mcrossHeff[1]-m_int[1]*mcrossHeff[0];
// Now use intermediate value in final value computation
dm[0] = -gammap*(mcrossHeff[0]+mcrossHth[0]) - gammap*alphac*mcrossmcrossHeff[0] + STT*mcrossmcrossp[0] + FLT*mcrossp[0];
dm[1] = -gammap*(mcrossHeff[1]+mcrossHth[1]) - gammap*alphac*mcrossmcrossHeff[1] + STT*mcrossmcrossp[1] + FLT*mcrossp[1];
dm[2] = -gammap*(mcrossHeff[2]+mcrossHth[2]) - gammap*alphac*mcrossmcrossHeff[2] + STT*mcrossmcrossp[2] + FLT*mcrossp[2];
M[0] = m_old[0] + (t_step/2)*(dm[0] + dm_int[0]);
M[1] = m_old[1] + (t_step/2)*(dm[1] + dm_int[1]);
M[2] = m_old[2] + (t_step/2)*(dm[2] + dm_int[2]);
m[0] = M[0]/sqrt(M[0]*M[0]+M[1]*M[1]+M[2]*M[2]);
m[1] = M[1]/sqrt(M[0]*M[0]+M[1]*M[1]+M[2]*M[2]);
m[2] = M[2]/sqrt(M[0]*M[0]+M[1]*M[1]+M[2]*M[2]);
// Update final values for next step
/*********** edition for NDR starts here ***********/
d_Rmtj = 1/(Gt*(1+(TMR/(TMR+2))*(m[0]*P[0]+m[1]*P[1]+m[2]*P[2]))+Gsi) - R;
/*********** edition for NDR ends here ***********/
R = 1/(Gt*(1+(TMR/(TMR+2))*(m[0]*P[0]+m[1]*P[1]+m[2]*P[2]))+Gsi);
}
// Write success = final R on the far side of the mid-TMR threshold from the
// initial state.
if( initial_state ==0){
if( R >= Rp*(1+TMR/2)){
writeSuccess[this_id*trials_p_thread + i_trial]=1;
}
else{
writeSuccess[this_id*trials_p_thread + i_trial]=0;
}
}
else {
if( R <= Rp*(1+TMR/2)){
writeSuccess[this_id*trials_p_thread + i_trial]=1;
}
else{
writeSuccess[this_id*trials_p_thread + i_trial]=0;
}
}
//Recording switching energy and voltage after switching
// In read modes (isNDR>=2) the second pass (i_read==1) folds both states'
// sense voltages into a single read-margin value in g_EndVndr.
g_Energy[this_id * trials_p_thread+i_trial] = energy;
if(isNDR <=1){
g_EndVndr[this_id * trials_p_thread+i_trial] = Vndr;
}
else if(isNDR == 2){
if(i_read == 1){
if(initial_state == 1){
g_EndVndr[this_id * trials_p_thread+i_trial] = Vndr+Vmos+V -g_EndVndr[this_id * trials_p_thread+i_trial];
}
else{
g_EndVndr[this_id * trials_p_thread+i_trial] -= Vndr+Vmos+V;
}
}
else{
g_EndVndr[this_id * trials_p_thread+i_trial] = Vndr+Vmos+V;
}
}
else if (isNDR == 3){
if(i_read == 1){
if(initial_state == 0){
g_EndVndr[this_id * trials_p_thread+i_trial] = Vndr - g_EndVndr[this_id * trials_p_thread+i_trial];
}
else{
g_EndVndr[this_id * trials_p_thread+i_trial] -= Vndr;
}
}
else{
g_EndVndr[this_id * trials_p_thread+i_trial] = Vndr;
}
}
}// end for i_read for
}// end i_trial for
}//end function
|
4b22d1e9fc6932dcf5cd0b1162297d5cebee2dc9.hip | // !!! This is a file automatically generated by hipify!!!
// Groute: An Asynchronous Multi-GPU Programming Framework
// http://www.github.com/groute/groute
// Copyright (c) 2017, A. Barak
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the names of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <stdio.h>
#include <gflags/gflags.h>
#include <utils/utils.h>
#include <utils/stopwatch.h>
#include <atomic>
#include <thread>
#include <algorithm>
#include <groute/internal/cuda_utils.h>
#include <groute/internal/worker.h>
#include <groute/internal/pinned_allocation.h>
#include <groute/event_pool.h>
#include <groute/groute.h>
#include "cc_context.h"
#include "cc_common.h"
#include "cc_config.h"
#include "cc_partitioner.h"
DEFINE_int32(edges_chunk, 0, "Define the chunk size for the dynamic workload of edges");
DEFINE_int32(parents_chunk, 0, "Define the chunk size for the dynamic reduction of parents");
DEFINE_int32(edge_segs, 0, "Define the number of edge segments");
DEFINE_int32(parent_segs, 0, "Define the number of parent segments");
DEFINE_int32(input_buffers, 0, "Define the number of buffers for the input dynamic workload");
DEFINE_int32(reduce_buffers, 0, "Define the number of buffers for the reduction dynamic workload");
DEFINE_int32(nonatomic_rounds, -1, "Define the number of non atomic rounds to run per input segment");
DEFINE_bool(vertex_partitioning, false, "Perform hirarchic biased vertex partitioning");
DEFINE_bool(auto_config, true, "Deduce configuration automatically");
DEFINE_double(compute_latency_ratio, 0.12, "Hint the compute_time / memory_latency ratio");
DEFINE_int32(degree_threshold, 8, "Threshold degree for the auto config");
DEFINE_bool(tree_topology, true, "Use an hierarchical log_2(n) tree topology");
DEFINE_bool(inverse_topology, false, "Inverse the top down topology (problem converges at the N'th device)");
#ifndef NDEBUG
#define MASYNC_BS 32
#else
#define MASYNC_BS 512
#endif
// Multi-GPU asynchronous connected-components solver (atomic variant).
// Partitions the edge list across `ngpus` devices, streams edge segments in
// through an input router, reduces parent arrays through a reduction router
// (one-way chain or log2(n) tree topology), and validates the result on the
// host.  Returns true when CheckComponents accepts the output labeling.
bool RunCCMAsyncAtomic(int ngpus)
{
    cc::Context context(FLAGS_graphfile, FLAGS_ggr, FLAGS_verbose, ngpus);
    cc::Configuration configuration;
    if (FLAGS_auto_config)
        cc::BuildConfigurationAuto( // Deduce the configuration automatically
            ngpus, context.nedges, context.nvtxs,
            FLAGS_compute_latency_ratio, FLAGS_degree_threshold,
            FLAGS_nonatomic_rounds,
            configuration
        );
    else
        cc::BuildConfiguration(
            ngpus, context.nedges, context.nvtxs,
            FLAGS_edge_segs, FLAGS_parent_segs,
            FLAGS_edges_chunk, FLAGS_parents_chunk,
            FLAGS_input_buffers, FLAGS_reduce_buffers,
            FLAGS_nonatomic_rounds, FLAGS_vertex_partitioning,
            configuration);
    if (FLAGS_verbose) configuration.print();
    context.DisableFragmentation();
    // Fix: '::max' (hipify translation artifact) -> 'std::max'.  <algorithm>
    // is included and the CUDA twin of this function uses std::max here.
    context.CacheEvents(
        std::max(configuration.input_pipeline_buffers, configuration.reduction_pipeline_buffers) /*raw estimation */);
    double par_total_ms = 0.0, total_ms = 0.0;
    for (size_t rep = 0; rep < FLAGS_repetitions; ++rep)
    {
        Stopwatch psw(true); // times the CPU-side partitioning only
        groute::Segment<Edge> all_edges = groute::Segment<Edge>(&context.host_edges[0], context.nedges, context.nedges, 0);
        cc::EdgePartitioner partitioner(ngpus, context.nvtxs, all_edges, configuration.vertex_partitioning);
        auto reduction_policy = FLAGS_tree_topology
            ? groute::router::Policy::CreateTreeReductionPolicy(ngpus)
            : groute::router::Policy::CreateOneWayReductionPolicy(ngpus);
        groute::router::Router<Edge> input_router(context, std::make_shared<cc::EdgeScatterPolicy>(ngpus));
        groute::router::Router<int> reduction_router(context, reduction_policy);
        groute::router::ISender<Edge>* host_sender = input_router.GetSender(groute::Device::Host);
        groute::router::IReceiver<int>* host_receiver = reduction_router.GetReceiver(groute::Device::Host); // TODO
        IntervalRangeMarker iter_rng(context.nedges, "begin");
        // Feed every edge partition into the input router, then signal EOF.
        for (auto& edge_partition : partitioner.edge_partitions)
        {
            host_sender->Send(edge_partition, groute::Event());
        }
        host_sender->Shutdown();
        psw.stop();
        par_total_ms += psw.ms();
        std::vector< std::unique_ptr<cc::Problem> > problems;
        std::vector< std::unique_ptr<cc::Solver> > solvers;
        std::vector<std::thread> workers(ngpus);
        dim3 block_dims(MASYNC_BS, 1, 1);
        for (size_t i = 0; i < ngpus; ++i)
        {
            problems.emplace_back(new cc::Problem(context, partitioner.parents_partitions[i], i, block_dims));
            solvers.emplace_back(new cc::Solver(context, *problems.back()));
            solvers[i]->edges_in = groute::Link<Edge>(input_router, i, configuration.edges_chunk_size, configuration.input_pipeline_buffers);
            solvers[i]->reduction_in = groute::Link<component_t>(reduction_router, i, configuration.parents_chunk_size, configuration.reduction_pipeline_buffers);
            solvers[i]->reduction_out = groute::Link<component_t>(i, reduction_router);
        }
        for (size_t i = 0; i < ngpus; ++i)
        {
            // Sync the first copy operations (exclude from timing)
            solvers[i]->edges_in.Sync();
        }
        groute::internal::Barrier barrier(ngpus + 1); // barrier for accurate timing
        for (size_t i = 0; i < ngpus; ++i)
        {
            // Run workers
            std::thread worker(
                [&configuration, &barrier](cc::Solver& solver)
                {
                    barrier.Sync(); // worker is up
                    barrier.Sync(); // main thread has started the stopwatch
                    solver.Solve(configuration);
                },
                std::ref(*solvers[i]));
            workers[i] = std::move(worker);
        }
        barrier.Sync();
        Stopwatch sw(true); // all threads are running, start timing
        barrier.Sync();
        for (size_t i = 0; i < ngpus; ++i)
        {
            // Join threads
            workers[i].join();
        }
        sw.stop();
        total_ms += sw.ms();
        // output is received from the drain device (by topology)
        auto seg
            = host_receiver
            ->Receive(groute::Buffer<int>(&context.host_parents[0], context.nvtxs), groute::Event())
            .get();
        seg.Sync();
    }
    if (FLAGS_verbose) printf("\nPartitioning (CPU): %f ms.", par_total_ms / FLAGS_repetitions);
    printf("\nCC (Async): %f ms. <filter>\n\n", total_ms / FLAGS_repetitions);
    return CheckComponents(context.host_parents, context.nvtxs);
}
// Test harness entry point: runs the asynchronous multi-GPU CC solver and
// reports whether the resulting component labeling validated.
bool TestCCAsyncMulti(int ngpus)
{
    const bool passed = RunCCMAsyncAtomic(ngpus);
    return passed;
}
| 4b22d1e9fc6932dcf5cd0b1162297d5cebee2dc9.cu | // Groute: An Asynchronous Multi-GPU Programming Framework
// http://www.github.com/groute/groute
// Copyright (c) 2017, A. Barak
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the names of the copyright holders nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include <stdio.h>
#include <gflags/gflags.h>
#include <utils/utils.h>
#include <utils/stopwatch.h>
#include <atomic>
#include <thread>
#include <algorithm>
#include <groute/internal/cuda_utils.h>
#include <groute/internal/worker.h>
#include <groute/internal/pinned_allocation.h>
#include <groute/event_pool.h>
#include <groute/groute.h>
#include "cc_context.h"
#include "cc_common.h"
#include "cc_config.h"
#include "cc_partitioner.h"
DEFINE_int32(edges_chunk, 0, "Define the chunk size for the dynamic workload of edges");
DEFINE_int32(parents_chunk, 0, "Define the chunk size for the dynamic reduction of parents");
DEFINE_int32(edge_segs, 0, "Define the number of edge segments");
DEFINE_int32(parent_segs, 0, "Define the number of parent segments");
DEFINE_int32(input_buffers, 0, "Define the number of buffers for the input dynamic workload");
DEFINE_int32(reduce_buffers, 0, "Define the number of buffers for the reduction dynamic workload");
DEFINE_int32(nonatomic_rounds, -1, "Define the number of non atomic rounds to run per input segment");
DEFINE_bool(vertex_partitioning, false, "Perform hirarchic biased vertex partitioning");
DEFINE_bool(auto_config, true, "Deduce configuration automatically");
DEFINE_double(compute_latency_ratio, 0.12, "Hint the compute_time / memory_latency ratio");
DEFINE_int32(degree_threshold, 8, "Threshold degree for the auto config");
DEFINE_bool(tree_topology, true, "Use an hierarchical log_2(n) tree topology");
DEFINE_bool(inverse_topology, false, "Inverse the top down topology (problem converges at the N'th device)");
#ifndef NDEBUG
#define MASYNC_BS 32
#else
#define MASYNC_BS 512
#endif
// Multi-GPU asynchronous connected-components solver (atomic variant).
// Partitions the edge list across `ngpus` devices, streams edge segments in
// through an input router, reduces parent arrays through a reduction router
// (one-way chain or log2(n) tree topology), and validates the result on the
// host.  Returns true when CheckComponents accepts the output labeling.
bool RunCCMAsyncAtomic(int ngpus)
{
cc::Context context(FLAGS_graphfile, FLAGS_ggr, FLAGS_verbose, ngpus);
cc::Configuration configuration;
// Either deduce the pipeline configuration from graph statistics, or build
// it from the explicit command-line flags.
if (FLAGS_auto_config)
cc::BuildConfigurationAuto( // Deduce the configuration automatically
ngpus, context.nedges, context.nvtxs,
FLAGS_compute_latency_ratio, FLAGS_degree_threshold,
FLAGS_nonatomic_rounds,
configuration
);
else
cc::BuildConfiguration(
ngpus, context.nedges, context.nvtxs,
FLAGS_edge_segs, FLAGS_parent_segs,
FLAGS_edges_chunk, FLAGS_parents_chunk,
FLAGS_input_buffers, FLAGS_reduce_buffers,
FLAGS_nonatomic_rounds, FLAGS_vertex_partitioning,
configuration);
if (FLAGS_verbose) configuration.print();
context.DisableFragmentation();
context.CacheEvents(
std::max(configuration.input_pipeline_buffers, configuration.reduction_pipeline_buffers) /*raw estimation */);
double par_total_ms = 0.0, total_ms = 0.0;
for (size_t rep = 0; rep < FLAGS_repetitions; ++rep)
{
Stopwatch psw(true); // times the CPU-side partitioning only
groute::Segment<Edge> all_edges = groute::Segment<Edge>(&context.host_edges[0], context.nedges, context.nedges, 0);
cc::EdgePartitioner partitioner(ngpus, context.nvtxs, all_edges, configuration.vertex_partitioning);
auto reduction_policy = FLAGS_tree_topology
? groute::router::Policy::CreateTreeReductionPolicy(ngpus)
: groute::router::Policy::CreateOneWayReductionPolicy(ngpus);
groute::router::Router<Edge> input_router(context, std::make_shared<cc::EdgeScatterPolicy>(ngpus));
groute::router::Router<int> reduction_router(context, reduction_policy);
groute::router::ISender<Edge>* host_sender = input_router.GetSender(groute::Device::Host);
groute::router::IReceiver<int>* host_receiver = reduction_router.GetReceiver(groute::Device::Host); // TODO
IntervalRangeMarker iter_rng(context.nedges, "begin");
// Feed every edge partition into the input router, then signal EOF.
for (auto& edge_partition : partitioner.edge_partitions)
{
host_sender->Send(edge_partition, groute::Event());
}
host_sender->Shutdown();
psw.stop();
par_total_ms += psw.ms();
std::vector< std::unique_ptr<cc::Problem> > problems;
std::vector< std::unique_ptr<cc::Solver> > solvers;
std::vector<std::thread> workers(ngpus);
dim3 block_dims(MASYNC_BS, 1, 1);
// One Problem/Solver pair per device, wired to the two routers.
for (size_t i = 0; i < ngpus; ++i)
{
problems.emplace_back(new cc::Problem(context, partitioner.parents_partitions[i], i, block_dims));
solvers.emplace_back(new cc::Solver(context, *problems.back()));
solvers[i]->edges_in = groute::Link<Edge>(input_router, i, configuration.edges_chunk_size, configuration.input_pipeline_buffers);
solvers[i]->reduction_in = groute::Link<component_t>(reduction_router, i, configuration.parents_chunk_size, configuration.reduction_pipeline_buffers);
solvers[i]->reduction_out = groute::Link<component_t>(i, reduction_router);
}
for (size_t i = 0; i < ngpus; ++i)
{
// Sync the first copy operations (exclude from timing)
solvers[i]->edges_in.Sync();
}
groute::internal::Barrier barrier(ngpus + 1); // barrier for accurate timing
for (size_t i = 0; i < ngpus; ++i)
{
// Run workers
std::thread worker(
[&configuration, &barrier](cc::Solver& solver)
{
barrier.Sync(); // worker is up
barrier.Sync(); // main thread has started the stopwatch
solver.Solve(configuration);
},
std::ref(*solvers[i]));
workers[i] = std::move(worker);
}
barrier.Sync();
Stopwatch sw(true); // all threads are running, start timing
barrier.Sync();
for (size_t i = 0; i < ngpus; ++i)
{
// Join threads
workers[i].join();
}
sw.stop();
total_ms += sw.ms();
// output is received from the drain device (by topology)
auto seg
= host_receiver
->Receive(groute::Buffer<int>(&context.host_parents[0], context.nvtxs), groute::Event())
.get();
seg.Sync();
}
if (FLAGS_verbose) printf("\nPartitioning (CPU): %f ms.", par_total_ms / FLAGS_repetitions);
printf("\nCC (Async): %f ms. <filter>\n\n", total_ms / FLAGS_repetitions);
return CheckComponents(context.host_parents, context.nvtxs);
}
// Test harness entry point for the multi-GPU asynchronous CC run.
bool TestCCAsyncMulti(int ngpus)
{
    const bool ok = RunCCMAsyncAtomic(ngpus);
    return ok;
}
|
53808250d34c4636b365d069b2f216f4eefd3b96.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "linearAlgebraCUDA.cuh"
#define BLOCK_SIZE 32
//Kernels.
// Naive dense matrix product C = A * B: one thread computes one element of C.
// A is rowFirst x colFirst, B is colFirst x colSecond (row-major).
template<typename T> __global__ void MultipMatrices(T* d_inFirst, T* d_inSecond, T* d_out, int rowFirst, int colFirst, int colSecond)
{
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: only in-range threads compute and write.
    if (r < rowFirst && c < colSecond)
    {
        T acc = 0.0f;
        for (int k = 0; k < colFirst; ++k)
        {
            acc += d_inFirst[r * colFirst + k] * d_inSecond[k * colSecond + c];
        }
        d_out[r * colSecond + c] = acc;
    }
}
// Tiled matrix product C = A * B using shared memory.
// A: rowFirst x colFirst, B: colFirst x colSecond (row-major).  Each block
// computes one BLOCK_SIZE x BLOCK_SIZE tile of C; out-of-range tile entries
// are staged as zero so partial edge tiles contribute nothing to the sum.
// Assumes blockDim == (BLOCK_SIZE, BLOCK_SIZE) -- as set by the host wrapper.
template<typename T> __global__ void MultipMatricesSH(T* d_inFirst, T* d_inSecond, T* d_out, int rowFirst, int colFirst, int colSecond)
{
int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
__shared__ T shFirst[BLOCK_SIZE * BLOCK_SIZE];
__shared__ T shSecond[BLOCK_SIZE * BLOCK_SIZE];
T sum = 0.0f;
// Walk the shared K dimension one BLOCK_SIZE-wide tile at a time (ceil-div).
for (int i = 0; i < (BLOCK_SIZE + colFirst - 1) / BLOCK_SIZE; i++)
{
// Stage this thread's element of the A tile (zero-fill outside A).
if (i * BLOCK_SIZE + threadIdx.x < colFirst && row < rowFirst)
{
shFirst[threadIdx.y * BLOCK_SIZE + threadIdx.x] = d_inFirst[row * colFirst + i * BLOCK_SIZE + threadIdx.x];
}
else
{
shFirst[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0.0f;
}
// Stage this thread's element of the B tile (zero-fill outside B).
if (i * BLOCK_SIZE + threadIdx.y < colFirst && col < colSecond)
{
shSecond[threadIdx.y * BLOCK_SIZE + threadIdx.x] = d_inSecond[(i * BLOCK_SIZE + threadIdx.y) * colSecond + col];
}
else
{
shSecond[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0.0f;
}
__syncthreads();  // tile fully staged before any thread reads it
// Accumulate this tile's slice of the dot product.
for (int j = 0; j < BLOCK_SIZE; j++)
{
sum += shFirst[threadIdx.y * BLOCK_SIZE + j] * shSecond[j * BLOCK_SIZE + threadIdx.x];
}
__syncthreads();  // tile fully consumed before the next iteration overwrites it
}
// Only in-range threads write their accumulated element of C.
if (row < rowFirst && col < colSecond)
{
d_out[((blockIdx.y * blockDim.y + threadIdx.y) * colSecond) + (blockIdx.x * blockDim.x) + threadIdx.x] = sum;
}
}
// Naive transpose: out[c][r] = in[r][c], one thread per input element.
template<typename T> __global__ void Transp(T* d_inMatrix, T* d_out, int rowMatrix, int colMatrix)
{
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: only in-range threads touch memory.
    if (r < rowMatrix && c < colMatrix)
    {
        d_out[c * rowMatrix + r] = d_inMatrix[r * colMatrix + c];
    }
}
// Transpose with a shared-memory staging tile.
//
// Fix: the original returned early for out-of-range threads *before*
// __syncthreads(), so partial edge blocks executed the barrier divergently
// (undefined behaviour / possible hang).  All threads now reach the barrier;
// only in-range threads touch memory.
//
// NOTE(review): the tile is read back with the same (y, x) slot it was
// written to, so this kernel gains no access reordering from the staging
// step and behaves like the plain Transp kernel -- left as-is on purpose.
// Assumes blockDim == (BLOCK_SIZE, BLOCK_SIZE).
template<typename T> __global__ void TranspSH(T *d_inMatrix, T *d_out, int rowMatrix, int colMatrix)
{
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    __shared__ T shTile[BLOCK_SIZE * BLOCK_SIZE];
    const bool inBounds = (row < rowMatrix) && (col < colMatrix);
    if (inBounds)
    {
        shTile[threadIdx.y * BLOCK_SIZE + threadIdx.x] = d_inMatrix[row * colMatrix + col];
    }
    __syncthreads(); // every thread of the block reaches the barrier
    if (inBounds)
    {
        d_out[col * rowMatrix + row] = shTile[threadIdx.y * BLOCK_SIZE + threadIdx.x];
    }
}
// Outer product: out[i][j] = first[i] * second[j], one thread per output cell.
template<typename T> __global__ void OutPr(T* d_inFirst, T* d_inSecond, T* d_out, int rowFirst, int rowSecond)
{
    const int i = blockIdx.y * blockDim.y + threadIdx.y;
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: only in-range threads write.
    if (i < rowFirst && j < rowSecond)
    {
        d_out[i * rowSecond + j] = d_inFirst[i] * d_inSecond[j];
    }
}
// Outer product with shared-memory staging: out[i][j] = first[i] * second[j].
//
// Fixes over the original:
//  * shFirst/shSecond were indexed with the *global* row/col index, which
//    runs past the end of the BLOCK_SIZE^2 shared arrays once the grid has
//    more than BLOCK_SIZE blocks along an axis (out-of-bounds shared-memory
//    access).  Each vector slice now needs only BLOCK_SIZE entries, indexed
//    by threadIdx.
//  * the early return for out-of-range threads happened before
//    __syncthreads(), a divergent barrier (undefined behaviour).
// Assumes blockDim == (BLOCK_SIZE, BLOCK_SIZE).
template<typename T> __global__ void OutPrSH(T* d_inFirst, T* d_inSecond, T* d_out, int rowFirst, int rowSecond)
{
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    __shared__ T shFirst[BLOCK_SIZE];  // first[] slice for this tile's rows
    __shared__ T shSecond[BLOCK_SIZE]; // second[] slice for this tile's cols
    // One thread per tile row/column stages the corresponding vector element.
    if (threadIdx.x == 0 && row < rowFirst)
    {
        shFirst[threadIdx.y] = d_inFirst[row];
    }
    if (threadIdx.y == 0 && col < rowSecond)
    {
        shSecond[threadIdx.x] = d_inSecond[col];
    }
    __syncthreads(); // every thread of the block reaches the barrier
    if (row < rowFirst && col < rowSecond)
    {
        d_out[row * rowSecond + col] = shFirst[threadIdx.y] * shSecond[threadIdx.x];
    }
}
// Element-wise vector difference: out[i] = first[i] - second[i].
template<typename T> __global__ void VecDiff(T* d_inFirst, T* d_inSecond, T* d_out, int rows)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: only in-range threads write.
    if (i < rows)
    {
        d_out[i] = d_inFirst[i] - d_inSecond[i];
    }
}
// Element-wise vector difference via shared-memory staging.
//
// Fixes over the original:
//  * the shared arrays were indexed with blockIdx.x * BLOCK_SIZE + threadIdx.x,
//    which exceeds the BLOCK_SIZE^2 shared-array extent for larger grids
//    (out-of-bounds shared-memory access).  threadIdx.x alone is the correct
//    per-block slot: the launch site uses blockDim.x == BLOCK_SIZE^2, so
//    threadIdx.x always fits the BLOCK_SIZE^2 shared arrays.
//  * the early return for tail threads preceded __syncthreads(), a divergent
//    barrier (undefined behaviour).
template<typename T> __global__ void VecDiffSH(T* d_inFirst, T* d_inSecond, T* d_out, int rows)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ T shFirst[BLOCK_SIZE * BLOCK_SIZE];
    __shared__ T shSecond[BLOCK_SIZE * BLOCK_SIZE];
    const bool inBounds = (row < rows);
    if (inBounds)
    {
        shFirst[threadIdx.x] = d_inFirst[row];
        shSecond[threadIdx.x] = d_inSecond[row];
    }
    __syncthreads(); // every thread of the block reaches the barrier
    if (inBounds)
    {
        d_out[row] = shFirst[threadIdx.x] - shSecond[threadIdx.x];
    }
}
//Functions.
// Host wrapper: copies both operands to the device, runs the naive matrix
// product kernel, and copies the (rowFirst x colSecond) result back.
// h_first: rowFirst x colFirst, h_second: rowSecond x colSecond (row-major).
template<typename T> void matricesDotProductGPU(T* h_first, const int& rowFirst, const int& colFirst, T* h_second, const int& rowSecond, const int& colSecond, T* h_result)
{
    const int bytesA = rowFirst * colFirst * sizeof(T);
    const int bytesB = rowSecond * colSecond * sizeof(T);
    const int bytesC = rowFirst * colSecond * sizeof(T);
    T* d_a = nullptr;
    T* d_b = nullptr;
    T* d_c = nullptr;
    hipMalloc((void**)&d_a, bytesA);
    hipMalloc((void**)&d_b, bytesB);
    hipMalloc((void**)&d_c, bytesC);
    hipMemcpy(d_a, h_first, bytesA, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_second, bytesB, hipMemcpyHostToDevice);
    // One thread per output element; ceil-div grid covers the tail.
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 blocks((colSecond + threads.x - 1) / threads.x,
                      (rowFirst + threads.y - 1) / threads.y);
    hipLaunchKernelGGL((MultipMatrices), dim3(blocks), dim3(threads), 0, 0, d_a, d_b, d_c, rowFirst, colFirst, colSecond);
    hipMemcpy(h_result, d_c, bytesC, hipMemcpyDeviceToHost); // blocking copy syncs the kernel
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);
}
// Host wrapper for the shared-memory (tiled) matrix product kernel.
// Same contract as matricesDotProductGPU; only the kernel differs.
template<typename T> void matricesDotProductGPUSH(T* h_first, const int& rowFirst, const int& colFirst, T* h_second, const int& rowSecond, const int& colSecond, T* h_result)
{
    const int bytesLhs = rowFirst * colFirst * sizeof(T);
    const int bytesRhs = rowSecond * colSecond * sizeof(T);
    const int bytesRes = rowFirst * colSecond * sizeof(T);
    T* d_lhs = nullptr;
    T* d_rhs = nullptr;
    T* d_res = nullptr;
    hipMalloc((void**)&d_lhs, bytesLhs);
    hipMalloc((void**)&d_rhs, bytesRhs);
    hipMalloc((void**)&d_res, bytesRes);
    hipMemcpy(d_lhs, h_first, bytesLhs, hipMemcpyHostToDevice);
    hipMemcpy(d_rhs, h_second, bytesRhs, hipMemcpyHostToDevice);
    // BLOCK_SIZE x BLOCK_SIZE blocks -- the tiled kernel depends on this shape.
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 blocks((colSecond + threads.x - 1) / threads.x,
                      (rowFirst + threads.y - 1) / threads.y);
    hipLaunchKernelGGL((MultipMatricesSH), dim3(blocks), dim3(threads), 0, 0, d_lhs, d_rhs, d_res, rowFirst, colFirst, colSecond);
    hipMemcpy(h_result, d_res, bytesRes, hipMemcpyDeviceToHost); // blocking copy syncs the kernel
    hipFree(d_lhs);
    hipFree(d_rhs);
    hipFree(d_res);
}
// Host wrapper: device transpose of a rowMatrix x colMatrix matrix.
template<typename T> void transposeGPU(T* h_matrix, const int& rowMatrix, const int& colMatrix, T* h_result)
{
    const int numBytes = rowMatrix * colMatrix * sizeof(T);
    T* d_src = nullptr;
    T* d_dst = nullptr;
    hipMalloc((void**)&d_src, numBytes);
    hipMalloc((void**)&d_dst, numBytes);
    hipMemcpy(d_src, h_matrix, numBytes, hipMemcpyHostToDevice);
    // One thread per input element; ceil-div grid covers the tail.
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 blocks((colMatrix + threads.x - 1) / threads.x,
                      (rowMatrix + threads.y - 1) / threads.y);
    hipLaunchKernelGGL((Transp), dim3(blocks), dim3(threads), 0, 0, d_src, d_dst, rowMatrix, colMatrix);
    hipMemcpy(h_result, d_dst, numBytes, hipMemcpyDeviceToHost); // blocking copy syncs the kernel
    hipFree(d_src);
    hipFree(d_dst);
}
// Host wrapper for the shared-memory transpose kernel.
// Same contract as transposeGPU; only the kernel differs.
template<typename T> void transposeGPUSH(T* h_matrix, const int& rowMatrix, const int& colMatrix, T* h_result)
{
    const int numBytes = rowMatrix * colMatrix * sizeof(T);
    T* d_src = nullptr;
    T* d_dst = nullptr;
    hipMalloc((void**)&d_src, numBytes);
    hipMalloc((void**)&d_dst, numBytes);
    hipMemcpy(d_src, h_matrix, numBytes, hipMemcpyHostToDevice);
    // BLOCK_SIZE x BLOCK_SIZE blocks -- the staged kernel depends on this shape.
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 blocks((colMatrix + threads.x - 1) / threads.x,
                      (rowMatrix + threads.y - 1) / threads.y);
    hipLaunchKernelGGL((TranspSH), dim3(blocks), dim3(threads), 0, 0, d_src, d_dst, rowMatrix, colMatrix);
    hipMemcpy(h_result, d_dst, numBytes, hipMemcpyDeviceToHost); // blocking copy syncs the kernel
    hipFree(d_src);
    hipFree(d_dst);
}
// Host wrapper: outer product of two vectors -> rowFirst x rowSecond matrix.
template<typename T> void outerProdGPU(T* h_first, const int& rowFirst, T* h_second, const int& rowSecond, T* h_result)
{
    const int bytesU = rowFirst * sizeof(T);
    const int bytesV = rowSecond * sizeof(T);
    const int bytesM = rowFirst * rowSecond * sizeof(T);
    T* d_u = nullptr;
    T* d_v = nullptr;
    T* d_m = nullptr;
    hipMalloc((void**)&d_u, bytesU);
    hipMalloc((void**)&d_v, bytesV);
    hipMalloc((void**)&d_m, bytesM);
    hipMemcpy(d_u, h_first, bytesU, hipMemcpyHostToDevice);
    hipMemcpy(d_v, h_second, bytesV, hipMemcpyHostToDevice);
    // One thread per output cell; ceil-div grid covers the tail.
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 blocks((rowSecond + threads.x - 1) / threads.x,
                      (rowFirst + threads.y - 1) / threads.y);
    hipLaunchKernelGGL((OutPr), dim3(blocks), dim3(threads), 0, 0, d_u, d_v, d_m, rowFirst, rowSecond);
    hipMemcpy(h_result, d_m, bytesM, hipMemcpyDeviceToHost); // blocking copy syncs the kernel
    hipFree(d_u);
    hipFree(d_v);
    hipFree(d_m);
}
// Host wrapper for the shared-memory outer-product kernel.
// Same contract as outerProdGPU; only the kernel differs.
template<typename T> void outerProdGPUSH(T* h_first, const int& rowFirst, T* h_second, const int& rowSecond, T* h_result)
{
    const int bytesU = rowFirst * sizeof(T);
    const int bytesV = rowSecond * sizeof(T);
    const int bytesM = rowFirst * rowSecond * sizeof(T);
    T* d_u = nullptr;
    T* d_v = nullptr;
    T* d_m = nullptr;
    hipMalloc((void**)&d_u, bytesU);
    hipMalloc((void**)&d_v, bytesV);
    hipMalloc((void**)&d_m, bytesM);
    hipMemcpy(d_u, h_first, bytesU, hipMemcpyHostToDevice);
    hipMemcpy(d_v, h_second, bytesV, hipMemcpyHostToDevice);
    // BLOCK_SIZE x BLOCK_SIZE blocks -- the staged kernel depends on this shape.
    const dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    const dim3 blocks((rowSecond + threads.x - 1) / threads.x,
                      (rowFirst + threads.y - 1) / threads.y);
    hipLaunchKernelGGL((OutPrSH), dim3(blocks), dim3(threads), 0, 0, d_u, d_v, d_m, rowFirst, rowSecond);
    hipMemcpy(h_result, d_m, bytesM, hipMemcpyDeviceToHost); // blocking copy syncs the kernel
    hipFree(d_u);
    hipFree(d_v);
    hipFree(d_m);
}
// Host wrapper: element-wise difference of two length-`rows` vectors.
template<typename T> void vectorsDiffGPU(T* h_first, T* h_second, const int& rows, T* h_result)
{
    const int numBytes = rows * sizeof(T);
    T* d_a = nullptr;
    T* d_b = nullptr;
    T* d_r = nullptr;
    hipMalloc((void**)&d_a, numBytes);
    hipMalloc((void**)&d_b, numBytes);
    hipMalloc((void**)&d_r, numBytes);
    hipMemcpy(d_a, h_first, numBytes, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_second, numBytes, hipMemcpyHostToDevice);
    // 1D launch: BLOCK_SIZE^2 threads per block, ceil-div grid covers the tail.
    const dim3 threads(BLOCK_SIZE * BLOCK_SIZE);
    const dim3 blocks((rows + threads.x - 1) / threads.x);
    hipLaunchKernelGGL((VecDiff), dim3(blocks), dim3(threads), 0, 0, d_a, d_b, d_r, rows);
    hipMemcpy(h_result, d_r, numBytes, hipMemcpyDeviceToHost); // blocking copy syncs the kernel
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_r);
}
// Host wrapper for the shared-memory vector-difference kernel.
// Same contract as vectorsDiffGPU; only the kernel differs.
template<typename T> void vectorsDiffGPUSH(T* h_first, T* h_second, const int& rows, T* h_result)
{
    const int numBytes = rows * sizeof(T);
    T* d_a = nullptr;
    T* d_b = nullptr;
    T* d_r = nullptr;
    hipMalloc((void**)&d_a, numBytes);
    hipMalloc((void**)&d_b, numBytes);
    hipMalloc((void**)&d_r, numBytes);
    hipMemcpy(d_a, h_first, numBytes, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_second, numBytes, hipMemcpyHostToDevice);
    // 1D launch: BLOCK_SIZE^2 threads per block (the kernel's shared arrays
    // are sized to exactly this), ceil-div grid covers the tail.
    const dim3 threads(BLOCK_SIZE * BLOCK_SIZE);
    const dim3 blocks((rows + threads.x - 1) / threads.x);
    hipLaunchKernelGGL((VecDiffSH), dim3(blocks), dim3(threads), 0, 0, d_a, d_b, d_r, rows);
    hipMemcpy(h_result, d_r, numBytes, hipMemcpyDeviceToHost); // blocking copy syncs the kernel
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_r);
}
//Forced template instantiations.
//float
template __global__ void MultipMatrices(float* d_inFirst, float* d_inSecond, float* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void MultipMatricesSH(float* d_inFirst, float* d_inSecond, float* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void Transp(float* d_inMatrix, float* d_out, int rowMatrix, int colMatrix);
template __global__ void TranspSH(float *d_inMatrix, float *d_out, int rowMatrix, int colMatrix);
template __global__ void OutPr(float* d_inFirst, float* d_inSecond, float* d_out, int rowFirst, int rowSecond);
template __global__ void OutPrSH(float* d_inFirst, float* d_inSecond, float* d_out, int rowFirst, int rowSecond);
template __global__ void VecDiff(float* d_inFirst, float* d_inSecond, float* d_out, int rows);
template __global__ void VecDiffSH(float* d_inFirst, float* d_inSecond, float* d_out, int rows);
template void matricesDotProductGPU(float* h_first, const int& rowFirst, const int& colFirst, float* h_second, const int& rowSecond, const int& colSecond, float* h_result);
template void matricesDotProductGPUSH(float* h_first, const int& rowFirst, const int& colFirst, float* h_second, const int& rowSecond, const int& colSecond, float* h_result);
template void transposeGPU(float* h_matrix, const int& rowMatrix, const int& colMatrix, float* h_result);
template void transposeGPUSH(float* h_matrix, const int& rowMatrix, const int& colMatrix, float* h_result);
template void outerProdGPU(float* h_first, const int& rowFirst, float* h_second, const int& rowSecond, float* h_result);
template void outerProdGPUSH(float* h_first, const int& rowFirst, float* h_second, const int& rowSecond, float* h_result);
template void vectorsDiffGPU(float* h_first, float* h_second, const int& rows, float* h_result);
template void vectorsDiffGPUSH(float* h_first, float* h_second, const int& rows, float* h_result);
//double
template __global__ void MultipMatrices(double* d_inFirst, double* d_inSecond, double* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void MultipMatricesSH(double* d_inFirst, double* d_inSecond, double* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void Transp(double* d_inMatrix, double* d_out, int rowMatrix, int colMatrix);
template __global__ void TranspSH(double *d_inMatrix, double *d_out, int rowMatrix, int colMatrix);
template __global__ void OutPr(double* d_inFirst, double* d_inSecond, double* d_out, int rowFirst, int rowSecond);
template __global__ void OutPrSH(double* d_inFirst, double* d_inSecond, double* d_out, int rowFirst, int rowSecond);
template __global__ void VecDiff(double* d_inFirst, double* d_inSecond, double* d_out, int rows);
template __global__ void VecDiffSH(double* d_inFirst, double* d_inSecond, double* d_out, int rows);
template void matricesDotProductGPU(double* h_first, const int& rowFirst, const int& colFirst, double* h_second, const int& rowSecond, const int& colSecond, double* h_result);
template void matricesDotProductGPUSH(double* h_first, const int& rowFirst, const int& colFirst, double* h_second, const int& rowSecond, const int& colSecond, double* h_result);
template void transposeGPU(double* h_matrix, const int& rowMatrix, const int& colMatrix, double* h_result);
template void transposeGPUSH(double* h_matrix, const int& rowMatrix, const int& colMatrix, double* h_result);
template void outerProdGPU(double* h_first, const int& rowFirst, double* h_second, const int& rowSecond, double* h_result);
template void outerProdGPUSH(double* h_first, const int& rowFirst, double* h_second, const int& rowSecond, double* h_result);
template void vectorsDiffGPU(double* h_first, double* h_second, const int& rows, double* h_result);
template void vectorsDiffGPUSH(double* h_first, double* h_second, const int& rows, double* h_result);
//int
template __global__ void MultipMatrices(int* d_inFirst, int* d_inSecond, int* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void MultipMatricesSH(int* d_inFirst, int* d_inSecond, int* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void Transp(int* d_inMatrix, int* d_out, int rowMatrix, int colMatrix);
template __global__ void TranspSH(int *d_inMatrix, int *d_out, int rowMatrix, int colMatrix);
template __global__ void OutPr(int* d_inFirst, int* d_inSecond, int* d_out, int rowFirst, int rowSecond);
template __global__ void OutPrSH(int* d_inFirst, int* d_inSecond, int* d_out, int rowFirst, int rowSecond);
template __global__ void VecDiff(int* d_inFirst, int* d_inSecond, int* d_out, int rows);
template __global__ void VecDiffSH(int* d_inFirst, int* d_inSecond, int* d_out, int rows);
template void matricesDotProductGPU(int* h_first, const int& rowFirst, const int& colFirst, int* h_second, const int& rowSecond, const int& colSecond, int* h_result);
template void matricesDotProductGPUSH(int* h_first, const int& rowFirst, const int& colFirst, int* h_second, const int& rowSecond, const int& colSecond, int* h_result);
template void transposeGPU(int* h_matrix, const int& rowMatrix, const int& colMatrix, int* h_result);
template void transposeGPUSH(int* h_matrix, const int& rowMatrix, const int& colMatrix, int* h_result);
template void outerProdGPU(int* h_first, const int& rowFirst, int* h_second, const int& rowSecond, int* h_result);
template void outerProdGPUSH(int* h_first, const int& rowFirst, int* h_second, const int& rowSecond, int* h_result);
template void vectorsDiffGPU(int* h_first, int* h_second, const int& rows, int* h_result);
template void vectorsDiffGPUSH(int* h_first, int* h_second, const int& rows, int* h_result);
//unsigned int
template __global__ void MultipMatrices(unsigned int* d_inFirst, unsigned int* d_inSecond, unsigned int* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void MultipMatricesSH(unsigned int* d_inFirst, unsigned int* d_inSecond, unsigned int* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void Transp(unsigned int* d_inMatrix, unsigned int* d_out, int rowMatrix, int colMatrix);
template __global__ void TranspSH(unsigned int *d_inMatrix, unsigned int *d_out, int rowMatrix, int colMatrix);
template __global__ void OutPr(unsigned int* d_inFirst, unsigned int* d_inSecond, unsigned int* d_out, int rowFirst, int rowSecond);
template __global__ void OutPrSH(unsigned int* d_inFirst, unsigned int* d_inSecond, unsigned int* d_out, int rowFirst, int rowSecond);
template __global__ void VecDiff(unsigned int* d_inFirst, unsigned int* d_inSecond, unsigned int* d_out, int rows);
template __global__ void VecDiffSH(unsigned int* d_inFirst, unsigned int* d_inSecond, unsigned int* d_out, int rows);
template void matricesDotProductGPU(unsigned int* h_first, const int& rowFirst, const int& colFirst, unsigned int* h_second, const int& rowSecond, const int& colSecond, unsigned int* h_result);
template void matricesDotProductGPUSH(unsigned int* h_first, const int& rowFirst, const int& colFirst, unsigned int* h_second, const int& rowSecond, const int& colSecond, unsigned int* h_result);
template void transposeGPU(unsigned int* h_matrix, const int& rowMatrix, const int& colMatrix, unsigned int* h_result);
template void transposeGPUSH(unsigned int* h_matrix, const int& rowMatrix, const int& colMatrix, unsigned int* h_result);
template void outerProdGPU(unsigned int* h_first, const int& rowFirst, unsigned int* h_second, const int& rowSecond, unsigned int* h_result);
template void outerProdGPUSH(unsigned int* h_first, const int& rowFirst, unsigned int* h_second, const int& rowSecond, unsigned int* h_result);
template void vectorsDiffGPU(unsigned int* h_first, unsigned int* h_second, const int& rows, unsigned int* h_result);
template void vectorsDiffGPUSH(unsigned int* h_first, unsigned int* h_second, const int& rows, unsigned int* h_result);
//long
template __global__ void MultipMatrices(long* d_inFirst, long* d_inSecond, long* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void MultipMatricesSH(long* d_inFirst, long* d_inSecond, long* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void Transp(long* d_inMatrix, long* d_out, int rowMatrix, int colMatrix);
template __global__ void TranspSH(long *d_inMatrix, long *d_out, int rowMatrix, int colMatrix);
template __global__ void OutPr(long* d_inFirst, long* d_inSecond, long* d_out, int rowFirst, int rowSecond);
template __global__ void OutPrSH(long* d_inFirst, long* d_inSecond, long* d_out, int rowFirst, int rowSecond);
template __global__ void VecDiff(long* d_inFirst, long* d_inSecond, long* d_out, int rows);
template __global__ void VecDiffSH(long* d_inFirst, long* d_inSecond, long* d_out, int rows);
template void matricesDotProductGPU(long* h_first, const int& rowFirst, const int& colFirst, long* h_second, const int& rowSecond, const int& colSecond, long* h_result);
template void matricesDotProductGPUSH(long* h_first, const int& rowFirst, const int& colFirst, long* h_second, const int& rowSecond, const int& colSecond, long* h_result);
template void transposeGPU(long* h_matrix, const int& rowMatrix, const int& colMatrix, long* h_result);
template void transposeGPUSH(long* h_matrix, const int& rowMatrix, const int& colMatrix, long* h_result);
template void outerProdGPU(long* h_first, const int& rowFirst, long* h_second, const int& rowSecond, long* h_result);
template void outerProdGPUSH(long* h_first, const int& rowFirst, long* h_second, const int& rowSecond, long* h_result);
template void vectorsDiffGPU(long* h_first, long* h_second, const int& rows, long* h_result);
template void vectorsDiffGPUSH(long* h_first, long* h_second, const int& rows, long* h_result);
//unsigned long
template __global__ void MultipMatrices(unsigned long* d_inFirst, unsigned long* d_inSecond, unsigned long* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void MultipMatricesSH(unsigned long* d_inFirst, unsigned long* d_inSecond, unsigned long* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void Transp(unsigned long* d_inMatrix, unsigned long* d_out, int rowMatrix, int colMatrix);
template __global__ void TranspSH(unsigned long *d_inMatrix, unsigned long *d_out, int rowMatrix, int colMatrix);
template __global__ void OutPr(unsigned long* d_inFirst, unsigned long* d_inSecond, unsigned long* d_out, int rowFirst, int rowSecond);
template __global__ void OutPrSH(unsigned long* d_inFirst, unsigned long* d_inSecond, unsigned long* d_out, int rowFirst, int rowSecond);
template __global__ void VecDiff(unsigned long* d_inFirst, unsigned long* d_inSecond, unsigned long* d_out, int rows);
template __global__ void VecDiffSH(unsigned long* d_inFirst, unsigned long* d_inSecond, unsigned long* d_out, int rows);
template void matricesDotProductGPU(unsigned long* h_first, const int& rowFirst, const int& colFirst, unsigned long* h_second, const int& rowSecond, const int& colSecond, unsigned long* h_result);
template void matricesDotProductGPUSH(unsigned long* h_first, const int& rowFirst, const int& colFirst, unsigned long* h_second, const int& rowSecond, const int& colSecond, unsigned long* h_result);
template void transposeGPU(unsigned long* h_matrix, const int& rowMatrix, const int& colMatrix, unsigned long* h_result);
template void transposeGPUSH(unsigned long* h_matrix, const int& rowMatrix, const int& colMatrix, unsigned long* h_result);
template void outerProdGPU(unsigned long* h_first, const int& rowFirst, unsigned long* h_second, const int& rowSecond, unsigned long* h_result);
template void outerProdGPUSH(unsigned long* h_first, const int& rowFirst, unsigned long* h_second, const int& rowSecond, unsigned long* h_result);
template void vectorsDiffGPU(unsigned long* h_first, unsigned long* h_second, const int& rows, unsigned long* h_result);
template void vectorsDiffGPUSH(unsigned long* h_first, unsigned long* h_second, const int& rows, unsigned long* h_result);
| 53808250d34c4636b365d069b2f216f4eefd3b96.cu | #include "linearAlgebraCUDA.cuh"
#define BLOCK_SIZE 32
//Kernels.
// Naive dense matrix multiply: d_out = d_inFirst * d_inSecond.
// One thread computes one element of the rowFirst x colSecond output by
// accumulating the dot product of a row of the first operand with a column
// of the second. Expects a 2D launch covering the output matrix.
template<typename T> __global__ void MultipMatrices(T* d_inFirst, T* d_inSecond, T* d_out, int rowFirst, int colFirst, int colSecond)
{
    const int outRow = blockIdx.y * blockDim.y + threadIdx.y;
    const int outCol = blockIdx.x * blockDim.x + threadIdx.x;
    // Grid tail guard: threads outside the output matrix do nothing.
    if (outRow < rowFirst && outCol < colSecond)
    {
        T acc = 0.0f;
        for (int k = 0; k < colFirst; k++)
        {
            acc += d_inFirst[outRow * colFirst + k] * d_inSecond[k * colSecond + outCol];
        }
        d_out[outRow * colSecond + outCol] = acc;
    }
}
// Tiled (shared-memory) dense matrix multiply: d_out = d_inFirst * d_inSecond.
// Each BLOCK_SIZE x BLOCK_SIZE thread block cooperatively stages one tile of
// each operand into shared memory per iteration and accumulates partial dot
// products, cutting global-memory traffic versus the naive kernel.
// Assumes a 2D launch with blockDim == (BLOCK_SIZE, BLOCK_SIZE).
template<typename T> __global__ void MultipMatricesSH(T* d_inFirst, T* d_inSecond, T* d_out, int rowFirst, int colFirst, int colSecond)
{
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    __shared__ T shFirst[BLOCK_SIZE * BLOCK_SIZE];
    __shared__ T shSecond[BLOCK_SIZE * BLOCK_SIZE];
    T sum = 0.0f;
    // Walk the K dimension one tile at a time: ceil(colFirst / BLOCK_SIZE) steps.
    for (int i = 0; i < (BLOCK_SIZE + colFirst - 1) / BLOCK_SIZE; i++)
    {
        // Stage a tile of the first operand; out-of-range lanes write 0 so
        // the inner-product loop below needs no bounds checks.
        if (i * BLOCK_SIZE + threadIdx.x < colFirst && row < rowFirst)
        {
            shFirst[threadIdx.y * BLOCK_SIZE + threadIdx.x] = d_inFirst[row * colFirst + i * BLOCK_SIZE + threadIdx.x];
        }
        else
        {
            shFirst[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0.0f;
        }
        // Stage a tile of the second operand, likewise zero-padded.
        if (i * BLOCK_SIZE + threadIdx.y < colFirst && col < colSecond)
        {
            shSecond[threadIdx.y * BLOCK_SIZE + threadIdx.x] = d_inSecond[(i * BLOCK_SIZE + threadIdx.y) * colSecond + col];
        }
        else
        {
            shSecond[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0.0f;
        }
        __syncthreads();  // both tiles fully written before any thread reads
        for (int j = 0; j < BLOCK_SIZE; j++)
        {
            sum += shFirst[threadIdx.y * BLOCK_SIZE + j] * shSecond[j * BLOCK_SIZE + threadIdx.x];
        }
        __syncthreads();  // all reads done before the next iteration overwrites
    }
    // Only in-range threads write their accumulated output element.
    if (row < rowFirst && col < colSecond)
    {
        d_out[((blockIdx.y * blockDim.y + threadIdx.y) * colSecond) + (blockIdx.x * blockDim.x) + threadIdx.x] = sum;
    }
}
// Naive out-of-place transpose: element (r, c) of the rowMatrix x colMatrix
// input is written to (c, r) of the output. One thread per element, 2D launch.
template<typename T> __global__ void Transp(T* d_inMatrix, T* d_out, int rowMatrix, int colMatrix)
{
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    // Skip grid-tail threads that fall outside the matrix.
    if (r < rowMatrix && c < colMatrix)
    {
        d_out[c * rowMatrix + r] = d_inMatrix[r * colMatrix + c];
    }
}
// Out-of-place transpose staged through a shared-memory tile.
// Fix: the original returned early for out-of-range threads BEFORE
// __syncthreads(); a barrier in divergent control flow is undefined behavior
// whenever a block only partially covers the matrix. All threads now reach
// the barrier, and the load/store are guarded instead of returning.
// NOTE(review): the tile is read back with the same index it was written
// with, so this variant gains no coalescing over Transp — behavior kept
// identical on purpose.
template<typename T> __global__ void TranspSH(T *d_inMatrix, T *d_out, int rowMatrix, int colMatrix)
{
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    __shared__ T shTile[BLOCK_SIZE * BLOCK_SIZE];
    const bool inRange = (row < rowMatrix) && (col < colMatrix);
    if (inRange)
    {
        shTile[threadIdx.y * BLOCK_SIZE + threadIdx.x] = d_inMatrix[row * colMatrix + col];
    }
    __syncthreads();  // every thread reaches the barrier, divergent or not
    if (inRange)
    {
        d_out[col * rowMatrix + row] = shTile[threadIdx.y * BLOCK_SIZE + threadIdx.x];
    }
}
// Outer product of two vectors: d_out[i * rowSecond + j] = first[i] * second[j].
// One thread per output element; 2D launch over rowFirst x rowSecond.
template<typename T> __global__ void OutPr(T* d_inFirst, T* d_inSecond, T* d_out, int rowFirst, int rowSecond)
{
    const int i = blockIdx.y * blockDim.y + threadIdx.y;
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    // Grid-tail guard.
    if (i >= rowFirst || j >= rowSecond)
    {
        return;
    }
    d_out[i * rowSecond + j] = d_inFirst[i] * d_inSecond[j];
}
// Outer product staged through shared memory.
// Fixes two defects in the original:
//  1) The shared arrays were indexed with the GLOBAL row/col
//     (blockIdx.y * BLOCK_SIZE + threadIdx.y), which runs past the
//     1024-element arrays for any block beyond index 31 — out-of-bounds
//     shared-memory writes.
//  2) Out-of-range threads returned before __syncthreads(), a barrier in
//     divergent control flow (undefined behavior).
// Only one element of each vector is needed per tile row/column, so a
// BLOCK_SIZE-element array per vector suffices, loaded by one thread each.
template<typename T> __global__ void OutPrSH(T* d_inFirst, T* d_inSecond, T* d_out, int rowFirst, int rowSecond)
{
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    __shared__ T shFirst[BLOCK_SIZE];
    __shared__ T shSecond[BLOCK_SIZE];
    const bool inRange = (row < rowFirst) && (col < rowSecond);
    // One thread per slot stages the vector elements (single writer, no race).
    if (threadIdx.x == 0 && row < rowFirst)
    {
        shFirst[threadIdx.y] = d_inFirst[row];
    }
    if (threadIdx.y == 0 && col < rowSecond)
    {
        shSecond[threadIdx.x] = d_inSecond[col];
    }
    __syncthreads();  // all threads reach the barrier
    if (inRange)
    {
        d_out[row * rowSecond + col] = shFirst[threadIdx.y] * shSecond[threadIdx.x];
    }
}
// Element-wise vector difference: d_out[i] = first[i] - second[i].
// One thread per element; 1D launch.
template<typename T> __global__ void VecDiff(T* d_inFirst, T* d_inSecond, T* d_out, int rows)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Grid-tail guard.
    if (idx < rows)
    {
        d_out[idx] = d_inFirst[idx] - d_inSecond[idx];
    }
}
// Element-wise vector difference staged through shared memory.
// Fixes two defects in the original:
//  1) The shared arrays were indexed with blockIdx.x * BLOCK_SIZE +
//     threadIdx.x. With blockDim.x == BLOCK_SIZE * BLOCK_SIZE (1024, see the
//     host wrapper) threadIdx.x alone spans the whole array, so any block
//     with blockIdx.x >= 1 wrote out of bounds.
//  2) Out-of-range threads returned before __syncthreads(), a barrier in
//     divergent control flow (undefined behavior).
// Each thread touches only its own slot, indexed by the LOCAL thread id.
template<typename T> __global__ void VecDiffSH(T* d_inFirst, T* d_inSecond, T* d_out, int rows)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    __shared__ T shFirst[BLOCK_SIZE * BLOCK_SIZE];
    __shared__ T shSecond[BLOCK_SIZE * BLOCK_SIZE];
    const bool inRange = (row < rows);
    if (inRange)
    {
        shFirst[threadIdx.x] = d_inFirst[row];
        shSecond[threadIdx.x] = d_inSecond[row];
    }
    __syncthreads();  // all threads reach the barrier
    if (inRange)
    {
        d_out[row] = shFirst[threadIdx.x] - shSecond[threadIdx.x];
    }
}
//Functions.
// Host wrapper: h_result (rowFirst x colSecond) = h_first (rowFirst x colFirst)
// * h_second (rowSecond x colSecond) via the naive MultipMatrices kernel.
// Blocking: the final device-to-host cudaMemcpy synchronizes with the kernel.
template<typename T> void matricesDotProductGPU(T* h_first, const int& rowFirst, const int& colFirst, T* h_second, const int& rowSecond, const int& colSecond, T* h_result)
{
    T* d_inFirst;
    T* d_inSecond;
    T* d_out;
    // size_t avoids 32-bit overflow of rows * cols * sizeof(T) on large matrices.
    const size_t BYFIRST = (size_t)rowFirst * colFirst * sizeof(T);
    const size_t BYSECOND = (size_t)rowSecond * colSecond * sizeof(T);
    const size_t BYRESULT = (size_t)rowFirst * colSecond * sizeof(T);
    cudaMalloc((void**)&d_inFirst, BYFIRST);
    cudaMalloc((void**)&d_inSecond, BYSECOND);
    cudaMalloc((void**)&d_out, BYRESULT);
    cudaMemcpy(d_inFirst, h_first, BYFIRST, cudaMemcpyHostToDevice);
    cudaMemcpy(d_inSecond, h_second, BYSECOND, cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((colSecond + dimBlock.x - 1) / dimBlock.x, (rowFirst + dimBlock.y - 1) / dimBlock.y);
    MultipMatrices <<<dimGrid, dimBlock>>>(d_inFirst, d_inSecond, d_out, rowFirst, colFirst, colSecond);
    cudaMemcpy(h_result, d_out, BYRESULT, cudaMemcpyDeviceToHost);
    cudaFree(d_inFirst);
    cudaFree(d_inSecond);
    cudaFree(d_out);
}
// Host wrapper: same contract as matricesDotProductGPU but launches the
// shared-memory-tiled kernel MultipMatricesSH.
// Blocking: the final device-to-host cudaMemcpy synchronizes with the kernel.
template<typename T> void matricesDotProductGPUSH(T* h_first, const int& rowFirst, const int& colFirst, T* h_second, const int& rowSecond, const int& colSecond, T* h_result)
{
    T* d_inFirst;
    T* d_inSecond;
    T* d_out;
    // size_t avoids 32-bit overflow of rows * cols * sizeof(T) on large matrices.
    const size_t BYFIRST = (size_t)rowFirst * colFirst * sizeof(T);
    const size_t BYSECOND = (size_t)rowSecond * colSecond * sizeof(T);
    const size_t BYRESULT = (size_t)rowFirst * colSecond * sizeof(T);
    cudaMalloc((void**)&d_inFirst, BYFIRST);
    cudaMalloc((void**)&d_inSecond, BYSECOND);
    cudaMalloc((void**)&d_out, BYRESULT);
    cudaMemcpy(d_inFirst, h_first, BYFIRST, cudaMemcpyHostToDevice);
    cudaMemcpy(d_inSecond, h_second, BYSECOND, cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((colSecond + dimBlock.x - 1) / dimBlock.x, (rowFirst + dimBlock.y - 1) / dimBlock.y);
    MultipMatricesSH <<<dimGrid, dimBlock>>>(d_inFirst, d_inSecond, d_out, rowFirst, colFirst, colSecond);
    cudaMemcpy(h_result, d_out, BYRESULT, cudaMemcpyDeviceToHost);
    cudaFree(d_inFirst);
    cudaFree(d_inSecond);
    cudaFree(d_out);
}
// Host wrapper: transposes the rowMatrix x colMatrix matrix h_matrix into
// h_result on the GPU with the naive Transp kernel.
// Blocking: the final device-to-host cudaMemcpy synchronizes with the kernel.
template<typename T> void transposeGPU(T* h_matrix, const int& rowMatrix, const int& colMatrix, T* h_result)
{
    T* d_inMatrix;
    T* d_out;
    // size_t avoids 32-bit overflow of rows * cols * sizeof(T) on large matrices.
    const size_t BYTES = (size_t)rowMatrix * colMatrix * sizeof(T);
    cudaMalloc((void**)&d_inMatrix, BYTES);
    cudaMalloc((void**)&d_out, BYTES);
    cudaMemcpy(d_inMatrix, h_matrix, BYTES, cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((colMatrix + dimBlock.x - 1) / dimBlock.x, (rowMatrix + dimBlock.y - 1) / dimBlock.y);
    Transp <<<dimGrid, dimBlock>>>(d_inMatrix, d_out, rowMatrix, colMatrix);
    cudaMemcpy(h_result, d_out, BYTES, cudaMemcpyDeviceToHost);
    cudaFree(d_inMatrix);
    cudaFree(d_out);
}
// Host wrapper: same contract as transposeGPU but launches the shared-memory
// kernel TranspSH.
// Blocking: the final device-to-host cudaMemcpy synchronizes with the kernel.
template<typename T> void transposeGPUSH(T* h_matrix, const int& rowMatrix, const int& colMatrix, T* h_result)
{
    T* d_inMatrix;
    T* d_out;
    // size_t avoids 32-bit overflow of rows * cols * sizeof(T) on large matrices.
    const size_t BYTES = (size_t)rowMatrix * colMatrix * sizeof(T);
    cudaMalloc((void**)&d_inMatrix, BYTES);
    cudaMalloc((void**)&d_out, BYTES);
    cudaMemcpy(d_inMatrix, h_matrix, BYTES, cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((colMatrix + dimBlock.x - 1) / dimBlock.x, (rowMatrix + dimBlock.y - 1) / dimBlock.y);
    TranspSH <<<dimGrid, dimBlock>>>(d_inMatrix, d_out, rowMatrix, colMatrix);
    cudaMemcpy(h_result, d_out, BYTES, cudaMemcpyDeviceToHost);
    cudaFree(d_inMatrix);
    cudaFree(d_out);
}
// Host wrapper: h_result (rowFirst x rowSecond) = outer product of the two
// host vectors, computed with the OutPr kernel.
// Blocking: the final device-to-host cudaMemcpy synchronizes with the kernel.
template<typename T> void outerProdGPU(T* h_first, const int& rowFirst, T* h_second, const int& rowSecond, T* h_result)
{
    T* d_inFirst;
    T* d_inSecond;
    T* d_out;
    // size_t avoids 32-bit overflow of rowFirst * rowSecond * sizeof(T).
    const size_t BYFIRST = (size_t)rowFirst * sizeof(T);
    const size_t BYSECOND = (size_t)rowSecond * sizeof(T);
    const size_t BYRESULT = (size_t)rowFirst * rowSecond * sizeof(T);
    cudaMalloc((void**)&d_inFirst, BYFIRST);
    cudaMalloc((void**)&d_inSecond, BYSECOND);
    cudaMalloc((void**)&d_out, BYRESULT);
    cudaMemcpy(d_inFirst, h_first, BYFIRST, cudaMemcpyHostToDevice);
    cudaMemcpy(d_inSecond, h_second, BYSECOND, cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((rowSecond + dimBlock.x - 1) / dimBlock.x, (rowFirst + dimBlock.y - 1) / dimBlock.y);
    OutPr <<<dimGrid, dimBlock>>>(d_inFirst, d_inSecond, d_out, rowFirst, rowSecond);
    cudaMemcpy(h_result, d_out, BYRESULT, cudaMemcpyDeviceToHost);
    cudaFree(d_inFirst);
    cudaFree(d_inSecond);
    cudaFree(d_out);
}
// Host wrapper: same contract as outerProdGPU but launches the shared-memory
// kernel OutPrSH.
// Blocking: the final device-to-host cudaMemcpy synchronizes with the kernel.
template<typename T> void outerProdGPUSH(T* h_first, const int& rowFirst, T* h_second, const int& rowSecond, T* h_result)
{
    T* d_inFirst;
    T* d_inSecond;
    T* d_out;
    // size_t avoids 32-bit overflow of rowFirst * rowSecond * sizeof(T).
    const size_t BYFIRST = (size_t)rowFirst * sizeof(T);
    const size_t BYSECOND = (size_t)rowSecond * sizeof(T);
    const size_t BYRESULT = (size_t)rowFirst * rowSecond * sizeof(T);
    cudaMalloc((void**)&d_inFirst, BYFIRST);
    cudaMalloc((void**)&d_inSecond, BYSECOND);
    cudaMalloc((void**)&d_out, BYRESULT);
    cudaMemcpy(d_inFirst, h_first, BYFIRST, cudaMemcpyHostToDevice);
    cudaMemcpy(d_inSecond, h_second, BYSECOND, cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((rowSecond + dimBlock.x - 1) / dimBlock.x, (rowFirst + dimBlock.y - 1) / dimBlock.y);
    OutPrSH <<<dimGrid, dimBlock>>>(d_inFirst, d_inSecond, d_out, rowFirst, rowSecond);
    cudaMemcpy(h_result, d_out, BYRESULT, cudaMemcpyDeviceToHost);
    cudaFree(d_inFirst);
    cudaFree(d_inSecond);
    cudaFree(d_out);
}
// Host wrapper: h_result[i] = h_first[i] - h_second[i] for i in [0, rows),
// computed with the VecDiff kernel (1D launch, 1024 threads per block).
// Blocking: the final device-to-host cudaMemcpy synchronizes with the kernel.
template<typename T> void vectorsDiffGPU(T* h_first, T* h_second, const int& rows, T* h_result)
{
    T* d_inFirst;
    T* d_inSecond;
    T* d_out;
    // size_t avoids 32-bit overflow of rows * sizeof(T) on large vectors.
    const size_t BYFIRST = (size_t)rows * sizeof(T);
    const size_t BYSECOND = (size_t)rows * sizeof(T);
    const size_t BYRESULT = (size_t)rows * sizeof(T);
    cudaMalloc((void**)&d_inFirst, BYFIRST);
    cudaMalloc((void**)&d_inSecond, BYSECOND);
    cudaMalloc((void**)&d_out, BYRESULT);
    cudaMemcpy(d_inFirst, h_first, BYFIRST, cudaMemcpyHostToDevice);
    cudaMemcpy(d_inSecond, h_second, BYSECOND, cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
    dim3 dimGrid((rows + dimBlock.x - 1) / dimBlock.x);
    VecDiff <<<dimGrid, dimBlock>>>(d_inFirst, d_inSecond, d_out, rows);
    cudaMemcpy(h_result, d_out, BYRESULT, cudaMemcpyDeviceToHost);
    cudaFree(d_inFirst);
    cudaFree(d_inSecond);
    cudaFree(d_out);
}
// Host wrapper: same contract as vectorsDiffGPU but launches the
// shared-memory kernel VecDiffSH.
// Blocking: the final device-to-host cudaMemcpy synchronizes with the kernel.
template<typename T> void vectorsDiffGPUSH(T* h_first, T* h_second, const int& rows, T* h_result)
{
    T* d_inFirst;
    T* d_inSecond;
    T* d_out;
    // size_t avoids 32-bit overflow of rows * sizeof(T) on large vectors.
    const size_t BYFIRST = (size_t)rows * sizeof(T);
    const size_t BYSECOND = (size_t)rows * sizeof(T);
    const size_t BYRESULT = (size_t)rows * sizeof(T);
    cudaMalloc((void**)&d_inFirst, BYFIRST);
    cudaMalloc((void**)&d_inSecond, BYSECOND);
    cudaMalloc((void**)&d_out, BYRESULT);
    cudaMemcpy(d_inFirst, h_first, BYFIRST, cudaMemcpyHostToDevice);
    cudaMemcpy(d_inSecond, h_second, BYSECOND, cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCK_SIZE * BLOCK_SIZE);
    dim3 dimGrid((rows + dimBlock.x - 1) / dimBlock.x);
    VecDiffSH <<<dimGrid, dimBlock>>>(d_inFirst, d_inSecond, d_out, rows);
    cudaMemcpy(h_result, d_out, BYRESULT, cudaMemcpyDeviceToHost);
    cudaFree(d_inFirst);
    cudaFree(d_inSecond);
    cudaFree(d_out);
}
//Forced template instantiations.
//float
template __global__ void MultipMatrices(float* d_inFirst, float* d_inSecond, float* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void MultipMatricesSH(float* d_inFirst, float* d_inSecond, float* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void Transp(float* d_inMatrix, float* d_out, int rowMatrix, int colMatrix);
template __global__ void TranspSH(float *d_inMatrix, float *d_out, int rowMatrix, int colMatrix);
template __global__ void OutPr(float* d_inFirst, float* d_inSecond, float* d_out, int rowFirst, int rowSecond);
template __global__ void OutPrSH(float* d_inFirst, float* d_inSecond, float* d_out, int rowFirst, int rowSecond);
template __global__ void VecDiff(float* d_inFirst, float* d_inSecond, float* d_out, int rows);
template __global__ void VecDiffSH(float* d_inFirst, float* d_inSecond, float* d_out, int rows);
template void matricesDotProductGPU(float* h_first, const int& rowFirst, const int& colFirst, float* h_second, const int& rowSecond, const int& colSecond, float* h_result);
template void matricesDotProductGPUSH(float* h_first, const int& rowFirst, const int& colFirst, float* h_second, const int& rowSecond, const int& colSecond, float* h_result);
template void transposeGPU(float* h_matrix, const int& rowMatrix, const int& colMatrix, float* h_result);
template void transposeGPUSH(float* h_matrix, const int& rowMatrix, const int& colMatrix, float* h_result);
template void outerProdGPU(float* h_first, const int& rowFirst, float* h_second, const int& rowSecond, float* h_result);
template void outerProdGPUSH(float* h_first, const int& rowFirst, float* h_second, const int& rowSecond, float* h_result);
template void vectorsDiffGPU(float* h_first, float* h_second, const int& rows, float* h_result);
template void vectorsDiffGPUSH(float* h_first, float* h_second, const int& rows, float* h_result);
//double
template __global__ void MultipMatrices(double* d_inFirst, double* d_inSecond, double* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void MultipMatricesSH(double* d_inFirst, double* d_inSecond, double* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void Transp(double* d_inMatrix, double* d_out, int rowMatrix, int colMatrix);
template __global__ void TranspSH(double *d_inMatrix, double *d_out, int rowMatrix, int colMatrix);
template __global__ void OutPr(double* d_inFirst, double* d_inSecond, double* d_out, int rowFirst, int rowSecond);
template __global__ void OutPrSH(double* d_inFirst, double* d_inSecond, double* d_out, int rowFirst, int rowSecond);
template __global__ void VecDiff(double* d_inFirst, double* d_inSecond, double* d_out, int rows);
template __global__ void VecDiffSH(double* d_inFirst, double* d_inSecond, double* d_out, int rows);
template void matricesDotProductGPU(double* h_first, const int& rowFirst, const int& colFirst, double* h_second, const int& rowSecond, const int& colSecond, double* h_result);
template void matricesDotProductGPUSH(double* h_first, const int& rowFirst, const int& colFirst, double* h_second, const int& rowSecond, const int& colSecond, double* h_result);
template void transposeGPU(double* h_matrix, const int& rowMatrix, const int& colMatrix, double* h_result);
template void transposeGPUSH(double* h_matrix, const int& rowMatrix, const int& colMatrix, double* h_result);
template void outerProdGPU(double* h_first, const int& rowFirst, double* h_second, const int& rowSecond, double* h_result);
template void outerProdGPUSH(double* h_first, const int& rowFirst, double* h_second, const int& rowSecond, double* h_result);
template void vectorsDiffGPU(double* h_first, double* h_second, const int& rows, double* h_result);
template void vectorsDiffGPUSH(double* h_first, double* h_second, const int& rows, double* h_result);
//int
template __global__ void MultipMatrices(int* d_inFirst, int* d_inSecond, int* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void MultipMatricesSH(int* d_inFirst, int* d_inSecond, int* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void Transp(int* d_inMatrix, int* d_out, int rowMatrix, int colMatrix);
template __global__ void TranspSH(int *d_inMatrix, int *d_out, int rowMatrix, int colMatrix);
template __global__ void OutPr(int* d_inFirst, int* d_inSecond, int* d_out, int rowFirst, int rowSecond);
template __global__ void OutPrSH(int* d_inFirst, int* d_inSecond, int* d_out, int rowFirst, int rowSecond);
template __global__ void VecDiff(int* d_inFirst, int* d_inSecond, int* d_out, int rows);
template __global__ void VecDiffSH(int* d_inFirst, int* d_inSecond, int* d_out, int rows);
template void matricesDotProductGPU(int* h_first, const int& rowFirst, const int& colFirst, int* h_second, const int& rowSecond, const int& colSecond, int* h_result);
template void matricesDotProductGPUSH(int* h_first, const int& rowFirst, const int& colFirst, int* h_second, const int& rowSecond, const int& colSecond, int* h_result);
template void transposeGPU(int* h_matrix, const int& rowMatrix, const int& colMatrix, int* h_result);
template void transposeGPUSH(int* h_matrix, const int& rowMatrix, const int& colMatrix, int* h_result);
template void outerProdGPU(int* h_first, const int& rowFirst, int* h_second, const int& rowSecond, int* h_result);
template void outerProdGPUSH(int* h_first, const int& rowFirst, int* h_second, const int& rowSecond, int* h_result);
template void vectorsDiffGPU(int* h_first, int* h_second, const int& rows, int* h_result);
template void vectorsDiffGPUSH(int* h_first, int* h_second, const int& rows, int* h_result);
//unsigned int
template __global__ void MultipMatrices(unsigned int* d_inFirst, unsigned int* d_inSecond, unsigned int* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void MultipMatricesSH(unsigned int* d_inFirst, unsigned int* d_inSecond, unsigned int* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void Transp(unsigned int* d_inMatrix, unsigned int* d_out, int rowMatrix, int colMatrix);
template __global__ void TranspSH(unsigned int *d_inMatrix, unsigned int *d_out, int rowMatrix, int colMatrix);
template __global__ void OutPr(unsigned int* d_inFirst, unsigned int* d_inSecond, unsigned int* d_out, int rowFirst, int rowSecond);
template __global__ void OutPrSH(unsigned int* d_inFirst, unsigned int* d_inSecond, unsigned int* d_out, int rowFirst, int rowSecond);
template __global__ void VecDiff(unsigned int* d_inFirst, unsigned int* d_inSecond, unsigned int* d_out, int rows);
template __global__ void VecDiffSH(unsigned int* d_inFirst, unsigned int* d_inSecond, unsigned int* d_out, int rows);
template void matricesDotProductGPU(unsigned int* h_first, const int& rowFirst, const int& colFirst, unsigned int* h_second, const int& rowSecond, const int& colSecond, unsigned int* h_result);
template void matricesDotProductGPUSH(unsigned int* h_first, const int& rowFirst, const int& colFirst, unsigned int* h_second, const int& rowSecond, const int& colSecond, unsigned int* h_result);
template void transposeGPU(unsigned int* h_matrix, const int& rowMatrix, const int& colMatrix, unsigned int* h_result);
template void transposeGPUSH(unsigned int* h_matrix, const int& rowMatrix, const int& colMatrix, unsigned int* h_result);
template void outerProdGPU(unsigned int* h_first, const int& rowFirst, unsigned int* h_second, const int& rowSecond, unsigned int* h_result);
template void outerProdGPUSH(unsigned int* h_first, const int& rowFirst, unsigned int* h_second, const int& rowSecond, unsigned int* h_result);
template void vectorsDiffGPU(unsigned int* h_first, unsigned int* h_second, const int& rows, unsigned int* h_result);
template void vectorsDiffGPUSH(unsigned int* h_first, unsigned int* h_second, const int& rows, unsigned int* h_result);
//long
template __global__ void MultipMatrices(long* d_inFirst, long* d_inSecond, long* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void MultipMatricesSH(long* d_inFirst, long* d_inSecond, long* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void Transp(long* d_inMatrix, long* d_out, int rowMatrix, int colMatrix);
template __global__ void TranspSH(long *d_inMatrix, long *d_out, int rowMatrix, int colMatrix);
template __global__ void OutPr(long* d_inFirst, long* d_inSecond, long* d_out, int rowFirst, int rowSecond);
template __global__ void OutPrSH(long* d_inFirst, long* d_inSecond, long* d_out, int rowFirst, int rowSecond);
template __global__ void VecDiff(long* d_inFirst, long* d_inSecond, long* d_out, int rows);
template __global__ void VecDiffSH(long* d_inFirst, long* d_inSecond, long* d_out, int rows);
template void matricesDotProductGPU(long* h_first, const int& rowFirst, const int& colFirst, long* h_second, const int& rowSecond, const int& colSecond, long* h_result);
template void matricesDotProductGPUSH(long* h_first, const int& rowFirst, const int& colFirst, long* h_second, const int& rowSecond, const int& colSecond, long* h_result);
template void transposeGPU(long* h_matrix, const int& rowMatrix, const int& colMatrix, long* h_result);
template void transposeGPUSH(long* h_matrix, const int& rowMatrix, const int& colMatrix, long* h_result);
template void outerProdGPU(long* h_first, const int& rowFirst, long* h_second, const int& rowSecond, long* h_result);
template void outerProdGPUSH(long* h_first, const int& rowFirst, long* h_second, const int& rowSecond, long* h_result);
template void vectorsDiffGPU(long* h_first, long* h_second, const int& rows, long* h_result);
template void vectorsDiffGPUSH(long* h_first, long* h_second, const int& rows, long* h_result);
//unsigned long
template __global__ void MultipMatrices(unsigned long* d_inFirst, unsigned long* d_inSecond, unsigned long* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void MultipMatricesSH(unsigned long* d_inFirst, unsigned long* d_inSecond, unsigned long* d_out, int rowFirst, int colFirst, int colSecond);
template __global__ void Transp(unsigned long* d_inMatrix, unsigned long* d_out, int rowMatrix, int colMatrix);
template __global__ void TranspSH(unsigned long *d_inMatrix, unsigned long *d_out, int rowMatrix, int colMatrix);
template __global__ void OutPr(unsigned long* d_inFirst, unsigned long* d_inSecond, unsigned long* d_out, int rowFirst, int rowSecond);
template __global__ void OutPrSH(unsigned long* d_inFirst, unsigned long* d_inSecond, unsigned long* d_out, int rowFirst, int rowSecond);
template __global__ void VecDiff(unsigned long* d_inFirst, unsigned long* d_inSecond, unsigned long* d_out, int rows);
template __global__ void VecDiffSH(unsigned long* d_inFirst, unsigned long* d_inSecond, unsigned long* d_out, int rows);
template void matricesDotProductGPU(unsigned long* h_first, const int& rowFirst, const int& colFirst, unsigned long* h_second, const int& rowSecond, const int& colSecond, unsigned long* h_result);
template void matricesDotProductGPUSH(unsigned long* h_first, const int& rowFirst, const int& colFirst, unsigned long* h_second, const int& rowSecond, const int& colSecond, unsigned long* h_result);
template void transposeGPU(unsigned long* h_matrix, const int& rowMatrix, const int& colMatrix, unsigned long* h_result);
template void transposeGPUSH(unsigned long* h_matrix, const int& rowMatrix, const int& colMatrix, unsigned long* h_result);
template void outerProdGPU(unsigned long* h_first, const int& rowFirst, unsigned long* h_second, const int& rowSecond, unsigned long* h_result);
template void outerProdGPUSH(unsigned long* h_first, const int& rowFirst, unsigned long* h_second, const int& rowSecond, unsigned long* h_result);
template void vectorsDiffGPU(unsigned long* h_first, unsigned long* h_second, const int& rows, unsigned long* h_result);
template void vectorsDiffGPUSH(unsigned long* h_first, unsigned long* h_second, const int& rows, unsigned long* h_result);
|
96694a5ea5caec16cb8c05577eb9266d40c41c0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/**
* Copyright (c) 2021 Darius Rckert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/cudaHelper.h"
//
#include "saiga/core/time/all.h"
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/reduce.h"
#include "saiga/vision/features/Features.h"
#include "saiga/vision/features/OrbPattern.h"
#ifdef SAIGA_VISION
#include "OrbDescriptors.h"
namespace Saiga
{
namespace CUDA
{
const int HALF_PATCH_SIZE = 15;
__constant__ unsigned char c_pattern[sizeof(int2) * 512];
__constant__ int c_u_max[32];
// Uploads the static ORB sampling pattern and the circular-patch row extents
// (u_max) into GPU constant memory so the descriptor and angle kernels can
// read them through c_pattern / c_u_max.
ORB::ORB()
{
    auto pattern = Saiga::ORBPattern::DescriptorPattern();
    // The pattern is copied as raw int pairs; guard that layout assumption.
    static_assert(sizeof(Saiga::ivec2) == 2 * sizeof(int), "laksdf");
    CHECK_CUDA_ERROR(hipMemcpyToSymbol(c_pattern, pattern.data(), sizeof(Saiga::ivec2) * pattern.size()));
    auto u_max = Saiga::ORBPattern::AngleUmax();
    CHECK_CUDA_ERROR(hipMemcpyToSymbol(c_u_max, u_max.data(), u_max.size() * sizeof(int)));
}
// Computes one 256-bit ORB descriptor per keypoint.
// Launch layout: one block per keypoint, 32 threads per block; each thread
// produces one byte of the descriptor from 8 rotated-pattern comparisons
// (16 pattern points). Samples are fetched through the texture object, so
// out-of-image handling comes from the texture's address mode.
__global__ void calcOrb_kernel(hipTextureObject_t tex, Saiga::ImageView<unsigned char> image,
                               Saiga::ArrayView<Saiga::KeyPoint<float>> keypoints,
                               Saiga::ArrayView<Saiga::DescriptorORB> descriptors)
{
    int id = blockIdx.x;    // keypoint index
    int tid = threadIdx.x;  // descriptor byte index, 0..31
    if (id >= keypoints.size()) return;
    // One byte of the descriptor per thread, assembled in shared memory.
    __shared__ unsigned char result[32];
    const auto& kpt = keypoints[id];
    float2 loc = {kpt.point(0), kpt.point(1)};
    // Each thread owns 16 consecutive pattern points (8 comparison pairs).
    const auto* pattern = ((int2*)c_pattern) + 16 * tid;
    unsigned char* desc = (unsigned char*)&descriptors[id];
    const float factorPI = (float)(pi<float>() / 180.f);
    float angle = (float)kpt.angle * factorPI;  // keypoint angle, degrees -> radians
    float a = (float)cosf(angle), b = (float)sinf(angle);
    int t0, t1, val;
    // Samples the image at pattern point idx rotated by the keypoint angle.
    auto GET_VALUE = [&](int idx) -> int {
        int2 pat = pattern[idx];
        float fx = loc.x + (pat.x * a - pat.y * b);
        float fy = loc.y + (pat.x * b + pat.y * a);
        // int x = __float2int_rn(fx);
        // int y = __float2int_rn(fy);
        // image.mirrorToEdge(y, x);
        // CUDA_ASSERT(image.inImage(y, x));
        // return image(y, x);
        // +0.5 centers the lookup on the texel (texture coords are pixel-edge based).
        return tex2D<unsigned char>(tex, fx + 0.5, fy + 0.5);
    };
    // Build the byte: bit k is set when sample 2k < sample 2k+1.
    t0 = GET_VALUE(0);
    t1 = GET_VALUE(1);
    val = t0 < t1;
    t0 = GET_VALUE(2);
    t1 = GET_VALUE(3);
    val |= (t0 < t1) << 1;
    t0 = GET_VALUE(4);
    t1 = GET_VALUE(5);
    val |= (t0 < t1) << 2;
    t0 = GET_VALUE(6);
    t1 = GET_VALUE(7);
    val |= (t0 < t1) << 3;
    t0 = GET_VALUE(8);
    t1 = GET_VALUE(9);
    val |= (t0 < t1) << 4;
    t0 = GET_VALUE(10);
    t1 = GET_VALUE(11);
    val |= (t0 < t1) << 5;
    t0 = GET_VALUE(12);
    t1 = GET_VALUE(13);
    val |= (t0 < t1) << 6;
    t0 = GET_VALUE(14);
    t1 = GET_VALUE(15);
    val |= (t0 < t1) << 7;
    result[threadIdx.x] = (unsigned char)val;
    // Lanes 0-7 copy the 32 bytes out as 8 ints.
    // NOTE(review): shared `result` is read here without a __syncwarp()
    // after the write above; this relies on implicit warp synchrony, which
    // does not hold under independent thread scheduling (Volta+) — confirm.
    if (threadIdx.x < 8)
    {
        auto data_int = (int*)result;
        ((int*)desc)[threadIdx.x] = data_int[threadIdx.x];
    }
}
// Host-side launcher for calcOrb_kernel: one block per keypoint, 32 threads
// per block, enqueued on the caller-supplied stream. No-op for an empty
// keypoint list; keypoints and descriptors must be the same length.
void ORB::ComputeDescriptors(hipTextureObject_t tex, Saiga::ImageView<unsigned char> image,
                             Saiga::ArrayView<Saiga::KeyPoint<float>> _keypoints,
                             Saiga::ArrayView<Saiga::DescriptorORB> _descriptors, hipStream_t stream)
{
    if (_keypoints.empty())
    {
        return;
    }
    SAIGA_ASSERT(_keypoints.size() == _descriptors.size());
    hipLaunchKernelGGL(( calcOrb_kernel), dim3(_keypoints.size()), dim3(32), 0, stream, tex, image, _keypoints, _descriptors);
}
// Computes the intensity-centroid orientation of each keypoint over a
// circular patch of radius HALF_PATCH_SIZE, writing the angle (degrees,
// [0, 360)) back into the keypoint.
// Launch layout: one warp per keypoint — threadIdx.x strides across patch
// columns and the partial moments are combined with warpReduceSum, so
// blockDim.x must be 32; blockDim.y selects the keypoint within the block.
__global__ void IC_Angle_kernel(hipTextureObject_t tex, Saiga::ImageView<unsigned char> image,
                                Saiga::ArrayView<Saiga::KeyPoint<float>> keypoints)
{
    const int ptidx = blockIdx.x * blockDim.y + threadIdx.y;
    if (ptidx >= keypoints.size()) return;
    // m_10 / m_01: first-order image moments of the patch.
    int m_01 = 0, m_10 = 0;
    const int2 loc = make_int2(keypoints[ptidx].point(0), keypoints[ptidx].point(1));
    // Treat the center line differently, v=0 (it contributes only to m_10).
    for (int u = threadIdx.x - HALF_PATCH_SIZE; u <= HALF_PATCH_SIZE; u += blockDim.x)
    {
        m_10 += u * tex2D<unsigned char>(tex, loc.x + u, loc.y);
    }
    m_10 = Saiga::CUDA::warpReduceSum<int, 32, false>(m_10);
    for (int v = 1; v <= HALF_PATCH_SIZE; ++v)
    {
        // Proceed over the two lines (rows +v and -v) together.
        int v_sum = 0;
        int m_sum = 0;
        // c_u_max[v]: half-width of the circular patch at row offset v.
        const int d = c_u_max[v];
        for (int u = threadIdx.x - d; u <= d; u += blockDim.x)
        {
            // int val_plus = image(loc.y + v, loc.x + u);
            // int val_minus = image(loc.y - v, loc.x + u);
            int val_plus = tex2D<unsigned char>(tex, loc.x + u, loc.y + v);
            int val_minus = tex2D<unsigned char>(tex, loc.x + u, loc.y - v);
            v_sum += (val_plus - val_minus);
            m_sum += u * (val_plus + val_minus);
        }
        m_sum = Saiga::CUDA::warpReduceSum<int, 32, false>(m_sum);
        v_sum = Saiga::CUDA::warpReduceSum<int, 32, false>(v_sum);
        m_10 += m_sum;
        m_01 += v * v_sum;
    }
    // Lane 0 holds the fully reduced moments; convert to an angle in degrees.
    if (threadIdx.x == 0)
    {
        float kp_dir = atan2((float)m_01, (float)m_10);
        kp_dir += (kp_dir < 0) * (2.0f * float(pi<float>()));  // wrap into [0, 2*pi)
        kp_dir *= 180.0f / float(pi<float>());                 // radians -> degrees
        keypoints[ptidx].angle = kp_dir;
    }
}
// Shifts each keypoint back into full-image coordinates by adding the border
// offsets, and stamps the pyramid octave and patch size onto it.
// One thread per keypoint.
__global__ void addBorder_kernel(Saiga::KeyPoint<float>* keypoints, int npoints, int minBorderX, int minBorderY,
                                 int octave, int size)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < npoints)
    {
        auto& kp = keypoints[idx];
        kp.point(0) += minBorderX;
        kp.point(1) += minBorderY;
        kp.octave = octave;
        kp.size   = size;
    }
}
// Restores keypoint coordinates to full-image space (addBorder_kernel) and
// then computes each keypoint's intensity-centroid orientation
// (IC_Angle_kernel). Both kernels are enqueued asynchronously on `stream`,
// in order, so the angle kernel sees the shifted coordinates.
void ORB::ComputeAngles(hipTextureObject_t tex, Saiga::ImageView<unsigned char> image,
                        Saiga::ArrayView<Saiga::KeyPoint<float>> _keypoints, int minBorderX, int minBorderY, int octave,
                        int size, hipStream_t stream)
{
    if (_keypoints.empty())
    {
        return;
    }
    {
        // One thread per keypoint.
        dim3 block(256);
        dim3 grid(Saiga::iDivUp<int>(_keypoints.size(), block.x));
        hipLaunchKernelGGL(( addBorder_kernel), dim3(grid), dim3(block), 0, stream, _keypoints.data(), _keypoints.size(), minBorderX, minBorderY,
                                                    octave, size);
    }
    {
        // One 32-thread warp per keypoint, 8 keypoints per block.
        dim3 block(32, 8);
        dim3 grid(Saiga::iDivUp<int>(_keypoints.size(), block.y));
        hipLaunchKernelGGL(( IC_Angle_kernel), dim3(grid), dim3(block), 0, stream, tex, image, _keypoints);
    }
}
} // namespace CUDA
} // namespace Saiga
#endif
| 96694a5ea5caec16cb8c05577eb9266d40c41c0c.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
/**
* Copyright (c) 2021 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/cudaHelper.h"
//
#include "saiga/core/time/all.h"
#include "saiga/cuda/device_helper.h"
#include "saiga/cuda/reduce.h"
#include "saiga/vision/features/Features.h"
#include "saiga/vision/features/OrbPattern.h"
#ifdef SAIGA_VISION
#include "OrbDescriptors.h"
namespace Saiga
{
namespace CUDA
{
const int HALF_PATCH_SIZE = 15;
__constant__ unsigned char c_pattern[sizeof(int2) * 512];
__constant__ int c_u_max[32];
// Uploads the ORB sampling pattern and the circular-patch row half-widths
// into GPU constant memory (c_pattern, c_u_max) so the ORB kernels can read
// them with broadcast constant-cache loads.
ORB::ORB()
{
    auto pattern = Saiga::ORBPattern::DescriptorPattern();
    // c_pattern is later reinterpreted as int2 pairs, so the host-side ivec2
    // layout must match. (Fixed: replaced the placeholder "laksdf" message
    // with a meaningful diagnostic.)
    static_assert(sizeof(Saiga::ivec2) == 2 * sizeof(int),
                  "Saiga::ivec2 must be layout-compatible with int2 (two ints)");
    CHECK_CUDA_ERROR(cudaMemcpyToSymbol(c_pattern, pattern.data(), sizeof(Saiga::ivec2) * pattern.size()));
    auto u_max = Saiga::ORBPattern::AngleUmax();
    CHECK_CUDA_ERROR(cudaMemcpyToSymbol(c_u_max, u_max.data(), u_max.size() * sizeof(int)));
}
// Computes the 256-bit rotated-BRIEF (ORB) descriptor of one keypoint per
// block. Requires blockDim.x == 32: thread t evaluates the 8 point pairs
// c_pattern[16*t .. 16*t+15] and produces byte t of the descriptor.
// The early-return guard on `id` is uniform across the block, so all 32
// threads reach the barrier below together.
__global__ void calcOrb_kernel(cudaTextureObject_t tex, Saiga::ImageView<unsigned char> image,
                               Saiga::ArrayView<Saiga::KeyPoint<float>> keypoints,
                               Saiga::ArrayView<Saiga::DescriptorORB> descriptors)
{
    int id  = blockIdx.x;   // keypoint index (uniform within the block)
    int tid = threadIdx.x;  // descriptor byte computed by this thread
    if (id >= keypoints.size()) return;
    __shared__ unsigned char result[32];
    const auto& kpt = keypoints[id];
    float2 loc = {kpt.point(0), kpt.point(1)};
    const auto* pattern = ((int2*)c_pattern) + 16 * tid;
    unsigned char* desc = (unsigned char*)&descriptors[id];
    // The keypoint angle is stored in degrees; rotate the sampling pattern by it.
    const float factorPI = (float)(pi<float>() / 180.f);
    float angle          = (float)kpt.angle * factorPI;
    float a = (float)cosf(angle), b = (float)sinf(angle);
    int t0, t1, val;
    // Samples the image at pattern point `idx`, rotated around the keypoint.
    auto GET_VALUE = [&](int idx) -> int {
        int2 pat = pattern[idx];
        float fx = loc.x + (pat.x * a - pat.y * b);
        float fy = loc.y + (pat.x * b + pat.y * a);
        // int x = __float2int_rn(fx);
        // int y = __float2int_rn(fy);
        // image.mirrorToEdge(y, x);
        // CUDA_ASSERT(image.inImage(y, x));
        // return image(y, x);
        // +0.5 centers the unnormalized texture lookup on the nearest texel.
        return tex2D<unsigned char>(tex, fx + 0.5, fy + 0.5);
    };
    // Eight pairwise intensity comparisons -> one descriptor byte.
    t0 = GET_VALUE(0);
    t1 = GET_VALUE(1);
    val = t0 < t1;
    t0 = GET_VALUE(2);
    t1 = GET_VALUE(3);
    val |= (t0 < t1) << 1;
    t0 = GET_VALUE(4);
    t1 = GET_VALUE(5);
    val |= (t0 < t1) << 2;
    t0 = GET_VALUE(6);
    t1 = GET_VALUE(7);
    val |= (t0 < t1) << 3;
    t0 = GET_VALUE(8);
    t1 = GET_VALUE(9);
    val |= (t0 < t1) << 4;
    t0 = GET_VALUE(10);
    t1 = GET_VALUE(11);
    val |= (t0 < t1) << 5;
    t0 = GET_VALUE(12);
    t1 = GET_VALUE(13);
    val |= (t0 < t1) << 6;
    t0 = GET_VALUE(14);
    t1 = GET_VALUE(15);
    val |= (t0 < t1) << 7;
    result[threadIdx.x] = (unsigned char)val;
    // Bug fix: barrier between the shared-memory writes above and the
    // cross-thread reads below. Without it, threads 0..7 may read bytes other
    // lanes have not yet written — a shared-memory race under independent
    // thread scheduling (Volta and newer).
    __syncthreads();
    // Threads 0..7 copy the 32 assembled bytes out as eight 4-byte words.
    if (threadIdx.x < 8)
    {
        auto data_int = (int*)result;
        ((int*)desc)[threadIdx.x] = data_int[threadIdx.x];
    }
}
// Enqueues the descriptor kernel on `stream`: one 32-thread block computes
// the full 32-byte ORB descriptor of one keypoint. `image` is forwarded to
// the kernel; sampling happens through the texture object `tex`.
void ORB::ComputeDescriptors(cudaTextureObject_t tex, Saiga::ImageView<unsigned char> image,
                             Saiga::ArrayView<Saiga::KeyPoint<float>> _keypoints,
                             Saiga::ArrayView<Saiga::DescriptorORB> _descriptors, cudaStream_t stream)
{
    if (_keypoints.empty()) return;
    // Every keypoint needs a matching descriptor slot.
    SAIGA_ASSERT(_keypoints.size() == _descriptors.size());
    const dim3 grid(static_cast<unsigned int>(_keypoints.size()));
    const dim3 block(32);
    calcOrb_kernel<<<grid, block, 0, stream>>>(tex, image, _keypoints, _descriptors);
}
// Computes the intensity-centroid (IC) orientation of each keypoint and
// writes it (in degrees, [0, 360)) into keypoints[ptidx].angle.
// Launch layout: blockDim.x == 32 (one warp per keypoint, required by the
// warpReduceSum<int, 32, ...> calls below); blockDim.y keypoints per block.
// `image` is only used by the commented-out non-texture code path; pixels
// are fetched through the texture object `tex`.
__global__ void IC_Angle_kernel(cudaTextureObject_t tex, Saiga::ImageView<unsigned char> image,
                                Saiga::ArrayView<Saiga::KeyPoint<float>> keypoints)
{
    // One keypoint per y-row of the block.
    const int ptidx = blockIdx.x * blockDim.y + threadIdx.y;
    if (ptidx >= keypoints.size()) return;
    // Patch image moments: m_10 (x moment) and m_01 (y moment).
    int m_01 = 0, m_10 = 0;
    const int2 loc = make_int2(keypoints[ptidx].point(0), keypoints[ptidx].point(1));
    // Treat the center line differently, v=0: it contributes only to m_10.
    for (int u = threadIdx.x - HALF_PATCH_SIZE; u <= HALF_PATCH_SIZE; u += blockDim.x)
    {
        m_10 += u * tex2D<unsigned char>(tex, loc.x + u, loc.y);
    }
    m_10 = Saiga::CUDA::warpReduceSum<int, 32, false>(m_10);
    for (int v = 1; v <= HALF_PATCH_SIZE; ++v)
    {
        // Proceed over the two lines +v and -v simultaneously.
        int v_sum = 0;
        int m_sum = 0;
        // Half-width of the circular patch at row v (from constant memory).
        const int d = c_u_max[v];
        for (int u = threadIdx.x - d; u <= d; u += blockDim.x)
        {
            // int val_plus = image(loc.y + v, loc.x + u);
            // int val_minus = image(loc.y - v, loc.x + u);
            int val_plus = tex2D<unsigned char>(tex, loc.x + u, loc.y + v);
            int val_minus = tex2D<unsigned char>(tex, loc.x + u, loc.y - v);
            v_sum += (val_plus - val_minus);
            m_sum += u * (val_plus + val_minus);
        }
        m_sum = Saiga::CUDA::warpReduceSum<int, 32, false>(m_sum);
        v_sum = Saiga::CUDA::warpReduceSum<int, 32, false>(v_sum);
        m_10 += m_sum;
        m_01 += v * v_sum;
    }
    // After the warp reductions, lane 0 holds the fully reduced moments.
    if (threadIdx.x == 0)
    {
        float kp_dir = atan2((float)m_01, (float)m_10);
        // Map atan2's (-pi, pi] range into [0, 2*pi), then convert to degrees.
        kp_dir += (kp_dir < 0) * (2.0f * float(pi<float>()));
        kp_dir *= 180.0f / float(pi<float>());
        keypoints[ptidx].angle = kp_dir;
    }
}
// Shifts each keypoint back into full-image coordinates by adding the border
// offsets, and stamps the pyramid octave and patch size onto it.
// One thread per keypoint.
__global__ void addBorder_kernel(Saiga::KeyPoint<float>* keypoints, int npoints, int minBorderX, int minBorderY,
                                 int octave, int size)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < npoints)
    {
        auto& kp = keypoints[idx];
        kp.point(0) += minBorderX;
        kp.point(1) += minBorderY;
        kp.octave = octave;
        kp.size   = size;
    }
}
// Restores keypoint coordinates to full-image space (addBorder_kernel) and
// then computes each keypoint's intensity-centroid orientation
// (IC_Angle_kernel). Both kernels are enqueued asynchronously on `stream`,
// in order, so the angle kernel sees the shifted coordinates.
void ORB::ComputeAngles(cudaTextureObject_t tex, Saiga::ImageView<unsigned char> image,
                        Saiga::ArrayView<Saiga::KeyPoint<float>> _keypoints, int minBorderX, int minBorderY, int octave,
                        int size, cudaStream_t stream)
{
    if (_keypoints.empty())
    {
        return;
    }
    {
        // One thread per keypoint.
        dim3 block(256);
        dim3 grid(Saiga::iDivUp<int>(_keypoints.size(), block.x));
        addBorder_kernel<<<grid, block, 0, stream>>>(_keypoints.data(), _keypoints.size(), minBorderX, minBorderY,
                                                     octave, size);
    }
    {
        // One 32-thread warp per keypoint, 8 keypoints per block.
        dim3 block(32, 8);
        dim3 grid(Saiga::iDivUp<int>(_keypoints.size(), block.y));
        IC_Angle_kernel<<<grid, block, 0, stream>>>(tex, image, _keypoints);
    }
}
} // namespace CUDA
} // namespace Saiga
#endif
|
35e7a599c0f0f0f85541ec3a552563625c2a92ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/hip/Atomic.cuh>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/NumericLimits.cuh>
#include <ATen/Dispatch.h>
#include <ATen/NumericUtils.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/adaptive_max_pool2d_backward_native.h>
#include <ATen/ops/adaptive_max_pool2d_native.h>
#include <ATen/ops/empty.h>
#endif
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at::native {
namespace {
// First input index covered by output cell `out_idx` when `out_size` output
// cells are mapped onto `in_size` input elements, i.e.
// floor(out_idx * in_size / out_size), computed in two steps to keep the
// intermediate product small.
__device__ inline int64_t start_index(int64_t out_idx, int64_t out_size, int64_t in_size) {
  const int64_t whole = (out_idx / out_size) * in_size;
  const int64_t part  = ((out_idx % out_size) * in_size) / out_size;
  return whole + part;
}
// One-past-the-last input index covered by output cell `out_idx`, i.e.
// ceil((out_idx + 1) * in_size / out_size).
__device__ inline int64_t end_index(int64_t out_idx, int64_t out_size, int64_t in_size) {
  const int64_t next = out_idx + 1;
  return (next * in_size - 1) / out_size + 1;
}
// 4d tensor B x D x H x W
/*
* Description:
* this function adaptively maxpools an input 4D tensor along dimensions 2 and 3
* 4D input, 4D output, 4D argmax x and y
*/
// Adaptive max pooling over one (batch*channel) plane per blockIdx.x.
// Threads stride over output pixels: threadIdx.x over columns,
// (blockIdx.y, threadIdx.y) over rows. For each output pixel the maximum of
// its adaptive input window is written to `output`, and the flat input index
// (h * isizeW + w) of that maximum to `indices`. A NaN input wins the
// comparison, so NaNs propagate to the output.
// `input` may be non-contiguous (explicit strides); `output`/`indices` are
// contiguous planes of osizeH * osizeW.
template <typename T>
__global__ void adaptivemaxpool(const T *input, T *output, int64_t *indices,
                        int isizeH, int isizeW,
                        int osizeH, int osizeW,
                        int64_t istrideD, int64_t istrideH, int64_t istrideW)
{
  // iterators
  int oh, ow;
  // compute offsets based on thread/block ID
  int o_plane = blockIdx.x;
  int i_plane = o_plane;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  const int ostepW = blockDim.x;
  int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
  int oendH = osizeH;
  const int ostepH = blockDim.y*gridDim.y;
  // select input/output plane
  output = output + o_plane*osizeH*osizeW;
  input = input + i_plane*istrideD;
  indices = indices + o_plane*osizeH*osizeW;
  // For all output pixels...
  for(oh = ostartH; oh < oendH; oh += ostepH) {
    // Adaptive window rows for this output row.
    int istartH = start_index(oh, osizeH, isizeH);
    int iendH = end_index(oh, osizeH, isizeH);
    int kH = iendH - istartH;
    for(ow = ostartW; ow < oendW; ow += ostepW) {
      // Adaptive window columns for this output column.
      int istartW = start_index(ow, osizeW, isizeW);
      int iendW = end_index(ow, osizeW, isizeW);
      int kW = iendW - istartW;
      // Compute the max of the input window...
      const T *ptr_input = input + istartH*istrideH + istartW*istrideW;
      T *ptr_output = output + oh*osizeW + ow;
      int64_t *ptr_ind = indices + oh*osizeW + ow;
      int argmax = istartH * isizeW + istartW;
      T max = at::numeric_limits<T>::lower_bound(); // -Infinity
      int ih, iw;
      for(ih = 0; ih < kH; ih++) {
        for(iw = 0; iw < kW; iw++) {
          T val = ptr_input[iw*istrideW];
          // NaN check makes NaN replace the running max, matching CPU semantics.
          if ((val > max) || at::_isnan(val)) {
            max = val;
            argmax = (ih+istartH)*isizeW + iw+istartW;
          }
        }
        ptr_input += istrideH; // next input line
      }
      // Update output and argmax
      *ptr_output = max;
      *ptr_ind = argmax;
    }
  }
}
/*
 * Description:
 *    this function computes the gradInput from gradOutput and the saved
 *    argmax indices (non-atomic scatter)
 */
// Backward pass without atomics: every output-gradient value is scattered to
// the input position recorded in `indices` during the forward pass.
// NOTE(review): this plain `+=` is only race-free if no two output cells
// share an argmax location; the host code below hardcodes `atomic = true`
// and launches the atomic variant instead, so this kernel is currently
// unused on at least one path.
// `gradInput` must be pre-zeroed; all planes are contiguous.
template <typename T>
__global__ void adaptivemaxgradinput(T *gradInput, const T *gradOutput, const int64_t *indices,
                             int isizeH, int isizeW,
                             int osizeH, int osizeW)
{
  // iterators
  int oh, ow;
  // compute offsets based on thread/block ID
  int o_plane = blockIdx.x;
  int i_plane = o_plane;
  //int k = blockIdx.x % sizeD;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;
  int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = blockDim.y*gridDim.y;
  // select input/output plane
  gradOutput = gradOutput + o_plane*osizeH*osizeW;
  gradInput = gradInput + i_plane*isizeH*isizeW;
  indices = indices + o_plane*osizeH*osizeW;
  // compute gradInput
  for(oh = ostartH; oh < oendH; oh += ostepH) {
    for(ow = ostartW; ow < oendW; ow += ostepW) {
      const T *ptr_gradOutput = gradOutput + oh*osizeW + ow;
      const int64_t *ptr_ind = indices + oh*osizeW + ow;
      T z = *ptr_gradOutput;
      // Scatter the gradient back to the argmax position of this window.
      int argmax = (*ptr_ind);
      gradInput[argmax] += z;
    }
  }
}
/*
 * Description:
 *    this function computes the gradInput from gradOutput and the saved
 *    argmax indices, using atomic adds because overlapping pooling windows
 *    (kH != dH or kW != dW) can map several output cells to one input cell
 */
// Atomic backward pass: same scatter as adaptivemaxgradinput, but using
// gpuAtomicAddNoReturn so that several output cells mapping to the same
// argmax input cell accumulate correctly.
// `gradInput` must be pre-zeroed; all planes are contiguous.
template <typename T>
__global__ void atomicadaptivemaxgradinput(
  T *gradInput, const T *gradOutput, const int64_t *indices,
  int isizeH, int isizeW, int osizeH, int osizeW
)
{
  // iterators
  int oh, ow;
  // compute offsets based on thread/block ID
  int o_plane = blockIdx.x;
  int i_plane = o_plane;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;
  int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = blockDim.y*gridDim.y;
  // select input/output plane
  gradOutput = gradOutput + o_plane*osizeH*osizeW;
  gradInput = gradInput + i_plane*isizeH*isizeW;
  indices = indices + o_plane*osizeH*osizeW;
  // compute gradInput
  for(oh = ostartH; oh < oendH; oh += ostepH) {
    for(ow = ostartW; ow < oendW; ow += ostepW) {
      const T *ptr_gradOutput = gradOutput + oh*osizeW + ow;
      const int64_t *ptr_ind = indices + oh*osizeW + ow;
      T z = *ptr_gradOutput;
      int argmax = (*ptr_ind);
      // atomic add since different threads could update same variable
      gpuAtomicAddNoReturn(&(gradInput[argmax]), z);
    }
  }
}
} // namespace
// 4d tensor B x D x H x W
// Structured-kernel implementation of adaptive_max_pool2d on CUDA/HIP.
// Handles both 3D (D x H x W) and 4D (B x D x H x W) inputs. Non-contiguous
// output/indices tensors are staged through contiguous scratch tensors
// (output_c / indices_c) and copied back at the end; a non-contiguous 3D
// input is handled via explicit strides, a 4D input is made contiguous.
TORCH_IMPL_FUNC(adaptive_max_pool2d_out_cuda)
(const Tensor& input,
 IntArrayRef output_size,
 const Tensor& output,
 const Tensor& indices) {
  TensorArg output_arg{output, "output", 1};
  TensorArg indices_arg{indices, "indices", 2};
  TensorArg input_arg{input, "input", 3};
  checkAllSameGPU(
      __func__, {output_arg, indices_arg, input_arg});
  if (input.numel() == 0) {
    return;
  }
  int64_t osizeH = output_size[0];
  int64_t osizeW = output_size[1];
  // Contiguous staging tensors for non-contiguous outputs.
  const at::Tensor output_c = output.is_contiguous() ? output : at::empty(output.sizes(), output.options());
  const at::Tensor indices_c = indices.is_contiguous() ? indices : at::empty(indices.sizes(), indices.options());
  if (input.ndimension() == 3) {
    int64_t sizeD = input.size(0);
    int64_t isizeH = input.size(1);
    int64_t isizeW = input.size(2);
    int64_t istrideD = input.stride(0);
    int64_t istrideH = input.stride(1);
    int64_t istrideW = input.stride(2);
    AT_DISPATCH_FLOATING_TYPES_AND2(
        kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_cuda", [&] {
          const scalar_t* input_data = input.const_data_ptr<scalar_t>();
          scalar_t* output_data = output_c.mutable_data_ptr<scalar_t>();
          int64_t* indices_data = indices_c.mutable_data_ptr<int64_t>();
          // cuda blocks & threads: one block.x per plane; add row-blocks
          // when there are few planes (heuristic targets ~16 blocks).
          int blocksH = (int)(16L / sizeD);
          blocksH = blocksH < 1 ? 1 : blocksH;
          dim3 blocks(sizeD, blocksH);
          dim3 threads(32, 8);
          // run maxpool kernel
          hipLaunchKernelGGL(( adaptivemaxpool),
              dim3(blocks),
              dim3(threads),
              0,
              at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
              input_data,
              output_data,
              indices_data,
              isizeH,
              isizeW,
              osizeH,
              osizeW,
              istrideD,
              istrideH,
              istrideW);
          C10_HIP_KERNEL_LAUNCH_CHECK();
        });
  } else {
    // 4D path: flatten batch and channel into the grid's x dimension.
    Tensor input_ = input.contiguous();
    int64_t sizeB = input_.size(0);
    int64_t sizeD = input_.size(1);
    int64_t isizeH = input_.size(2);
    int64_t isizeW = input_.size(3);
    int64_t istrideD = input_.stride(1);
    int64_t istrideH = input_.stride(2);
    int64_t istrideW = input_.stride(3);
    AT_DISPATCH_FLOATING_TYPES_AND2(
        kHalf,
        kBFloat16,
        input_.scalar_type(),
        "adaptive_max_pool2d_cuda",
        [&] {
          const scalar_t* input_data = input_.const_data_ptr<scalar_t>();
          scalar_t* output_data = output_c.mutable_data_ptr<scalar_t>();
          int64_t* indices_data = indices_c.mutable_data_ptr<int64_t>();
          // cuda blocks & threads:
          int blocksH = (int)(16L / sizeD);
          blocksH = blocksH < 1 ? 1 : blocksH;
          dim3 blocks(sizeB * sizeD, blocksH);
          dim3 threads(32, 8);
          // run maxpool kernel
          hipLaunchKernelGGL(( adaptivemaxpool),
              dim3(blocks),
              dim3(threads),
              0,
              at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
              input_data,
              output_data,
              indices_data,
              isizeH,
              isizeW,
              osizeH,
              osizeW,
              istrideD,
              istrideH,
              istrideW);
          C10_HIP_KERNEL_LAUNCH_CHECK();
        });
  }
  // Copy staged results back into the caller's non-contiguous tensors.
  if (!output.is_contiguous()) {
    output.copy_(output_c);
  }
  if (!indices.is_contiguous()) {
    indices.copy_(indices_c);
  }
}
// Backward of adaptive_max_pool2d: scatters gradOutput into gradInput at the
// argmax positions saved in `indices`. Marked non-deterministic because the
// atomic scatter accumulates floating-point values in arbitrary order.
// Non-contiguous gradInput is staged through a contiguous scratch tensor.
TORCH_IMPL_FUNC(adaptive_max_pool2d_backward_out_cuda)
(const Tensor& gradOutput,
 const Tensor& input,
 const Tensor& indices,
 const Tensor& gradInput) {
  globalContext().alertNotDeterministic(
      "adaptive_max_pool2d_backward_cuda");
  TensorArg grad_input_arg{gradInput, "gradInput", 1};
  TensorArg grad_output_arg{gradOutput, "gradOutput", 2};
  TensorArg input_arg{input, "input", 3};
  TensorArg indices_arg{indices, "indices", 4};
  checkAllSameGPU(
      __func__,
      {grad_input_arg, grad_output_arg, input_arg, indices_arg});
  if (gradOutput.numel() == 0) {
    return;
  }
  // Always take the atomic path; the non-atomic branches below are dead code.
  bool atomic =
      true; // suboptimal, but without atomic it doesn't pass the tests
  const at::Tensor gradOutput_ = gradOutput.contiguous();
  const at::Tensor indices_ = indices.contiguous();
  const at::Tensor gradInput_c = gradInput.is_contiguous() ? gradInput : at::empty(gradInput.sizes(), gradInput.options());
  if (input.ndimension() == 3) {
    int64_t sizeD = input.size(0);
    int64_t isizeH = input.size(1);
    int64_t isizeW = input.size(2);
    int64_t osizeH = gradOutput_.size(1);
    int64_t osizeW = gradOutput_.size(2);
    // bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0);
    // Kernels accumulate with +=, so start from zero.
    gradInput_c.zero_();
    AT_DISPATCH_FLOATING_TYPES_AND2(
        kHalf,
        kBFloat16,
        input.scalar_type(),
        "adaptive_max_pool2d_backward_cuda",
        [&] {
          scalar_t* gradInput_data = gradInput_c.mutable_data_ptr<scalar_t>();
          const scalar_t* gradOutput_data = gradOutput_.const_data_ptr<scalar_t>();
          const int64_t* indices_data = indices_.const_data_ptr<int64_t>();
          // cuda blocks & threads:
          int blocksH = (int)(16L / sizeD);
          blocksH = blocksH < 1 ? 1 : blocksH;
          dim3 blocks(sizeD, blocksH);
          dim3 threads(32, 8);
          if (atomic) {
            // run updateGradInput kernel, accumulate gradients atomically
            hipLaunchKernelGGL(( atomicadaptivemaxgradinput),
                dim3(blocks),
                dim3(threads),
                0,
                at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                gradInput_data,
                gradOutput_data,
                indices_data,
                isizeH,
                isizeW,
                osizeH,
                osizeW);
            C10_HIP_KERNEL_LAUNCH_CHECK();
          } else {
            // run updateGradInput kernel
            // NOTE(review): this (unreachable) branch also launches the
            // atomic kernel, unlike the 4D branch which uses the
            // non-atomic adaptivemaxgradinput.
            hipLaunchKernelGGL(( atomicadaptivemaxgradinput),
                dim3(blocks),
                dim3(threads),
                0,
                at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                gradInput_data,
                gradOutput_data,
                indices_data,
                isizeH,
                isizeW,
                osizeH,
                osizeW);
            C10_HIP_KERNEL_LAUNCH_CHECK();
          }
        });
  } else {
    // 4D path: flatten batch and channel into the grid's x dimension.
    int64_t sizeB = input.size(0);
    int64_t sizeD = input.size(1);
    int64_t isizeH = input.size(2);
    int64_t isizeW = input.size(3);
    int64_t osizeH = gradOutput_.size(2);
    int64_t osizeW = gradOutput_.size(3);
    gradInput_c.zero_();
    // bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0);
    AT_DISPATCH_FLOATING_TYPES_AND2(
        kHalf,
        kBFloat16,
        input.scalar_type(),
        "adaptive_max_pool2d_backward_cuda",
        [&] {
          scalar_t* gradInput_data = gradInput_c.mutable_data_ptr<scalar_t>();
          const scalar_t* gradOutput_data = gradOutput_.const_data_ptr<scalar_t>();
          const int64_t* indices_data = indices_.const_data_ptr<int64_t>();
          // cuda blocks & threads:
          int blocksH = (int)(16L / sizeD);
          blocksH = blocksH < 1 ? 1 : blocksH;
          dim3 blocks(sizeB * sizeD, blocksH);
          dim3 threads(32, 8);
          if (atomic) {
            // run updateGradInput kernel, accumulate gradients atomically
            hipLaunchKernelGGL(( atomicadaptivemaxgradinput),
                dim3(blocks),
                dim3(threads),
                0,
                at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                gradInput_data,
                gradOutput_data,
                indices_data,
                isizeH,
                isizeW,
                osizeH,
                osizeW);
            C10_HIP_KERNEL_LAUNCH_CHECK();
          } else {
            // run updateGradInput kernel (non-atomic variant; unreachable
            // while `atomic` is hardcoded to true above)
            hipLaunchKernelGGL(( adaptivemaxgradinput),
                dim3(blocks),
                dim3(threads),
                0,
                at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                gradInput_data,
                gradOutput_data,
                indices_data,
                isizeH,
                isizeW,
                osizeH,
                osizeW);
            C10_HIP_KERNEL_LAUNCH_CHECK();
          }
        });
  }
  // Copy staged gradients back into a non-contiguous gradInput.
  if (!gradInput.is_contiguous()) {
    gradInput.copy_(gradInput_c);
  }
}
} // namespace at::native
| 35e7a599c0f0f0f85541ec3a552563625c2a92ad.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/cuda/Atomic.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/NumericLimits.cuh>
#include <ATen/Dispatch.h>
#include <ATen/NumericUtils.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/adaptive_max_pool2d_backward_native.h>
#include <ATen/ops/adaptive_max_pool2d_native.h>
#include <ATen/ops/empty.h>
#endif
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at::native {
namespace {
// First input index covered by output cell `out_idx` when `out_size` output
// cells are mapped onto `in_size` input elements, i.e.
// floor(out_idx * in_size / out_size), computed in two steps to keep the
// intermediate product small.
__device__ inline int64_t start_index(int64_t out_idx, int64_t out_size, int64_t in_size) {
  const int64_t whole = (out_idx / out_size) * in_size;
  const int64_t part  = ((out_idx % out_size) * in_size) / out_size;
  return whole + part;
}
// One-past-the-last input index covered by output cell `out_idx`, i.e.
// ceil((out_idx + 1) * in_size / out_size).
__device__ inline int64_t end_index(int64_t out_idx, int64_t out_size, int64_t in_size) {
  const int64_t next = out_idx + 1;
  return (next * in_size - 1) / out_size + 1;
}
// 4d tensor B x D x H x W
/*
* Description:
* this function adaptively maxpools an input 4D tensor along dimensions 2 and 3
* 4D input, 4D output, 4D argmax x and y
*/
// Adaptive max pooling over one (batch*channel) plane per blockIdx.x.
// Threads stride over output pixels: threadIdx.x over columns,
// (blockIdx.y, threadIdx.y) over rows. For each output pixel the maximum of
// its adaptive input window is written to `output`, and the flat input index
// (h * isizeW + w) of that maximum to `indices`. A NaN input wins the
// comparison, so NaNs propagate to the output.
// `input` may be non-contiguous (explicit strides); `output`/`indices` are
// contiguous planes of osizeH * osizeW.
template <typename T>
__global__ void adaptivemaxpool(const T *input, T *output, int64_t *indices,
                        int isizeH, int isizeW,
                        int osizeH, int osizeW,
                        int64_t istrideD, int64_t istrideH, int64_t istrideW)
{
  // iterators
  int oh, ow;
  // compute offsets based on thread/block ID
  int o_plane = blockIdx.x;
  int i_plane = o_plane;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  const int ostepW = blockDim.x;
  int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
  int oendH = osizeH;
  const int ostepH = blockDim.y*gridDim.y;
  // select input/output plane
  output = output + o_plane*osizeH*osizeW;
  input = input + i_plane*istrideD;
  indices = indices + o_plane*osizeH*osizeW;
  // For all output pixels...
  for(oh = ostartH; oh < oendH; oh += ostepH) {
    // Adaptive window rows for this output row.
    int istartH = start_index(oh, osizeH, isizeH);
    int iendH = end_index(oh, osizeH, isizeH);
    int kH = iendH - istartH;
    for(ow = ostartW; ow < oendW; ow += ostepW) {
      // Adaptive window columns for this output column.
      int istartW = start_index(ow, osizeW, isizeW);
      int iendW = end_index(ow, osizeW, isizeW);
      int kW = iendW - istartW;
      // Compute the max of the input window...
      const T *ptr_input = input + istartH*istrideH + istartW*istrideW;
      T *ptr_output = output + oh*osizeW + ow;
      int64_t *ptr_ind = indices + oh*osizeW + ow;
      int argmax = istartH * isizeW + istartW;
      T max = at::numeric_limits<T>::lower_bound(); // -Infinity
      int ih, iw;
      for(ih = 0; ih < kH; ih++) {
        for(iw = 0; iw < kW; iw++) {
          T val = ptr_input[iw*istrideW];
          // NaN check makes NaN replace the running max, matching CPU semantics.
          if ((val > max) || at::_isnan(val)) {
            max = val;
            argmax = (ih+istartH)*isizeW + iw+istartW;
          }
        }
        ptr_input += istrideH; // next input line
      }
      // Update output and argmax
      *ptr_output = max;
      *ptr_ind = argmax;
    }
  }
}
/*
 * Description:
 *    this function computes the gradInput from gradOutput and the saved
 *    argmax indices (non-atomic scatter)
 */
// Backward pass without atomics: every output-gradient value is scattered to
// the input position recorded in `indices` during the forward pass.
// NOTE(review): this plain `+=` is only race-free if no two output cells
// share an argmax location; the host code below hardcodes `atomic = true`,
// so this kernel is only reached from dead branches.
// `gradInput` must be pre-zeroed; all planes are contiguous.
template <typename T>
__global__ void adaptivemaxgradinput(T *gradInput, const T *gradOutput, const int64_t *indices,
                             int isizeH, int isizeW,
                             int osizeH, int osizeW)
{
  // iterators
  int oh, ow;
  // compute offsets based on thread/block ID
  int o_plane = blockIdx.x;
  int i_plane = o_plane;
  //int k = blockIdx.x % sizeD;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;
  int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = blockDim.y*gridDim.y;
  // select input/output plane
  gradOutput = gradOutput + o_plane*osizeH*osizeW;
  gradInput = gradInput + i_plane*isizeH*isizeW;
  indices = indices + o_plane*osizeH*osizeW;
  // compute gradInput
  for(oh = ostartH; oh < oendH; oh += ostepH) {
    for(ow = ostartW; ow < oendW; ow += ostepW) {
      const T *ptr_gradOutput = gradOutput + oh*osizeW + ow;
      const int64_t *ptr_ind = indices + oh*osizeW + ow;
      T z = *ptr_gradOutput;
      // Scatter the gradient back to the argmax position of this window.
      int argmax = (*ptr_ind);
      gradInput[argmax] += z;
    }
  }
}
/*
 * Description:
 *    this function computes the gradInput from gradOutput and the saved
 *    argmax indices, using atomic adds because overlapping pooling windows
 *    (kH != dH or kW != dW) can map several output cells to one input cell
 */
// Atomic backward pass: same scatter as adaptivemaxgradinput, but using
// gpuAtomicAddNoReturn so that several output cells mapping to the same
// argmax input cell accumulate correctly.
// `gradInput` must be pre-zeroed; all planes are contiguous.
template <typename T>
__global__ void atomicadaptivemaxgradinput(
  T *gradInput, const T *gradOutput, const int64_t *indices,
  int isizeH, int isizeW, int osizeH, int osizeW
)
{
  // iterators
  int oh, ow;
  // compute offsets based on thread/block ID
  int o_plane = blockIdx.x;
  int i_plane = o_plane;
  int ostartW = threadIdx.x;
  int oendW = osizeW;
  int ostepW = blockDim.x;
  int ostartH = blockDim.y*blockIdx.y + threadIdx.y;
  int oendH = osizeH;
  int ostepH = blockDim.y*gridDim.y;
  // select input/output plane
  gradOutput = gradOutput + o_plane*osizeH*osizeW;
  gradInput = gradInput + i_plane*isizeH*isizeW;
  indices = indices + o_plane*osizeH*osizeW;
  // compute gradInput
  for(oh = ostartH; oh < oendH; oh += ostepH) {
    for(ow = ostartW; ow < oendW; ow += ostepW) {
      const T *ptr_gradOutput = gradOutput + oh*osizeW + ow;
      const int64_t *ptr_ind = indices + oh*osizeW + ow;
      T z = *ptr_gradOutput;
      int argmax = (*ptr_ind);
      // atomic add since different threads could update same variable
      gpuAtomicAddNoReturn(&(gradInput[argmax]), z);
    }
  }
}
} // namespace
// 4d tensor B x D x H x W
// Structured-kernel implementation of adaptive_max_pool2d on CUDA.
// Handles both 3D (D x H x W) and 4D (B x D x H x W) inputs. Non-contiguous
// output/indices tensors are staged through contiguous scratch tensors
// (output_c / indices_c) and copied back at the end; a non-contiguous 3D
// input is handled via explicit strides, a 4D input is made contiguous.
TORCH_IMPL_FUNC(adaptive_max_pool2d_out_cuda)
(const Tensor& input,
 IntArrayRef output_size,
 const Tensor& output,
 const Tensor& indices) {
  TensorArg output_arg{output, "output", 1};
  TensorArg indices_arg{indices, "indices", 2};
  TensorArg input_arg{input, "input", 3};
  checkAllSameGPU(
      __func__, {output_arg, indices_arg, input_arg});
  if (input.numel() == 0) {
    return;
  }
  int64_t osizeH = output_size[0];
  int64_t osizeW = output_size[1];
  // Contiguous staging tensors for non-contiguous outputs.
  const at::Tensor output_c = output.is_contiguous() ? output : at::empty(output.sizes(), output.options());
  const at::Tensor indices_c = indices.is_contiguous() ? indices : at::empty(indices.sizes(), indices.options());
  if (input.ndimension() == 3) {
    int64_t sizeD = input.size(0);
    int64_t isizeH = input.size(1);
    int64_t isizeW = input.size(2);
    int64_t istrideD = input.stride(0);
    int64_t istrideH = input.stride(1);
    int64_t istrideW = input.stride(2);
    AT_DISPATCH_FLOATING_TYPES_AND2(
        kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_cuda", [&] {
          const scalar_t* input_data = input.const_data_ptr<scalar_t>();
          scalar_t* output_data = output_c.mutable_data_ptr<scalar_t>();
          int64_t* indices_data = indices_c.mutable_data_ptr<int64_t>();
          // cuda blocks & threads: one block.x per plane; add row-blocks
          // when there are few planes (heuristic targets ~16 blocks).
          int blocksH = (int)(16L / sizeD);
          blocksH = blocksH < 1 ? 1 : blocksH;
          dim3 blocks(sizeD, blocksH);
          dim3 threads(32, 8);
          // run maxpool kernel
          adaptivemaxpool<<<
              blocks,
              threads,
              0,
              at::cuda::getCurrentCUDAStream()>>>(
              input_data,
              output_data,
              indices_data,
              isizeH,
              isizeW,
              osizeH,
              osizeW,
              istrideD,
              istrideH,
              istrideW);
          C10_CUDA_KERNEL_LAUNCH_CHECK();
        });
  } else {
    // 4D path: flatten batch and channel into the grid's x dimension.
    Tensor input_ = input.contiguous();
    int64_t sizeB = input_.size(0);
    int64_t sizeD = input_.size(1);
    int64_t isizeH = input_.size(2);
    int64_t isizeW = input_.size(3);
    int64_t istrideD = input_.stride(1);
    int64_t istrideH = input_.stride(2);
    int64_t istrideW = input_.stride(3);
    AT_DISPATCH_FLOATING_TYPES_AND2(
        kHalf,
        kBFloat16,
        input_.scalar_type(),
        "adaptive_max_pool2d_cuda",
        [&] {
          const scalar_t* input_data = input_.const_data_ptr<scalar_t>();
          scalar_t* output_data = output_c.mutable_data_ptr<scalar_t>();
          int64_t* indices_data = indices_c.mutable_data_ptr<int64_t>();
          // cuda blocks & threads:
          int blocksH = (int)(16L / sizeD);
          blocksH = blocksH < 1 ? 1 : blocksH;
          dim3 blocks(sizeB * sizeD, blocksH);
          dim3 threads(32, 8);
          // run maxpool kernel
          adaptivemaxpool<<<
              blocks,
              threads,
              0,
              at::cuda::getCurrentCUDAStream()>>>(
              input_data,
              output_data,
              indices_data,
              isizeH,
              isizeW,
              osizeH,
              osizeW,
              istrideD,
              istrideH,
              istrideW);
          C10_CUDA_KERNEL_LAUNCH_CHECK();
        });
  }
  // Copy staged results back into the caller's non-contiguous tensors.
  if (!output.is_contiguous()) {
    output.copy_(output_c);
  }
  if (!indices.is_contiguous()) {
    indices.copy_(indices_c);
  }
}
// Backward of adaptive_max_pool2d: scatters gradOutput into gradInput at the
// argmax positions saved in `indices`. Marked non-deterministic because the
// atomic scatter accumulates floating-point values in arbitrary order.
// Non-contiguous gradInput is staged through a contiguous scratch tensor.
TORCH_IMPL_FUNC(adaptive_max_pool2d_backward_out_cuda)
(const Tensor& gradOutput,
 const Tensor& input,
 const Tensor& indices,
 const Tensor& gradInput) {
  globalContext().alertNotDeterministic(
      "adaptive_max_pool2d_backward_cuda");
  TensorArg grad_input_arg{gradInput, "gradInput", 1};
  TensorArg grad_output_arg{gradOutput, "gradOutput", 2};
  TensorArg input_arg{input, "input", 3};
  TensorArg indices_arg{indices, "indices", 4};
  checkAllSameGPU(
      __func__,
      {grad_input_arg, grad_output_arg, input_arg, indices_arg});
  if (gradOutput.numel() == 0) {
    return;
  }
  // Always take the atomic path; the non-atomic branches below are dead code.
  bool atomic =
      true; // suboptimal, but without atomic it doesn't pass the tests
  const at::Tensor gradOutput_ = gradOutput.contiguous();
  const at::Tensor indices_ = indices.contiguous();
  const at::Tensor gradInput_c = gradInput.is_contiguous() ? gradInput : at::empty(gradInput.sizes(), gradInput.options());
  if (input.ndimension() == 3) {
    int64_t sizeD = input.size(0);
    int64_t isizeH = input.size(1);
    int64_t isizeW = input.size(2);
    int64_t osizeH = gradOutput_.size(1);
    int64_t osizeW = gradOutput_.size(2);
    // bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0);
    // Kernels accumulate with +=, so start from zero.
    gradInput_c.zero_();
    AT_DISPATCH_FLOATING_TYPES_AND2(
        kHalf,
        kBFloat16,
        input.scalar_type(),
        "adaptive_max_pool2d_backward_cuda",
        [&] {
          scalar_t* gradInput_data = gradInput_c.mutable_data_ptr<scalar_t>();
          const scalar_t* gradOutput_data = gradOutput_.const_data_ptr<scalar_t>();
          const int64_t* indices_data = indices_.const_data_ptr<int64_t>();
          // cuda blocks & threads:
          int blocksH = (int)(16L / sizeD);
          blocksH = blocksH < 1 ? 1 : blocksH;
          dim3 blocks(sizeD, blocksH);
          dim3 threads(32, 8);
          if (atomic) {
            // run updateGradInput kernel, accumulate gradients atomically
            atomicadaptivemaxgradinput<<<
                blocks,
                threads,
                0,
                at::cuda::getCurrentCUDAStream()>>>(
                gradInput_data,
                gradOutput_data,
                indices_data,
                isizeH,
                isizeW,
                osizeH,
                osizeW);
            C10_CUDA_KERNEL_LAUNCH_CHECK();
          } else {
            // run updateGradInput kernel
            // NOTE(review): this (unreachable) branch also launches the
            // atomic kernel, unlike the 4D branch which uses the
            // non-atomic adaptivemaxgradinput.
            atomicadaptivemaxgradinput<<<
                blocks,
                threads,
                0,
                at::cuda::getCurrentCUDAStream()>>>(
                gradInput_data,
                gradOutput_data,
                indices_data,
                isizeH,
                isizeW,
                osizeH,
                osizeW);
            C10_CUDA_KERNEL_LAUNCH_CHECK();
          }
        });
  } else {
    // 4D path: flatten batch and channel into the grid's x dimension.
    int64_t sizeB = input.size(0);
    int64_t sizeD = input.size(1);
    int64_t isizeH = input.size(2);
    int64_t isizeW = input.size(3);
    int64_t osizeH = gradOutput_.size(2);
    int64_t osizeW = gradOutput_.size(3);
    gradInput_c.zero_();
    // bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0);
    AT_DISPATCH_FLOATING_TYPES_AND2(
        kHalf,
        kBFloat16,
        input.scalar_type(),
        "adaptive_max_pool2d_backward_cuda",
        [&] {
          scalar_t* gradInput_data = gradInput_c.mutable_data_ptr<scalar_t>();
          const scalar_t* gradOutput_data = gradOutput_.const_data_ptr<scalar_t>();
          const int64_t* indices_data = indices_.const_data_ptr<int64_t>();
          // cuda blocks & threads:
          int blocksH = (int)(16L / sizeD);
          blocksH = blocksH < 1 ? 1 : blocksH;
          dim3 blocks(sizeB * sizeD, blocksH);
          dim3 threads(32, 8);
          if (atomic) {
            // run updateGradInput kernel, accumulate gradients atomically
            atomicadaptivemaxgradinput<<<
                blocks,
                threads,
                0,
                at::cuda::getCurrentCUDAStream()>>>(
                gradInput_data,
                gradOutput_data,
                indices_data,
                isizeH,
                isizeW,
                osizeH,
                osizeW);
            C10_CUDA_KERNEL_LAUNCH_CHECK();
          } else {
            // run updateGradInput kernel (non-atomic variant; unreachable
            // while `atomic` is hardcoded to true above)
            adaptivemaxgradinput<<<
                blocks,
                threads,
                0,
                at::cuda::getCurrentCUDAStream()>>>(
                gradInput_data,
                gradOutput_data,
                indices_data,
                isizeH,
                isizeW,
                osizeH,
                osizeW);
            C10_CUDA_KERNEL_LAUNCH_CHECK();
          }
        });
  }
  // Copy staged gradients back into a non-contiguous gradInput.
  if (!gradInput.is_contiguous()) {
    gradInput.copy_(gradInput_c);
  }
}
} // namespace at::native
|
1a5055d9555f1c75384dd6b5801cc11f5985a3df.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <set>
#include <algorithm>
#include <assert.h>
#include "hip/hip_runtime.h"
using namespace std;
#define ITERATION_FINEGRAINED (1)
#define KB (1024/sizeof(int))
#define MB (KB*1024)
#define MAX_NUM_THREADS (1024) // a block has maximal thread size
//kernel function
__global__ void strided_access_onepass(unsigned *arr, int length, int stride, bool record, unsigned *duration, double *help); //used to benchmark the TLB structure
__global__ void strided_access_finegrained(unsigned *arr, int length, bool record, unsigned *duration, unsigned *index); //obsolete: use to attain average cycle and pages visited
void TLB_latency(int N, int stride);
void TLB_benchmarking(int beginSize, int endSize, int stride);
void generate_strided(unsigned *arr, int length, int stride);
void generate_strided_onepass(unsigned *arr, int length, int stride);
//global variables
int numThreadsGlobal;
int numBlocksGlobal;
int dataSizeGlobal; //in MB
int pageSizeGlobal; //in KB
/*
* TLB benchmarking: ./tlb_GPU pageSize_KB dataSize_begin_MB dataSize_end_MB
*
* blockSize=1 and gridSize=1 for TLB benchmarking;
*/
/*
 * Entry point for the TLB benchmark (HIP build).
 *
 * Usage: ./tlb_GPU pageSize_KB dataSize_begin_MB dataSize_end_MB
 *   argv[1] - page size in KB, used as the pointer-chase stride
 *   argv[2] - first data size to test, in MB
 *   argv[3] - last data size to test, in MB
 *
 * The benchmark always runs with one thread in one block so that the
 * pointer chase is strictly serialized.
 */
int main(int argc, char* argv[]){
    if (argc < 4) {
        // Usage text fixed to match the actual argv parsing below (the old
        // message described bSize/gSize arguments that were never read).
        cerr<<"Shall provide the page size and the begin/end data sizes."<<endl;
        cerr<<"Eg.: ./tlb_GPU pageSize_KB dataSize_begin_MB dataSize_end_MB"<<endl;
        exit(1); // usage error: report failure instead of success
    }
    numThreadsGlobal = 1;
    numBlocksGlobal = 1;
    // KB/MB are counted in unsigned ints (1024/sizeof(int), etc.), so these
    // globals hold element counts, not byte counts.
    pageSizeGlobal = atoi(argv[1]) * KB;
    int dataSize_begin = atoi(argv[2]) * MB;
    int dataSize_end = atoi(argv[3]) * MB;
    hipSetDevice(0);
    TLB_benchmarking(dataSize_begin, dataSize_end,pageSizeGlobal);
    hipDeviceReset();
    return 0;
}
/*
 * Sweeps the one-pass pointer-chase benchmark over data sizes in
 * [beginSize, endSize], stepping by `stride` (all sizes are counted in
 * unsigned ints, not bytes -- see the KB/MB macros). For each size: reset
 * the device, build and upload the chase array, run one warm-up launch and
 * one recorded launch of strided_access_onepass, then print the average
 * cycles per dependent load reported by the kernel.
 * NOTE(review): host malloc results are never checked, and the values
 * written through `help` are never read back -- `help` appears to exist
 * only so the kernel's loads cannot be optimized away (the kernel stores
 * its running index sum there).
 */
void TLB_benchmarking(int beginSize, int endSize, int stride) {
    for (int ds = beginSize; ds <= endSize; ds += stride) {
        cout << "Struc: Data size: " << (float)ds / MB << "MB\t" << "Stride: " << stride / MB << "MB\t";
        hipDeviceReset(); // start every measurement from a clean device state
        hipError_t error_id;
        unsigned *h_a, *d_a;
        unsigned *h_timeinfo, *d_timeinfo;
        double *help; // device sink for the kernel's index sum (never read back)
        h_a = (unsigned*)malloc(sizeof(unsigned)*ds);
        error_id = hipMalloc ((void **) &d_a, sizeof(unsigned)*ds);
        if (error_id != hipSuccess) cerr<<"Error 1.0 is "<<hipGetErrorString(error_id)<<endl;
        /* initialize array elements on CPU with pointers into d_a. */
        generate_strided_onepass(h_a,ds,stride);
        /* copy array elements from CPU to GPU */
        error_id = hipMemcpy(d_a, h_a, sizeof(unsigned)*ds, hipMemcpyHostToDevice);
        if (error_id != hipSuccess) cerr<<"Error 1.1 is "<<hipGetErrorString(error_id)<<endl;
        // One duration slot per launched thread (1x1 for TLB benchmarking).
        h_timeinfo = (unsigned *) malloc(sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal);
        error_id = hipMalloc((void **) &d_timeinfo, sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal);
        if (error_id != hipSuccess) cerr << "Error 1.2 is " << hipGetErrorString(error_id) << endl;
        error_id = hipMalloc((void **) &help, sizeof(double) * numThreadsGlobal * numBlocksGlobal);
        if (error_id != hipSuccess) cerr << "Error 1.3 is " << hipGetErrorString(error_id) << endl;
        hipDeviceSynchronize();
        /* launch kernel*/
        dim3 Db = dim3(numThreadsGlobal);
        dim3 Dg = dim3(numBlocksGlobal);
        hipLaunchKernelGGL(( strided_access_onepass), dim3(Dg), dim3(Db) , 0, 0, d_a, ds, stride, false, NULL, NULL); //warp up
        hipLaunchKernelGGL(( strided_access_onepass), dim3(Dg), dim3(Db) , 0, 0, d_a, ds, stride, true, d_timeinfo, help); //formal
        hipDeviceSynchronize(); // wait for the timed launch before error checks
        error_id = hipGetLastError();
        if (error_id != hipSuccess) {
            cerr << "Error kernel is " << hipGetErrorString(error_id) << endl;
        }
        /* copy results from GPU to CPU */
        hipDeviceSynchronize();
        error_id = hipMemcpy((void *) h_timeinfo, (void *) d_timeinfo, sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal, hipMemcpyDeviceToHost);
        if (error_id != hipSuccess) cerr << "Error 2.2 is " << hipGetErrorString(error_id) << endl;
        double total = 0; //here we use double, otherwise it will overflow
        for (int i = 0; i < numThreadsGlobal * numBlocksGlobal; i++) {
            total += h_timeinfo[i];
        }
        total /= (numThreadsGlobal * numBlocksGlobal);
        cout << "cycle: " << total << endl;
        hipDeviceSynchronize();
        /* free memory on GPU */
        hipFree(help);
        hipFree(d_a);
        hipFree(d_timeinfo);
        /*free memory on CPU */
        free(h_a);
        free(h_timeinfo);
        hipDeviceReset();
    }
}
//used for TLB benchmarking
/*
 * Serial pointer chase over arr, one pass, timing every dependent load.
 * arr[curIdx] yields the next index to visit, so each load depends on the
 * previous one; the walk ends once the fetched index reaches `length`.
 * Each load is bracketed with clock64() and 16 cycles are subtracted for
 * the dependent add that forces the load to complete (cost measured on
 * K40m, per the inline note). With `record` set, the per-thread mean
 * latency is written to duration[gid] and the accumulated index sum to
 * help[gid], which keeps the loads from being optimized away.
 * NOTE(review): `stride` is unused here -- the stride is baked into arr by
 * generate_strided_onepass. If length <= 0 the loop body never runs and
 * total/myIteration divides by zero; callers pass positive lengths.
 */
__global__ void strided_access_onepass(unsigned *arr, int length, int stride, bool record, unsigned *duration, double *help) {
    unsigned long start, end;
    unsigned gid = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned curIdx = 0;
    double anc = 0;    // running sum of visited indices (forces the loads)
    double total = 0;  // accumulated latency in cycles
    int myIteration = 0;
    //traverse the data array once
    while (curIdx < length) {
        start = clock64();
        curIdx = arr[curIdx];
        anc += curIdx; //to ensure the curIdx has been read, this instruction is 16-cycle long on K40m
        end = clock64();
        total += (end-start-16);
        myIteration++;
    }
    if (record) {
        duration[gid] = (total/myIteration);
        help[gid] = anc;
    }
}
// Builds the pointer-chase array for the one-pass traversal: element i
// stores i + stride, i.e. the index visited right after i. The last
// `stride` entries point beyond `length`, which is what terminates the
// chase in the kernel.
void generate_strided_onepass(unsigned *arr, int length, int stride) {
    for (int idx = 0, next = stride; idx < length; ++idx, ++next) {
        arr[idx] = next;
    }
}
//void measure_global() {
//
// int stride = pageSizeGlobal*KB; //2MB stride
// set<int> missPages; //recording the overall missing pages in each case
//
// //begin and end size in MBs
// /* To test the TLB structures the beginSize and endSize is different;
// * To test the latency of multi-thread, beginSize and endSize should set as the data size tested */
// int beginSize = dataSizeGlobal * MB;
// int endSize = dataSizeGlobal * MB;
//
// //1. The L1 TLB has 16 entries. Test with N_min=28 *1024*256, N_max>32*1024*256
// //2. The L2 TLB has 65 entries. Test with N_min=128*1024*256, N_max=160*1024*256
// for (int dataSize = beginSize; dataSize <= endSize; dataSize += (128*KB)) {
//// cout<<"Data size: "<<(float)dataSize/MB<<"MB\t"<<"Stride: "<< stride/MB <<"MB"<<endl;
// cout<<"Data size: "<<(float)dataSize/MB<<"MB\tbsize: "<<numThreadsGlobal<<"\tgsize: "<<numBlocksGlobal<<'\t';
// parametric_measure_global(dataSize, false, stride, missPages); //not finegrained
// }
//}
//void TLB_finegrained(int N, bool finegrained, int stride, set<int> & lastMissPages) {
// hipDeviceReset();
// hipError_t error_id;
// int i;
// unsigned *h_a, *d_a;
// h_a = (unsigned*)malloc(sizeof(unsigned)*N);
// error_id = hipMalloc ((void **) &d_a, sizeof(unsigned)*N);
//
// if (error_id != hipSuccess)
// cerr<<"Error 1.0 is "<<hipGetErrorString(error_id)<<endl;
//
// /* initialize array elements on CPU with pointers into d_a. */
// generate_strided(h_a,N,stride);
// //generate_strided_onepass(h_a,N,(mul)*stride);
//
// /* copy array elements from CPU to GPU */
// error_id = hipMemcpy(d_a, h_a, sizeof(unsigned)*N, hipMemcpyHostToDevice);
// if (error_id != hipSuccess) {
// cerr<<"Error 1.1 is "<<hipGetErrorString(error_id)<<endl;
// }
//
// unsigned *h_index, *h_timeinfo, *d_timeinfo, *d_index;
// double *help;
//
// if (finegrained) {
// h_index = (unsigned *) malloc(sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal * ITERATION);
// h_timeinfo = (unsigned *) malloc(sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal * ITERATION);
//
// //recording time and visited locations
// error_id = hipMalloc((void **) &d_timeinfo, sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal * ITERATION);
// if (error_id != hipSuccess) {
// cerr << "Error 1.2 is " << hipGetErrorString(error_id) << endl;
// }
//
// error_id = hipMalloc((void **) &d_index, sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal *ITERATION);
// if (error_id != hipSuccess) {
// cerr << "Error 1.3 is " << hipGetErrorString(error_id) << endl;
// }
// }
// else {
// h_timeinfo = (unsigned *) malloc(sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal);
// error_id = hipMalloc((void **) &d_timeinfo, sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal);
// if (error_id != hipSuccess) {
// cerr << "Error 1.4 is " << hipGetErrorString(error_id) << endl;
// }
// error_id = hipMalloc((void **) &help, sizeof(double) * numThreadsGlobal * numBlocksGlobal);
// if (error_id != hipSuccess) {
// cerr << "Error 1.5 is " << hipGetErrorString(error_id) << endl;
// }
// }
//
// hipDeviceSynchronize ();
// /* launch kernel*/
// dim3 Db = dim3(numThreadsGlobal);
// dim3 Dg = dim3(numBlocksGlobal);
// if (finegrained) {
// strided_access_finegrained<<<Dg, Db>>>(d_a, N, false, NULL, NULL);
// strided_access_finegrained<<<Dg, Db>>>(d_a, N, false, d_timeinfo, d_index);
// }
// else {
// strided_access<<<Dg, Db>>>(d_a, N, stride, false, NULL, NULL); //warp up
// strided_access<<<Dg, Db>>>(d_a, N, stride, true, d_timeinfo, help); //formal
// }
//
// hipDeviceSynchronize();
//
// error_id = hipGetLastError();
// if (error_id != hipSuccess) {
// cerr<<"Error kernel is "<<hipGetErrorString(error_id)<<endl;
// }
//
// /* copy results from GPU to CPU */
// hipDeviceSynchronize ();
//
// if (finegrained) {
// error_id = hipMemcpy((void *)h_timeinfo, (void *)d_timeinfo, sizeof(unsigned)*ITERATION*numThreadsGlobal * numBlocksGlobal, hipMemcpyDeviceToHost);
// if (error_id != hipSuccess) {
// cerr<<"Error 2.0 is "<<hipGetErrorString(error_id)<<endl;
// }
// error_id = hipMemcpy((void *)h_index, (void *)d_index, sizeof(unsigned)*ITERATION*numThreadsGlobal * numBlocksGlobal, hipMemcpyDeviceToHost);
// if (error_id != hipSuccess) {
// cerr<<"Error 2.1 is "<<hipGetErrorString(error_id)<<endl;
// }
//
// //statistics
// int count_less_300 = 0, count_300_400 = 0, count_400_500 = 0, count_500_600 = 0, count_larger_600 = 0;
// double total = 0;
//
// int loop = 0; //how many times the array is looped
//
// set<int> curMissPages;
// for(i=0 ;i<ITERATION;i++) {
// int curPage = h_index[i]/stride;
// if ( (h_timeinfo[i] > 400) && (h_timeinfo[i] < 510)) {
// curMissPages.insert(curPage);
// }
// cout<<curPage<<'\t'<<h_index[i]<<'\t'<<h_timeinfo[i]<<endl;
//
// if (h_index[i]<stride) loop ++;
// if (h_timeinfo[i] < 300) count_less_300++;
// else if (h_timeinfo[i] < 400) count_300_400 ++;
// else if (h_timeinfo[i] < 500) count_400_500 ++;
// else if (h_timeinfo[i] < 600) count_500_600++;
// else count_larger_600++;
// total += h_timeinfo[i];
// }
// set<int> diffSet;
// set_difference(curMissPages.begin(), curMissPages.end(),lastMissPages.begin(), lastMissPages.end(), inserter(diffSet,diffSet.end()));
//
// //to check that pages missed in last dataset will be hit in this dataset
// set<int> checkSet;
// set_difference(lastMissPages.begin(), lastMissPages.end(), curMissPages.begin(), curMissPages.end(), inserter(checkSet,checkSet.end()));
// assert(checkSet.size() == 0);
//
// int totalPages = N /512 / 1024;
// cout<<"Pages: "<<totalPages<<", misses: "<<count_400_500<<", loops: "<<loop<<", new miss pages: ";
// for (set<int>::iterator it = diffSet.begin(); it != diffSet.end(); ++it) {
// cout<<*it<<' ';
// }
// cout<<endl;
//
// // lastMissPages = curMissPages;
//
// total = total / ITERATION;
// cout<<"Average: "<<total<<endl;
// // cout<<"Statistics:"<<endl;
// // cout<<"Data size: "<<N / 1024 / 256<<" MB."<<endl;
//
// // cout<<"less than 300: "<<count_less_300<<endl;
// // cout<<"300 - 400: "<<count_300_400<<endl;
// // cout<<"400 - 500: "<<count_400_500<<endl;
// // cout<<"500 - 600: "<<count_500_600<<endl;
// // cout<<"larger than 600: "<<count_larger_600<<endl;
// // cout<<"Average cycles: "<<total<<" in "<<ITERATION<<" iterations."<<endl;
// }
// else {
// error_id = hipMemcpy((void *)h_timeinfo, (void *)d_timeinfo, sizeof(unsigned)*numThreadsGlobal * numBlocksGlobal, hipMemcpyDeviceToHost);
// if (error_id != hipSuccess) {
// cerr<<"Error 2.2 is "<<hipGetErrorString(error_id)<<endl;
// }
//
// double total = 0; //here we use double, otherwise it will overflow
// for(int i = 0; i < numThreadsGlobal*numBlocksGlobal; i++) {
// total += h_timeinfo[i];
// }
// total /= (numThreadsGlobal*numBlocksGlobal);
// cout<<"cycle: "<<total<<endl;
// }
// hipDeviceSynchronize();
//
// /* free memory on GPU */
// if (finegrained) {
// hipFree(d_index);
// free(h_index);
// }
// else {
// hipFree(help);
// }
//
// hipFree(d_a);
// hipFree(d_timeinfo);
//
// /*free memory on CPU */
// free(h_a);
// free(h_timeinfo);
//
// hipDeviceReset();
//}
//obsolete: to record the page number and study the cache replacement policy
//__global__ void strided_access_finegrained(unsigned *arr, int length, bool record, unsigned *duration, unsigned *index) {
//
// unsigned timestamp;
// unsigned gid = blockDim.x * blockIdx.x + threadIdx.x;
// unsigned gsize = blockDim.x * gridDim.x;
// unsigned curIdx = (blockDim.x * threadIdx.x + blockIdx.x) % length;
//
// __shared__ unsigned int s_tvalue[ITERATION_FINEGRAINED*MAX_NUM_THREADS];
// __shared__ unsigned int s_index[ITERATION_FINEGRAINED*MAX_NUM_THREADS];
//
// unsigned it = gid;
// while (it < ITERATION_FINEGRAINED * MAX_NUM_THREADS) {
// s_index[it] = 0;
// s_tvalue[it] = 0;
// it += gsize;
// }
// __syncthreads();
//
// it = gid;
// for (int k = 0; k < ITERATION_FINEGRAINED; k++) {
// timestamp = clock();
// curIdx = arr[curIdx];
// s_index[it]= curIdx;
// timestamp = clock() - timestamp;
// s_tvalue[it] = timestamp;
// it += ITERATION_FINEGRAINED;
// }
//
// if (record) {
// it = threadIdx.x;
// while (it < blockDim.x * ITERATION_FINEGRAINED) {
// duration[it + blockIdx.x*blockDim.x*ITERATION_FINEGRAINED] = s_tvalue[it];
// index[it + blockIdx.x*blockDim.x*ITERATION_FINEGRAINED] = s_index[it];
// it += blockDim.x;
// }
// }
//} | 1a5055d9555f1c75384dd6b5801cc11f5985a3df.cu | #include <iostream>
#include <set>
#include <algorithm>
#include <assert.h>
#include "cuda_runtime.h"
using namespace std;
#define ITERATION_FINEGRAINED (1)
#define KB (1024/sizeof(int))
#define MB (KB*1024)
#define MAX_NUM_THREADS (1024) // a block has maximal thread size
//kernel function
__global__ void strided_access_onepass(unsigned *arr, int length, int stride, bool record, unsigned *duration, double *help); //used to benchmark the TLB structure
__global__ void strided_access_finegrained(unsigned *arr, int length, bool record, unsigned *duration, unsigned *index); //obsolete: use to attain average cycle and pages visited
void TLB_latency(int N, int stride);
void TLB_benchmarking(int beginSize, int endSize, int stride);
void generate_strided(unsigned *arr, int length, int stride);
void generate_strided_onepass(unsigned *arr, int length, int stride);
//global variables
int numThreadsGlobal;
int numBlocksGlobal;
int dataSizeGlobal; //in MB
int pageSizeGlobal; //in KB
/*
* TLB benchmarking: ./tlb_GPU pageSize_KB dataSize_begin_MB dataSize_end_MB
*
* blockSize=1 and gridSize=1 for TLB benchmarking;
*/
/*
 * Entry point for the TLB benchmark (CUDA build).
 *
 * Usage: ./tlb_GPU pageSize_KB dataSize_begin_MB dataSize_end_MB
 *   argv[1] - page size in KB, used as the pointer-chase stride
 *   argv[2] - first data size to test, in MB
 *   argv[3] - last data size to test, in MB
 *
 * The benchmark always runs with one thread in one block so that the
 * pointer chase is strictly serialized.
 */
int main(int argc, char* argv[]){
    if (argc < 4) {
        // Usage text fixed to match the actual argv parsing below (the old
        // message described bSize/gSize arguments that were never read).
        cerr<<"Shall provide the page size and the begin/end data sizes."<<endl;
        cerr<<"Eg.: ./tlb_GPU pageSize_KB dataSize_begin_MB dataSize_end_MB"<<endl;
        exit(1); // usage error: report failure instead of success
    }
    numThreadsGlobal = 1;
    numBlocksGlobal = 1;
    // KB/MB are counted in unsigned ints (1024/sizeof(int), etc.), so these
    // globals hold element counts, not byte counts.
    pageSizeGlobal = atoi(argv[1]) * KB;
    int dataSize_begin = atoi(argv[2]) * MB;
    int dataSize_end = atoi(argv[3]) * MB;
    cudaSetDevice(0);
    TLB_benchmarking(dataSize_begin, dataSize_end,pageSizeGlobal);
    cudaDeviceReset();
    return 0;
}
/*
 * Sweeps the one-pass pointer-chase benchmark over data sizes in
 * [beginSize, endSize], stepping by `stride` (all sizes are counted in
 * unsigned ints, not bytes -- see the KB/MB macros). For each size: reset
 * the device, build and upload the chase array, run one warm-up launch and
 * one recorded launch of strided_access_onepass, then print the average
 * cycles per dependent load reported by the kernel.
 *
 * Fix: the deprecated cudaThreadSynchronize() (removed in CUDA 12) is
 * replaced by its documented equivalent cudaDeviceSynchronize().
 * NOTE(review): host malloc results are never checked, and the values
 * written through `help` are never read back -- `help` appears to exist
 * only so the kernel's loads cannot be optimized away.
 */
void TLB_benchmarking(int beginSize, int endSize, int stride) {
    for (int ds = beginSize; ds <= endSize; ds += stride) {
        cout << "Struc: Data size: " << (float)ds / MB << "MB\t" << "Stride: " << stride / MB << "MB\t";
        cudaDeviceReset(); // start every measurement from a clean device state
        cudaError_t error_id;
        unsigned *h_a, *d_a;
        unsigned *h_timeinfo, *d_timeinfo;
        double *help; // device sink for the kernel's index sum (never read back)
        h_a = (unsigned*)malloc(sizeof(unsigned)*ds);
        error_id = cudaMalloc ((void **) &d_a, sizeof(unsigned)*ds);
        if (error_id != cudaSuccess) cerr<<"Error 1.0 is "<<cudaGetErrorString(error_id)<<endl;
        /* initialize array elements on CPU with pointers into d_a. */
        generate_strided_onepass(h_a,ds,stride);
        /* copy array elements from CPU to GPU */
        error_id = cudaMemcpy(d_a, h_a, sizeof(unsigned)*ds, cudaMemcpyHostToDevice);
        if (error_id != cudaSuccess) cerr<<"Error 1.1 is "<<cudaGetErrorString(error_id)<<endl;
        // One duration slot per launched thread (1x1 for TLB benchmarking).
        h_timeinfo = (unsigned *) malloc(sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal);
        error_id = cudaMalloc((void **) &d_timeinfo, sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal);
        if (error_id != cudaSuccess) cerr << "Error 1.2 is " << cudaGetErrorString(error_id) << endl;
        error_id = cudaMalloc((void **) &help, sizeof(double) * numThreadsGlobal * numBlocksGlobal);
        if (error_id != cudaSuccess) cerr << "Error 1.3 is " << cudaGetErrorString(error_id) << endl;
        cudaDeviceSynchronize();
        /* launch kernel*/
        dim3 Db = dim3(numThreadsGlobal);
        dim3 Dg = dim3(numBlocksGlobal);
        strided_access_onepass<<< Dg, Db >>> (d_a, ds, stride, false, NULL, NULL); //warp up
        strided_access_onepass<<< Dg, Db >>> (d_a, ds, stride, true, d_timeinfo, help); //formal
        cudaDeviceSynchronize(); // wait for the timed launch before error checks
        error_id = cudaGetLastError();
        if (error_id != cudaSuccess) {
            cerr << "Error kernel is " << cudaGetErrorString(error_id) << endl;
        }
        /* copy results from GPU to CPU */
        cudaDeviceSynchronize();
        error_id = cudaMemcpy((void *) h_timeinfo, (void *) d_timeinfo, sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal, cudaMemcpyDeviceToHost);
        if (error_id != cudaSuccess) cerr << "Error 2.2 is " << cudaGetErrorString(error_id) << endl;
        double total = 0; //here we use double, otherwise it will overflow
        for (int i = 0; i < numThreadsGlobal * numBlocksGlobal; i++) {
            total += h_timeinfo[i];
        }
        total /= (numThreadsGlobal * numBlocksGlobal);
        cout << "cycle: " << total << endl;
        cudaDeviceSynchronize();
        /* free memory on GPU */
        cudaFree(help);
        cudaFree(d_a);
        cudaFree(d_timeinfo);
        /*free memory on CPU */
        free(h_a);
        free(h_timeinfo);
        cudaDeviceReset();
    }
}
//used for TLB benchmarking
/*
 * Serial pointer chase over arr, one pass, timing every dependent load.
 * arr[curIdx] yields the next index to visit, so each load depends on the
 * previous one; the walk ends once the fetched index reaches `length`.
 * Each load is bracketed with clock64() and 16 cycles are subtracted for
 * the dependent add that forces the load to complete (cost measured on
 * K40m, per the inline note). With `record` set, the per-thread mean
 * latency is written to duration[gid] and the accumulated index sum to
 * help[gid], which keeps the loads from being optimized away.
 * NOTE(review): `stride` is unused here -- the stride is baked into arr by
 * generate_strided_onepass. If length <= 0 the loop body never runs and
 * total/myIteration divides by zero; callers pass positive lengths.
 */
__global__ void strided_access_onepass(unsigned *arr, int length, int stride, bool record, unsigned *duration, double *help) {
    unsigned long start, end;
    unsigned gid = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned curIdx = 0;
    double anc = 0;    // running sum of visited indices (forces the loads)
    double total = 0;  // accumulated latency in cycles
    int myIteration = 0;
    //traverse the data array once
    while (curIdx < length) {
        start = clock64();
        curIdx = arr[curIdx];
        anc += curIdx; //to ensure the curIdx has been read, this instruction is 16-cycle long on K40m
        end = clock64();
        total += (end-start-16);
        myIteration++;
    }
    if (record) {
        duration[gid] = (total/myIteration);
        help[gid] = anc;
    }
}
// Builds the pointer-chase array for the one-pass traversal: slot i holds
// the index visited right after i (i + stride). Tail entries point past
// `length`, which is how the kernel's chase terminates.
void generate_strided_onepass(unsigned *arr, int length, int stride) {
    int i = 0;
    while (i < length) {
        arr[i] = static_cast<unsigned>(i + stride);
        i++;
    }
}
//void measure_global() {
//
// int stride = pageSizeGlobal*KB; //2MB stride
// set<int> missPages; //recording the overall missing pages in each case
//
// //begin and end size in MBs
// /* To test the TLB structures the beginSize and endSize is different;
// * To test the latency of multi-thread, beginSize and endSize should set as the data size tested */
// int beginSize = dataSizeGlobal * MB;
// int endSize = dataSizeGlobal * MB;
//
// //1. The L1 TLB has 16 entries. Test with N_min=28 *1024*256, N_max>32*1024*256
// //2. The L2 TLB has 65 entries. Test with N_min=128*1024*256, N_max=160*1024*256
// for (int dataSize = beginSize; dataSize <= endSize; dataSize += (128*KB)) {
//// cout<<"Data size: "<<(float)dataSize/MB<<"MB\t"<<"Stride: "<< stride/MB <<"MB"<<endl;
// cout<<"Data size: "<<(float)dataSize/MB<<"MB\tbsize: "<<numThreadsGlobal<<"\tgsize: "<<numBlocksGlobal<<'\t';
// parametric_measure_global(dataSize, false, stride, missPages); //not finegrained
// }
//}
//void TLB_finegrained(int N, bool finegrained, int stride, set<int> & lastMissPages) {
// cudaDeviceReset();
// cudaError_t error_id;
// int i;
// unsigned *h_a, *d_a;
// h_a = (unsigned*)malloc(sizeof(unsigned)*N);
// error_id = cudaMalloc ((void **) &d_a, sizeof(unsigned)*N);
//
// if (error_id != cudaSuccess)
// cerr<<"Error 1.0 is "<<cudaGetErrorString(error_id)<<endl;
//
// /* initialize array elements on CPU with pointers into d_a. */
// generate_strided(h_a,N,stride);
// //generate_strided_onepass(h_a,N,(mul)*stride);
//
// /* copy array elements from CPU to GPU */
// error_id = cudaMemcpy(d_a, h_a, sizeof(unsigned)*N, cudaMemcpyHostToDevice);
// if (error_id != cudaSuccess) {
// cerr<<"Error 1.1 is "<<cudaGetErrorString(error_id)<<endl;
// }
//
// unsigned *h_index, *h_timeinfo, *d_timeinfo, *d_index;
// double *help;
//
// if (finegrained) {
// h_index = (unsigned *) malloc(sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal * ITERATION);
// h_timeinfo = (unsigned *) malloc(sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal * ITERATION);
//
// //recording time and visited locations
// error_id = cudaMalloc((void **) &d_timeinfo, sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal * ITERATION);
// if (error_id != cudaSuccess) {
// cerr << "Error 1.2 is " << cudaGetErrorString(error_id) << endl;
// }
//
// error_id = cudaMalloc((void **) &d_index, sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal *ITERATION);
// if (error_id != cudaSuccess) {
// cerr << "Error 1.3 is " << cudaGetErrorString(error_id) << endl;
// }
// }
// else {
// h_timeinfo = (unsigned *) malloc(sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal);
// error_id = cudaMalloc((void **) &d_timeinfo, sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal);
// if (error_id != cudaSuccess) {
// cerr << "Error 1.4 is " << cudaGetErrorString(error_id) << endl;
// }
// error_id = cudaMalloc((void **) &help, sizeof(double) * numThreadsGlobal * numBlocksGlobal);
// if (error_id != cudaSuccess) {
// cerr << "Error 1.5 is " << cudaGetErrorString(error_id) << endl;
// }
// }
//
// cudaThreadSynchronize ();
// /* launch kernel*/
// dim3 Db = dim3(numThreadsGlobal);
// dim3 Dg = dim3(numBlocksGlobal);
// if (finegrained) {
// strided_access_finegrained<<<Dg, Db>>>(d_a, N, false, NULL, NULL);
// strided_access_finegrained<<<Dg, Db>>>(d_a, N, false, d_timeinfo, d_index);
// }
// else {
// strided_access<<<Dg, Db>>>(d_a, N, stride, false, NULL, NULL); //warp up
// strided_access<<<Dg, Db>>>(d_a, N, stride, true, d_timeinfo, help); //formal
// }
//
// cudaThreadSynchronize();
//
// error_id = cudaGetLastError();
// if (error_id != cudaSuccess) {
// cerr<<"Error kernel is "<<cudaGetErrorString(error_id)<<endl;
// }
//
// /* copy results from GPU to CPU */
// cudaThreadSynchronize ();
//
// if (finegrained) {
// error_id = cudaMemcpy((void *)h_timeinfo, (void *)d_timeinfo, sizeof(unsigned)*ITERATION*numThreadsGlobal * numBlocksGlobal, cudaMemcpyDeviceToHost);
// if (error_id != cudaSuccess) {
// cerr<<"Error 2.0 is "<<cudaGetErrorString(error_id)<<endl;
// }
// error_id = cudaMemcpy((void *)h_index, (void *)d_index, sizeof(unsigned)*ITERATION*numThreadsGlobal * numBlocksGlobal, cudaMemcpyDeviceToHost);
// if (error_id != cudaSuccess) {
// cerr<<"Error 2.1 is "<<cudaGetErrorString(error_id)<<endl;
// }
//
// //statistics
// int count_less_300 = 0, count_300_400 = 0, count_400_500 = 0, count_500_600 = 0, count_larger_600 = 0;
// double total = 0;
//
// int loop = 0; //how many times the array is looped
//
// set<int> curMissPages;
// for(i=0 ;i<ITERATION;i++) {
// int curPage = h_index[i]/stride;
// if ( (h_timeinfo[i] > 400) && (h_timeinfo[i] < 510)) {
// curMissPages.insert(curPage);
// }
// cout<<curPage<<'\t'<<h_index[i]<<'\t'<<h_timeinfo[i]<<endl;
//
// if (h_index[i]<stride) loop ++;
// if (h_timeinfo[i] < 300) count_less_300++;
// else if (h_timeinfo[i] < 400) count_300_400 ++;
// else if (h_timeinfo[i] < 500) count_400_500 ++;
// else if (h_timeinfo[i] < 600) count_500_600++;
// else count_larger_600++;
// total += h_timeinfo[i];
// }
// set<int> diffSet;
// set_difference(curMissPages.begin(), curMissPages.end(),lastMissPages.begin(), lastMissPages.end(), inserter(diffSet,diffSet.end()));
//
// //to check that pages missed in last dataset will be hit in this dataset
// set<int> checkSet;
// set_difference(lastMissPages.begin(), lastMissPages.end(), curMissPages.begin(), curMissPages.end(), inserter(checkSet,checkSet.end()));
// assert(checkSet.size() == 0);
//
// int totalPages = N /512 / 1024;
// cout<<"Pages: "<<totalPages<<", misses: "<<count_400_500<<", loops: "<<loop<<", new miss pages: ";
// for (set<int>::iterator it = diffSet.begin(); it != diffSet.end(); ++it) {
// cout<<*it<<' ';
// }
// cout<<endl;
//
// // lastMissPages = curMissPages;
//
// total = total / ITERATION;
// cout<<"Average: "<<total<<endl;
// // cout<<"Statistics:"<<endl;
// // cout<<"Data size: "<<N / 1024 / 256<<" MB."<<endl;
//
// // cout<<"less than 300: "<<count_less_300<<endl;
// // cout<<"300 - 400: "<<count_300_400<<endl;
// // cout<<"400 - 500: "<<count_400_500<<endl;
// // cout<<"500 - 600: "<<count_500_600<<endl;
// // cout<<"larger than 600: "<<count_larger_600<<endl;
// // cout<<"Average cycles: "<<total<<" in "<<ITERATION<<" iterations."<<endl;
// }
// else {
// error_id = cudaMemcpy((void *)h_timeinfo, (void *)d_timeinfo, sizeof(unsigned)*numThreadsGlobal * numBlocksGlobal, cudaMemcpyDeviceToHost);
// if (error_id != cudaSuccess) {
// cerr<<"Error 2.2 is "<<cudaGetErrorString(error_id)<<endl;
// }
//
// double total = 0; //here we use double, otherwise it will overflow
// for(int i = 0; i < numThreadsGlobal*numBlocksGlobal; i++) {
// total += h_timeinfo[i];
// }
// total /= (numThreadsGlobal*numBlocksGlobal);
// cout<<"cycle: "<<total<<endl;
// }
// cudaThreadSynchronize();
//
// /* free memory on GPU */
// if (finegrained) {
// cudaFree(d_index);
// free(h_index);
// }
// else {
// cudaFree(help);
// }
//
// cudaFree(d_a);
// cudaFree(d_timeinfo);
//
// /*free memory on CPU */
// free(h_a);
// free(h_timeinfo);
//
// cudaDeviceReset();
//}
//obsolete: to record the page number and study the cache replacement policy
//__global__ void strided_access_finegrained(unsigned *arr, int length, bool record, unsigned *duration, unsigned *index) {
//
// unsigned timestamp;
// unsigned gid = blockDim.x * blockIdx.x + threadIdx.x;
// unsigned gsize = blockDim.x * gridDim.x;
// unsigned curIdx = (blockDim.x * threadIdx.x + blockIdx.x) % length;
//
// __shared__ unsigned int s_tvalue[ITERATION_FINEGRAINED*MAX_NUM_THREADS];
// __shared__ unsigned int s_index[ITERATION_FINEGRAINED*MAX_NUM_THREADS];
//
// unsigned it = gid;
// while (it < ITERATION_FINEGRAINED * MAX_NUM_THREADS) {
// s_index[it] = 0;
// s_tvalue[it] = 0;
// it += gsize;
// }
// __syncthreads();
//
// it = gid;
// for (int k = 0; k < ITERATION_FINEGRAINED; k++) {
// timestamp = clock();
// curIdx = arr[curIdx];
// s_index[it]= curIdx;
// timestamp = clock() - timestamp;
// s_tvalue[it] = timestamp;
// it += ITERATION_FINEGRAINED;
// }
//
// if (record) {
// it = threadIdx.x;
// while (it < blockDim.x * ITERATION_FINEGRAINED) {
// duration[it + blockIdx.x*blockDim.x*ITERATION_FINEGRAINED] = s_tvalue[it];
// index[it + blockIdx.x*blockDim.x*ITERATION_FINEGRAINED] = s_index[it];
// it += blockDim.x;
// }
// }
//} |
838d5bfce1c701526ac094f9439cc760fda74ed1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define USE_CPU 0
#include "blocksfuncs.h"
#include <cstdio>
#include <cstdlib>
// Checks for a pending runtime error and aborts with file/function/line
// context if one is found.
// Fix: the original macro's last line ended with a stray '\' continuation,
// which silently pulled the following file-scope declaration into the macro
// body. The body is also wrapped in do { } while (0) so the macro behaves
// as a single statement inside if/else chains.
#define CHECK_CUDA_ERROR()                                                                                            \
    do                                                                                                                \
    {                                                                                                                 \
        hipError_t err = hipGetLastError();                                                                           \
        if (err != hipSuccess)                                                                                        \
        {                                                                                                             \
            printf("%s.%s.%d: %s (error code %d).\n", __FILE__, __FUNCTION__, __LINE__, hipGetErrorString(err), err); \
            fflush(stdout);                                                                                           \
            exit(1);                                                                                                  \
        }                                                                                                             \
    } while (0)
const int NUM_BLOCKS = 30;
void testSortCPU();
template <typename T> T * readFromFile(const char * const fileName, int & numElems);
void checkOutput(const int numUniqueWords, void * gpuCounts);
void runKernel0(const int numLines,
void * gpuData,
void * gpuG,
void * gpuT0,
void * gpuT1,
void * gpuT2,
void * gpuCounts,
const int numUniqueWords);
void runKernel1(const int numLines,
void * gpuData,
void * gpuG,
void * gpuCounts,
const int numUniqueWords);
void runKernel2(void * gpuData,
void * gpuG,
void * gpuCounts,
const int numUniqueWords);
void runKernel3(const int numLines,
void * gpuData,
void * gpuG,
void * gpuCounts,
const int numUniqueWords);
void runKernel4(void * gpuData,
void * gpuG,
const int numUniqueWords,
void * gpuCounts,
void * gpuBlockCounts);
void runKernel5(void * gpuData,
void * gpuG,
const int numUniqueWords,
void * gpuCounts,
void * gpuBlockCounts);
__constant__ unsigned int hashConstT0[1624];
__constant__ unsigned int hashConstT1[1624];
__constant__ unsigned int hashConstT2[1624];
/*
 * Perfect-hash lookup for a single word.
 *
 * Consumes characters while *key > ' ' and advances the caller's pointer
 * (passed by reference) so it ends up on the first non-word character.
 * Each character adds a position-dependent entry from the three T tables:
 * the per-position base offset starts at -65 (so 'A' in position 0 maps to
 * slot 0) and grows by 58 per character. The three partial sums are reduced
 * modulo 52729, folded through hashG, and combined modulo 42869 into the
 * final word index.
 */
__host__ __device__ int hashFunction(const char *& key,
                                     const int * const hashG,
                                     const unsigned int * const hashT0,
                                     const unsigned int * const hashT1,
                                     const unsigned int * const hashT2)
{
    unsigned int sum0 = 0, sum1 = 0, sum2 = 0;
    int base = -65;
    for (; *key > ' '; ++key, base += 58)
    {
        const int slot = base + *key;
        sum0 += hashT0[slot];
        sum1 += hashT1[slot];
        sum2 += hashT2[slot];
    }
    return (hashG[sum0 % 52729] + hashG[sum1 % 52729] + hashG[sum2 % 52729]) % 42869;
}
/*
 * Resets the two device-side counters used by the word-count kernels.
 * NOTE(review): every launched thread performs both stores; all write the
 * same value, so launching with more than one thread is harmless.
 */
__global__ void kernel(int * numWords, int * globalOffset)
{
    *globalOffset = 0;
    *numWords = 0;
}
/*
 * Word-count kernel, one thread per input line, hash tables read from
 * global memory.
 *
 * `data` layout as read here: int slot 1 holds the number of lines, and the
 * 32-int header is followed by per-line byte offsets into the same buffer.
 * Each thread walks its line, hashes every space-separated word with the
 * three-table perfect hash, and atomically increments counts[wordIndex].
 * `blockOffset` shifts blockIdx.x so a large grid can be launched in
 * chunks.
 *
 * NOTE(review): assumes every line ends with '\n' and that no character
 * below ' ' other than '\n' occurs inside a line -- such a character would
 * not be consumed by either branch and the while loop would spin forever.
 */
__global__ void wordCountKernel0(const int blockOffset,
                                 void * const data,
                                 const int * hashG,
                                 const unsigned int * hashT0,
                                 const unsigned int * hashT1,
                                 const unsigned int * hashT2,
                                 unsigned int * const counts)
{
    const int globalIndex = (blockOffset + blockIdx.x) * blockDim.x + threadIdx.x;
    // const int numWords = reinterpret_cast<const int * >(data)[0];
    const int numLines = reinterpret_cast<const int * >(data)[1];
    const int * const lineOffsets = reinterpret_cast<const int * >(data) + 32;
    // const int * const lineLengths = lineOffsets + numLines;
    // int wordCount = 0;
    if (globalIndex < numLines)
    {
        const char * wordData = reinterpret_cast<char * >(data) + lineOffsets[globalIndex];
        while (*wordData != '\n')
        {
            if (*wordData == ' ') ++wordData;
            else
            {
                // hashFunction advances wordData past the word it consumes.
                const int wordIndex = hashFunction(wordData, hashG, hashT0, hashT1, hashT2);
                atomicAdd(counts + wordIndex, 1);
            }
        }
    }
}
/*
 * Word-count kernel, one thread per input line. Identical scheme to
 * wordCountKernel0 except that the three T tables come from __constant__
 * memory (hashConstT0/1/2); only the G table is still passed from global
 * memory. `blockOffset` shifts blockIdx.x so the grid can be launched in
 * chunks.
 *
 * NOTE(review): same input assumptions as wordCountKernel0 -- each line is
 * '\n'-terminated, and a control character below ' ' other than '\n' would
 * make the while loop spin forever.
 */
__global__ void wordCountKernel1(const int blockOffset,
                                 void * const data,
                                 const int * hashG,
                                 unsigned int * const counts)
{
    const int globalIndex = (blockOffset + blockIdx.x) * blockDim.x + threadIdx.x;
    // const int numWords = reinterpret_cast<const int * >(data)[0];
    const int numLines = reinterpret_cast<const int * >(data)[1];
    const int * const lineOffsets = reinterpret_cast<const int * >(data) + 32;
    // const int * const lineLengths = lineOffsets + numLines;
    // int wordCount = 0;
    if (globalIndex < numLines)
    {
        const char * wordData = reinterpret_cast<char * >(data) + lineOffsets[globalIndex];
        while (*wordData != '\n')
        {
            if (*wordData == ' ') ++wordData;
            else
            {
                // hashFunction advances wordData past the word it consumes.
                const int wordIndex = hashFunction(wordData, hashG, hashConstT0, hashConstT1, hashConstT2);
                atomicAdd(counts + wordIndex, 1);
            }
        }
    }
}
/*
 * Word-count kernel using constant-memory T tables, like wordCountKernel1,
 * but a single launch covers all lines via a grid-stride loop instead of
 * chunked launches with a blockOffset. Each iteration handles one line;
 * counts are accumulated with atomicAdd into the shared `counts` array.
 */
__global__ void wordCountKernel2(void * const data,
                                 const int * hashG,
                                 unsigned int * const counts)
{
    // const int numWords = reinterpret_cast<const int * >(data)[0];
    const int numLines = reinterpret_cast<const int * >(data)[1];
    const int * const lineOffsets = reinterpret_cast<const int * >(data) + 32;
    // const int * const lineLengths = lineOffsets + numLines;
    // int wordCount = 0;
    // Grid-stride loop: works for any grid size relative to numLines.
    for (int globalIndex = blockIdx.x * blockDim.x + threadIdx.x; globalIndex < numLines; globalIndex += gridDim.x * blockDim.x)
    {
        const char * wordData = reinterpret_cast<char * >(data) + lineOffsets[globalIndex];
        while (*wordData != '\n')
        {
            if (*wordData == ' ') ++wordData;
            else
            {
                // hashFunction advances wordData past the word it consumes.
                const int wordIndex = hashFunction(wordData, hashG, hashConstT0, hashConstT1, hashConstT2);
                atomicAdd(counts + wordIndex, 1);
            }
        }
    }
}
/*
 * Folds one character into the streaming word-hash state.
 *
 * A character > ' ' extends the current word: it adds the
 * position-dependent constant-memory table entries into f0/f1/f2 and bumps
 * the position offset `i` by 58. The first non-word character arriving
 * while `inWord` is set closes the word: one atomicAdd on the word's count
 * slot (same G-fold and moduli as hashFunction), then the state resets
 * (f0=f1=f2=0, i=-65) for the next word. Non-word characters outside a
 * word are ignored. All state lives in the caller's registers and is
 * passed by reference.
 */
__device__ void checkCharacterForHash(const char key, bool & inWord, unsigned int & f0, unsigned int & f1, unsigned int & f2, int & i, const int * hashG, unsigned int * const counts)
{
    if (key > ' ')
    {
        inWord = true;
        f0 += hashConstT0[i + key];
        f1 += hashConstT1[i + key];
        f2 += hashConstT2[i + key];
        i += 58;
    }
    else if (inWord)
    {
        inWord = false;
        atomicAdd(counts + (hashG[f0 % 52729] + hashG[f1 % 52729] + hashG[f2 % 52729]) % 42869, 1);
        f0 = f1 = f2 = 0;
        i = -65;
    }
}
/*
 * Word-count kernel, one thread per line, reading the line as char4
 * vectors (4 bytes per load) through the streaming hash helper.
 *
 * The do-while reads one char4 ahead (nextPatch) while processing the
 * previous one, and stops when the '\n' lands in the .w lane of the patch
 * just processed.
 *
 * NOTE(review): this termination test assumes every line's '\n' falls on
 * the last byte of a char4 (i.e. line storage at lineOffsets[...] is
 * 4-byte aligned and 4-byte sized); if '\n' lands in .x/.y/.z the loop
 * runs on into the next line's bytes -- confirm the buffer builder pads
 * lines. The look-ahead also reads one char4 past the terminator, so the
 * buffer needs at least 4 bytes of tail padding. The trailing
 * checkCharacterForHash(patch.w, ...) after the loop is a no-op in this
 * vector path (patch.w == '\n' was already processed inside the loop and
 * cleared inWord); it mirrors the flush needed by the disabled scalar path
 * under #if 0.
 */
__global__ void wordCountKernel3(const int blockOffset,
                                 void * const data,
                                 const int * hashG,
                                 unsigned int * const counts)
{
    const int globalIndex = (blockOffset + blockIdx.x) * blockDim.x + threadIdx.x;
    // const int numWords = reinterpret_cast<const int * >(data)[0];
    const int numLines = reinterpret_cast<const int * >(data)[1];
    const int * const lineOffsets = reinterpret_cast<const int * >(data) + 32;
    // int wordCount = 0;
    if (globalIndex < numLines)
    {
        // Streaming hash state shared across all characters of the line.
        unsigned int f0 = 0, f1 = 0, f2 = 0;
        int i = -65;
        bool inWord = false;
#if 0
        // Scalar reference path: byte-at-a-time, with an explicit final
        // flush for the terminating character.
        const char * wordData = reinterpret_cast<const char * >(data) + lineOffsets[globalIndex];
        while (*wordData != '\n')
        {
            checkCharacterForHash(*(wordData++), inWord, f0, f1, f2, i, hashG, counts);
        }
        checkCharacterForHash(*wordData, inWord, f0, f1, f2, i, hashG, counts);
#else
        const char4 * wordData = reinterpret_cast<const char4 * >(reinterpret_cast<const char * >(data) + lineOffsets[globalIndex]);
        char4 patch, nextPatch = *(wordData++);
        do
        {
            patch = nextPatch;
            nextPatch = *(wordData++); // look-ahead load (may read past '\n')
            checkCharacterForHash(patch.x, inWord, f0, f1, f2, i, hashG, counts);
            checkCharacterForHash(patch.y, inWord, f0, f1, f2, i, hashG, counts);
            checkCharacterForHash(patch.z, inWord, f0, f1, f2, i, hashG, counts);
            checkCharacterForHash(patch.w, inWord, f0, f1, f2, i, hashG, counts);
        }
        while (patch.w != '\n');
        checkCharacterForHash(patch.w, inWord, f0, f1, f2, i, hashG, counts);
#endif
    }
}
// Per-block-histogram variant: each block accumulates into its own private
// slice of `counts` (offset numUniqueWords * blockIdx.x) so no global
// atomics are needed; wordCountReduce later sums the slices.
// Each round every thread hashes the next word of its line; the shared
// `done` flags are summed with reduceAdd to detect when all lines in the
// current tile are exhausted.
// NOTE(review): the active #else path increments counts[...] non-atomically,
// so two threads of the same block hashing the same word in one round lose
// updates -- confirm this is an accepted trade-off of this experiment.
// NOTE(review): re-writing done[threadIdx.x] right after reading done[0] is
// only safe if reduceAdd ends with a block-wide barrier -- verify.
__global__ void wordCountKernel4(void * const data,
                                 const int * hashG,
                                 const int numUniqueWords,
                                 unsigned int * counts)
{
    __shared__ int wordIndices[THREADS_PER_BLOCK];
    __shared__ int done[THREADS_PER_BLOCK];
    const int numLines = reinterpret_cast<const int * >(data)[1];
    const int * const lineOffsets = reinterpret_cast<const int * >(data) + 32;
    // Point this block at its private histogram slice.
    counts += numUniqueWords * blockIdx.x;
    for (int globalOffset = blockIdx.x * blockDim.x; globalOffset < numLines; globalOffset += gridDim.x * blockDim.x)
    {
        const int globalIndex = globalOffset + threadIdx.x;
        const char * wordData;
        if (globalIndex < numLines) wordData = reinterpret_cast<char * >(data) + lineOffsets[globalIndex];
        do
        {
            if (globalIndex >= numLines || *wordData == '\n')
            {
                wordIndices[threadIdx.x] = -1;  // -1 marks "no word this round"
                done[threadIdx.x] = 1;
            }
            else
            {
                while (*wordData == ' ') ++wordData;
                if (*wordData == '\n')
                {
                    wordIndices[threadIdx.x] = -1;
                    done[threadIdx.x] = 1;
                }
                else
                {
                    wordIndices[threadIdx.x] = hashFunction(wordData, hashG, hashConstT0, hashConstT1, hashConstT2);
                    done[threadIdx.x] = 0;
                }
            }
#if 0
            // Alternative: sort the round's hashes so equal values are
            // adjacent, then let one thread per run write the whole count.
            bitonicSort<int, 1>(wordIndices);
            const int wordIndex = wordIndices[threadIdx.x];
            int count = 1;
            if (wordIndex != -1 && (threadIdx.x == 0 || wordIndices[threadIdx.x - 1] != wordIndex))
            {
                int index = threadIdx.x + 1;
                while (index < THREADS_PER_BLOCK && wordIndices[index] == wordIndex)
                {
                    ++index;
                    ++count;
                }
                counts[wordIndex] += count;
            }
#else
            if (wordIndices[threadIdx.x] != -1) counts[wordIndices[threadIdx.x]]++;
#endif
            reduceAdd<int>(done);
        }
        while (done[0] < THREADS_PER_BLOCK);
    }
    __syncthreads();
}
// Simplest per-block-histogram variant: grid-stride over lines, each block
// writing its own slice of `counts` (offset numUniqueWords * blockIdx.x),
// which wordCountReduce sums afterwards.
// NOTE(review): the increments are non-atomic, so threads of the same block
// counting the same word concurrently can lose updates -- confirm intent.
__global__ void wordCountKernel5(void * const data,
                                 const int * hashG,
                                 const int numUniqueWords,
                                 unsigned int * counts)
{
    const int numLines = reinterpret_cast<const int * >(data)[1];
    const int * const lineOffsets = reinterpret_cast<const int * >(data) + 32;
    // Point this block at its private histogram slice.
    counts += numUniqueWords * blockIdx.x;
    for (int globalIndex = blockIdx.x * blockDim.x + threadIdx.x; globalIndex < numLines; globalIndex += gridDim.x * blockDim.x)
    {
        const char * wordData = reinterpret_cast<char * >(data) + lineOffsets[globalIndex];
        while (*wordData != '\n')
        {
            if (*wordData == ' ') ++wordData;
            else counts[hashFunction(wordData, hashG, hashConstT0, hashConstT1, hashConstT2)]++;
        }
    }
}
// Folds ELEMS_PER_THREAD per-block partial histograms into the final one:
// counts[index] = sum_i blockCounts[index + numUniqueWords * i].
// No bounds check: the launch must cover exactly numUniqueWords entries,
// which the host guarantees by padding numUniqueWords up to a multiple of
// THREADS_PER_BLOCK and allocating the buffers at the padded size.
template <int ELEMS_PER_THREAD>
__global__ void wordCountReduce(const int numUniqueWords, unsigned int * const counts, const unsigned int * const blockCounts)
{
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int accum = 0;
    for (int i = 0; i < ELEMS_PER_THREAD; ++i)
    {
        accum += blockCounts[index + numUniqueWords * i];
    }
    counts[index] = accum;
}
// Debug kernel for the shared-memory bitonic sort: stages KEYS_PER_THREAD
// key/value pairs per thread into shared memory, sorts them by key with
// bitonicSort, and writes the result back in place. Single-block use only:
// the indices are built from threadIdx/blockDim with no block offset.
template <int THREADS, int KEYS_PER_THREAD>
__global__ void testSort(int * keys, int * vals)
{
    __shared__ int sharedKeys[THREADS * KEYS_PER_THREAD];
    __shared__ int sharedVals[THREADS * KEYS_PER_THREAD];
    for (int i = 0; i < KEYS_PER_THREAD; ++i)
    {
        sharedKeys[threadIdx.x + blockDim.x * i] = keys[threadIdx.x + blockDim.x * i];
        sharedVals[threadIdx.x + blockDim.x * i] = vals[threadIdx.x + blockDim.x * i];
    }
    bitonicSort<int, int, KEYS_PER_THREAD>(sharedKeys, sharedVals);
    for (int i = 0; i < KEYS_PER_THREAD; ++i)
    {
        keys[threadIdx.x + blockDim.x * i] = sharedKeys[threadIdx.x + blockDim.x * i];
        vals[threadIdx.x + blockDim.x * i] = sharedVals[threadIdx.x + blockDim.x * i];
    }
}
// Benchmark driver: loads the perfect-hash tables and the preprocessed input
// file (argv[1]), uploads everything to the GPU, then times each of the six
// word-count kernel variants with hipEvents and validates its histogram.
int main(int argc, char ** argv)
{
    const int NUM_KERNELS = 6;
    hipEvent_t beginEvents[NUM_KERNELS];
    hipEvent_t endEvents[NUM_KERNELS];
    int numWords, numLines, len, numUniqueWords = 42869;
    char * inputFile, * ptr;
    int gElems, t0Elems, t1Elems, t2Elems;
    int * cpuG;
    unsigned int * cpuT0, * cpuT1, * cpuT2;
    // Guard against a missing command-line argument (the original
    // dereferenced argv[1] unconditionally).
    if (argc < 2)
    {
        fprintf(stderr, "usage: %s <preprocessed input file>\n", argv[0]);
        return 1;
    }
    cpuG = readFromFile<int> ("/home/stuart/pico/data/wordcount/wordlist.g", gElems);
    cpuT0 = readFromFile<unsigned int>("/home/stuart/pico/data/wordcount/wordlist.T0", t0Elems);
    cpuT1 = readFromFile<unsigned int>("/home/stuart/pico/data/wordcount/wordlist.T1", t1Elems);
    cpuT2 = readFromFile<unsigned int>("/home/stuart/pico/data/wordcount/wordlist.T2", t2Elems);
    FILE * fp = fopen(argv[1], "rb");
    if (fp == NULL)
    {
        fprintf(stderr, "cannot open '%s'.\n", argv[1]);
        return 1;
    }
    fseek(fp, 0, SEEK_END);
    len = ftell(fp);
    fseek(fp, 0, SEEK_SET);
    inputFile = new char[len];
    if (fread(inputFile, len, 1, fp) != 1)
    {
        fprintf(stderr, "short read of '%s'.\n", argv[1]);
        return 1;
    }
    fclose(fp);
    // File header: word count, line count, then 120 reserved bytes -- the
    // per-line offset table starts at byte 128 (int index 32 on the device).
    ptr = inputFile;
    numWords = *reinterpret_cast<int * >(ptr); ptr += sizeof(int);
    numLines = *reinterpret_cast<int * >(ptr); ptr += sizeof(int);
    ptr += 120;
    // Pad the histogram size to a block-size multiple so wordCountReduce
    // needs no bounds check; validate against the unpadded size.
    const int realNumUniqueWords = numUniqueWords;
    if (numUniqueWords % THREADS_PER_BLOCK != 0) numUniqueWords += THREADS_PER_BLOCK - numUniqueWords % THREADS_PER_BLOCK;
    printf("%d bytes, %d words and %d lines.\n", len, numWords, numLines);
    fflush(stdout);
    void * gpuData, * gpuG, * gpuCounts, * gpuT0, * gpuT1, * gpuT2, * gpuGlobalOffset, * gpuBlockCounts;
    hipMalloc(&gpuData, len); CHECK_CUDA_ERROR();
    hipMalloc(&gpuG, gElems * sizeof(int)); CHECK_CUDA_ERROR();
    hipMalloc(&gpuT0, t0Elems * sizeof(unsigned int)); CHECK_CUDA_ERROR();
    hipMalloc(&gpuT1, t1Elems * sizeof(unsigned int)); CHECK_CUDA_ERROR();
    hipMalloc(&gpuT2, t2Elems * sizeof(unsigned int)); CHECK_CUDA_ERROR();
    hipMalloc(&gpuCounts, numUniqueWords * sizeof(int)); CHECK_CUDA_ERROR();
    hipMalloc(&gpuGlobalOffset, sizeof(int)); CHECK_CUDA_ERROR();
    hipMalloc(&gpuBlockCounts, numUniqueWords * sizeof(int) * NUM_BLOCKS); CHECK_CUDA_ERROR();
    hipMemcpy(gpuData, inputFile, len, hipMemcpyHostToDevice); CHECK_CUDA_ERROR();
    hipMemcpy(gpuG, cpuG, gElems * sizeof(int), hipMemcpyHostToDevice); CHECK_CUDA_ERROR();
    hipMemcpy(gpuT0, cpuT0, t0Elems * sizeof(unsigned int), hipMemcpyHostToDevice); CHECK_CUDA_ERROR();
    hipMemcpy(gpuT1, cpuT1, t1Elems * sizeof(unsigned int), hipMemcpyHostToDevice); CHECK_CUDA_ERROR();
    hipMemcpy(gpuT2, cpuT2, t2Elems * sizeof(unsigned int), hipMemcpyHostToDevice); CHECK_CUDA_ERROR();
    // FIX: pass the __constant__ symbols via HIP_SYMBOL instead of their
    // names as strings -- string-based symbol lookup is not supported by
    // hipMemcpyToSymbol, so the original copies failed and left the
    // constant tables zeroed.
    hipMemcpyToSymbol(HIP_SYMBOL(hashConstT0), cpuT0, t0Elems * sizeof(unsigned int), 0, hipMemcpyHostToDevice); CHECK_CUDA_ERROR();
    hipMemcpyToSymbol(HIP_SYMBOL(hashConstT1), cpuT1, t1Elems * sizeof(unsigned int), 0, hipMemcpyHostToDevice); CHECK_CUDA_ERROR();
    hipMemcpyToSymbol(HIP_SYMBOL(hashConstT2), cpuT2, t2Elems * sizeof(unsigned int), 0, hipMemcpyHostToDevice); CHECK_CUDA_ERROR();
    hipMemset(gpuGlobalOffset, 0, sizeof(int)); CHECK_CUDA_ERROR();
    for (int i = 0; i < NUM_KERNELS; ++i)
    {
        hipEventCreate(beginEvents + i);
        hipEventCreate(endEvents + i);
    }
    // Host copies are no longer needed once the device holds the data.
    delete [] cpuG;
    delete [] cpuT0;
    delete [] cpuT1;
    delete [] cpuT2;
    delete [] inputFile;
    // For each variant: clear the histogram, time the launch between two
    // events on the default stream, then validate the result on the host.
    int KERNEL_INDEX = 0;
    hipMemset(gpuCounts, 0, sizeof(int) * numUniqueWords); CHECK_CUDA_ERROR();
    hipStreamSynchronize(0); CHECK_CUDA_ERROR();
    hipEventRecord(beginEvents[KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    runKernel0(numLines, gpuData, gpuG, gpuT0, gpuT1, gpuT2, gpuCounts, numUniqueWords); CHECK_CUDA_ERROR();
    hipEventRecord(endEvents [KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    hipEventSynchronize(endEvents[KERNEL_INDEX]); CHECK_CUDA_ERROR();
    checkOutput(realNumUniqueWords, gpuCounts);
    ++KERNEL_INDEX;
    hipMemset(gpuCounts, 0, sizeof(int) * numUniqueWords); CHECK_CUDA_ERROR();
    hipStreamSynchronize(0); CHECK_CUDA_ERROR();
    hipEventRecord(beginEvents[KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    runKernel1(numLines, gpuData, gpuG, gpuCounts, numUniqueWords); CHECK_CUDA_ERROR();
    hipEventRecord(endEvents [KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    hipEventSynchronize(endEvents[KERNEL_INDEX]); CHECK_CUDA_ERROR();
    checkOutput(realNumUniqueWords, gpuCounts);
    ++KERNEL_INDEX;
    hipMemset(gpuCounts, 0, sizeof(int) * numUniqueWords); CHECK_CUDA_ERROR();
    hipStreamSynchronize(0); CHECK_CUDA_ERROR();
    hipEventRecord(beginEvents[KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    runKernel2(gpuData, gpuG, gpuCounts, numUniqueWords); CHECK_CUDA_ERROR();
    hipEventRecord(endEvents [KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    hipEventSynchronize(endEvents[KERNEL_INDEX]); CHECK_CUDA_ERROR();
    checkOutput(realNumUniqueWords, gpuCounts);
    ++KERNEL_INDEX;
    hipMemset(gpuCounts, 0, sizeof(int) * numUniqueWords); CHECK_CUDA_ERROR();
    hipStreamSynchronize(0); CHECK_CUDA_ERROR();
    hipEventRecord(beginEvents[KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    runKernel3(numLines, gpuData, gpuG, gpuCounts, numUniqueWords); CHECK_CUDA_ERROR();
    hipEventRecord(endEvents [KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    hipEventSynchronize(endEvents[KERNEL_INDEX]); CHECK_CUDA_ERROR();
    checkOutput(realNumUniqueWords, gpuCounts);
    ++KERNEL_INDEX;
    // Kernels 4 and 5 write per-block partials, so their scratch buffer is
    // cleared too.
    hipMemset(gpuCounts, 0, sizeof(int) * numUniqueWords); CHECK_CUDA_ERROR();
    hipMemset(gpuBlockCounts, 0, sizeof(int) * numUniqueWords * NUM_BLOCKS); CHECK_CUDA_ERROR();
    hipStreamSynchronize(0); CHECK_CUDA_ERROR();
    hipEventRecord(beginEvents[KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    runKernel4(gpuData, gpuG, numUniqueWords, gpuCounts, gpuBlockCounts); CHECK_CUDA_ERROR();
    hipEventRecord(endEvents [KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    hipEventSynchronize(endEvents[KERNEL_INDEX]); CHECK_CUDA_ERROR();
    checkOutput(realNumUniqueWords, gpuCounts);
    ++KERNEL_INDEX;
    hipMemset(gpuCounts, 0, sizeof(int) * numUniqueWords); CHECK_CUDA_ERROR();
    hipMemset(gpuBlockCounts, 0, sizeof(int) * numUniqueWords * NUM_BLOCKS); CHECK_CUDA_ERROR();
    hipStreamSynchronize(0); CHECK_CUDA_ERROR();
    hipEventRecord(beginEvents[KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    runKernel5(gpuData, gpuG, numUniqueWords, gpuCounts, gpuBlockCounts); CHECK_CUDA_ERROR();
    hipEventRecord(endEvents [KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    hipEventSynchronize(endEvents[KERNEL_INDEX]); CHECK_CUDA_ERROR();
    checkOutput(realNumUniqueWords, gpuCounts);
    ++KERNEL_INDEX;
    for (int i = 0; i < KERNEL_INDEX; ++i)
    {
        float ms;
        hipEventElapsedTime(&ms, beginEvents[i], endEvents[i]);
        printf("kernel %d took %.3f ms.\n", i, ms);
    }
    // Release device resources (the original leaked them; harmless at
    // process exit but tidy for leak-checking tools).
    for (int i = 0; i < NUM_KERNELS; ++i)
    {
        hipEventDestroy(beginEvents[i]);
        hipEventDestroy(endEvents[i]);
    }
    hipFree(gpuData); hipFree(gpuG); hipFree(gpuT0); hipFree(gpuT1); hipFree(gpuT2);
    hipFree(gpuCounts); hipFree(gpuGlobalOffset); hipFree(gpuBlockCounts);
    return 0;
}
// Host harness for the testSort kernel: builds key/value arrays where vals
// is the exact reverse of keys, shuffles both with the same random swaps,
// sorts on the GPU, and prints the result for manual inspection (after a
// correct sort, row i should read "i  THREADS*KEYS_PER_THREAD-1-i").
void testSortCPU()
{
    const int THREADS = THREADS_PER_BLOCK;
    const int KEYS_PER_THREAD = 4;
    int * keys = new int[THREADS * KEYS_PER_THREAD];
    int * vals = new int[THREADS * KEYS_PER_THREAD];
    int * gpuKeys, * gpuVals;
    hipMalloc(reinterpret_cast<void ** >(&gpuKeys), sizeof(int) * THREADS * KEYS_PER_THREAD);
    hipMalloc(reinterpret_cast<void ** >(&gpuVals), sizeof(int) * THREADS * KEYS_PER_THREAD);
    for (int i = 0; i < THREADS * KEYS_PER_THREAD; ++i)
    {
        keys[i] = i;
        vals[i] = THREADS * KEYS_PER_THREAD - i - 1;
    }
    // Random permutation: swap pairs of random positions in lock-step so
    // the key/value pairing is preserved.
    for (int i = 0; i < THREADS * KEYS_PER_THREAD; ++i)
    {
        const int ind0 = rand() % (THREADS * KEYS_PER_THREAD);
        const int ind1 = rand() % (THREADS * KEYS_PER_THREAD);
        swap(keys[ind0], keys[ind1]);
        swap(vals[ind0], vals[ind1]);
    }
    hipMemcpy(gpuKeys, keys, sizeof(int) * THREADS * KEYS_PER_THREAD, hipMemcpyHostToDevice);
    hipMemcpy(gpuVals, vals, sizeof(int) * THREADS * KEYS_PER_THREAD, hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( testSort<THREADS, KEYS_PER_THREAD>), dim3(1), dim3(THREADS), 0, 0, gpuKeys, gpuVals);
    hipMemcpy(keys, gpuKeys, sizeof(int) * THREADS * KEYS_PER_THREAD, hipMemcpyDeviceToHost);
    hipMemcpy(vals, gpuVals, sizeof(int) * THREADS * KEYS_PER_THREAD, hipMemcpyDeviceToHost);
    for (int i = 0; i < THREADS * KEYS_PER_THREAD; ++i)
    {
        printf("%4d: %4d %4d\n", i, keys[i], vals[i]);
    }
    fflush(stdout);
    hipFree(gpuKeys);
    hipFree(gpuVals);
    delete [] keys;
    delete [] vals;
}
// Reads a length-prefixed binary file into a newly allocated array.
// File layout: a leading int holding the payload size in BYTES, followed by
// the payload. numElems receives the element count (bytes / sizeof(T)).
// The caller owns the returned array (delete []).
// FIX: the original dereferenced a NULL FILE* when the file was missing and
// ignored fread's return value; both now abort with a clear message.
template <typename T>
T * readFromFile(const char * const fileName, int & numElems)
{
    FILE * fp = fopen(fileName, "rb");
    if (fp == NULL)
    {
        fprintf(stderr, "readFromFile: cannot open '%s'.\n", fileName);
        exit(1);
    }
    if (fread(&numElems, sizeof(numElems), 1, fp) != 1)
    {
        fprintf(stderr, "readFromFile: short read of header in '%s'.\n", fileName);
        fclose(fp);
        exit(1);
    }
    numElems /= sizeof(T);
    T * ret = new T[numElems];
    if (numElems > 0 && fread(ret, numElems * sizeof(T), 1, fp) != 1)
    {
        fprintf(stderr, "readFromFile: short read of payload in '%s'.\n", fileName);
        fclose(fp);
        exit(1);
    }
    fclose(fp);
    return ret;
}
// Launches wordCountKernel0 (hash tables in global memory) with one thread
// per line, splitting the grid into chunks of at most MAX_BLOCKS blocks;
// blocksSoFar is forwarded as the block offset so each chunk indexes the
// right lines. numUniqueWords is unused here.
void runKernel0(const int numLines,
                void * gpuData,
                void * gpuG,
                void * gpuT0,
                void * gpuT1,
                void * gpuT2,
                void * gpuCounts,
                const int numUniqueWords)
{
    // Ceiling division: enough blocks to cover every line.
    const int totalBlocks = (numLines + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    int blocksSoFar = 0;
    while (blocksSoFar < totalBlocks)
    {
        int numBlocks = (totalBlocks - blocksSoFar > MAX_BLOCKS ? MAX_BLOCKS : totalBlocks - blocksSoFar);
        printf("running %d blocks and %d threads with %d blocks before this.\n", numBlocks, THREADS_PER_BLOCK, blocksSoFar);
        hipLaunchKernelGGL(( wordCountKernel0), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, blocksSoFar,
                                                             gpuData,
                                                             reinterpret_cast<int * >(gpuG),
                                                             reinterpret_cast<unsigned int * >(gpuT0),
                                                             reinterpret_cast<unsigned int * >(gpuT1),
                                                             reinterpret_cast<unsigned int * >(gpuT2),
                                                             reinterpret_cast<unsigned int * >(gpuCounts));
        CHECK_CUDA_ERROR();
        blocksSoFar += numBlocks;
    }
}
// Launches wordCountKernel1 (hash tables in __constant__ memory) with one
// thread per line, chunked into at most MAX_BLOCKS blocks per launch.
// numUniqueWords is unused here.
void runKernel1(const int numLines,
                void * gpuData,
                void * gpuG,
                void * gpuCounts,
                const int numUniqueWords)
{
    // Ceiling division: enough blocks to cover every line.
    const int totalBlocks = (numLines + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    int blocksSoFar = 0;
    while (blocksSoFar < totalBlocks)
    {
        int numBlocks = (totalBlocks - blocksSoFar > MAX_BLOCKS ? MAX_BLOCKS : totalBlocks - blocksSoFar);
        printf("running %d blocks and %d threads with %d blocks before this.\n", numBlocks, THREADS_PER_BLOCK, blocksSoFar);
        hipLaunchKernelGGL(( wordCountKernel1), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, blocksSoFar,
                                                             gpuData,
                                                             reinterpret_cast<int * >(gpuG),
                                                             reinterpret_cast<unsigned int * >(gpuCounts));
        CHECK_CUDA_ERROR();
        blocksSoFar += numBlocks;
    }
}
// Launches wordCountKernel2 once with a fixed NUM_BLOCKS grid; the kernel's
// grid-stride loop covers all lines regardless of grid size.
// numUniqueWords is unused here.
void runKernel2(void * gpuData,
                void * gpuG,
                void * gpuCounts,
                const int numUniqueWords)
{
    int numBlocks = NUM_BLOCKS;
    printf("running %d blocks and %d threads.\n", numBlocks, THREADS_PER_BLOCK);
    hipLaunchKernelGGL(( wordCountKernel2), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, gpuData,
                                                         reinterpret_cast<int * >(gpuG),
                                                         reinterpret_cast<unsigned int * >(gpuCounts));
    CHECK_CUDA_ERROR();
}
// Launches wordCountKernel3 (char4-streaming variant) with one thread per
// line, chunked into at most MAX_BLOCKS blocks per launch.
// numUniqueWords is unused here.
void runKernel3(const int numLines,
                void * gpuData,
                void * gpuG,
                void * gpuCounts,
                const int numUniqueWords)
{
    // Ceiling division: enough blocks to cover every line.
    const int totalBlocks = (numLines + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    int blocksSoFar = 0;
    while (blocksSoFar < totalBlocks)
    {
        int numBlocks = (totalBlocks - blocksSoFar > MAX_BLOCKS ? MAX_BLOCKS : totalBlocks - blocksSoFar);
        printf("running %d blocks and %d threads with %d blocks before this.\n", numBlocks, THREADS_PER_BLOCK, blocksSoFar);
        hipLaunchKernelGGL(( wordCountKernel3), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, blocksSoFar,
                                                             gpuData,
                                                             reinterpret_cast<int * >(gpuG),
                                                             reinterpret_cast<unsigned int * >(gpuCounts));
        CHECK_CUDA_ERROR();
        blocksSoFar += numBlocks;
    }
}
// Two-phase launch for the per-block-histogram kernel: wordCountKernel4
// writes NUM_BLOCKS partial histograms into gpuBlockCounts, then
// wordCountReduce<NUM_BLOCKS> sums them into gpuCounts. numUniqueWords is
// padded to a THREADS_PER_BLOCK multiple by the caller, so the reduce grid
// covers it exactly.
void runKernel4(void * gpuData,
                void * gpuG,
                int numUniqueWords,
                void * gpuCounts,
                void * gpuBlockCounts)
{
    printf("running %d blocks and %d threads.\n", NUM_BLOCKS, THREADS_PER_BLOCK);
    hipLaunchKernelGGL(( wordCountKernel4), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, gpuData,
                                                           reinterpret_cast<int * >(gpuG),
                                                           numUniqueWords,
                                                           reinterpret_cast<unsigned int * >(gpuBlockCounts));
    // hipMemcpyAsync(gpuCounts, gpuBlockCounts, sizeof(int) * numUniqueWords, hipMemcpyDeviceToDevice, 0);
    int numBlocks = (numUniqueWords + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    hipLaunchKernelGGL(( wordCountReduce<NUM_BLOCKS>), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, numUniqueWords,
                                                                 reinterpret_cast<unsigned int * >(gpuCounts),
                                                                 reinterpret_cast<unsigned int * >(gpuBlockCounts));
    CHECK_CUDA_ERROR();
}
// Two-phase launch mirroring runKernel4, but using the simpler
// wordCountKernel5 for the partial histograms before the same
// wordCountReduce<NUM_BLOCKS> summation into gpuCounts.
void runKernel5(void * gpuData,
                void * gpuG,
                int numUniqueWords,
                void * gpuCounts,
                void * gpuBlockCounts)
{
    printf("running %d blocks and %d threads.\n", NUM_BLOCKS, THREADS_PER_BLOCK);
    hipLaunchKernelGGL(( wordCountKernel5), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, gpuData,
                                                           reinterpret_cast<int * >(gpuG),
                                                           numUniqueWords,
                                                           reinterpret_cast<unsigned int * >(gpuBlockCounts));
    // hipMemcpyAsync(gpuCounts, gpuBlockCounts, sizeof(int) * numUniqueWords, hipMemcpyDeviceToDevice, 0);
    int numBlocks = (numUniqueWords + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    hipLaunchKernelGGL(( wordCountReduce<NUM_BLOCKS>), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, numUniqueWords,
                                                                 reinterpret_cast<unsigned int * >(gpuCounts),
                                                                 reinterpret_cast<unsigned int * >(gpuBlockCounts));
    CHECK_CUDA_ERROR();
}
// Debug validation: copies the device histogram back to the host, prints the
// index of every zero-valued bucket (a run over the full corpus is expected
// to fill all buckets), echoes the first 10 buckets, and reports the total
// number of zero entries. Buckets are read as int although the device
// writes unsigned int -- same size; sign only matters above 2^31.
void checkOutput(const int numUniqueWords, void * gpuCounts)
{
    int * cpuCounts = new int[numUniqueWords];
    hipMemcpy(cpuCounts, gpuCounts, sizeof(int) * numUniqueWords, hipMemcpyDeviceToHost); CHECK_CUDA_ERROR();
    int zero = 0;
    for (int i = 0; i < numUniqueWords; ++i)
    {
        if (cpuCounts[i] == 0)
        {
            ++zero;
            printf("%d: %d\n", i, cpuCounts[i]);
        }
    }
    for (int i = 0; i < 10; ++i) printf("%d: %d\n", i, cpuCounts[i]);
    printf("%d zero-valued entries.\n", zero);
    delete [] cpuCounts;
}
| 838d5bfce1c701526ac094f9439cc760fda74ed1.cu | #define USE_CPU 0
#include "blocksfuncs.h"
#include <cstdio>
#include <cstdlib>
// Aborts with file/function/line context if the most recent CUDA call (or
// kernel launch) left a sticky error.
// FIX: wrapped in do { ... } while (0) so the macro behaves as a single
// statement (safe in unbraced if/else), and the stray line-continuation
// backslash after the closing brace is gone -- it could silently splice the
// next source line into the macro body.
#define CHECK_CUDA_ERROR()                                                                                             \
    do                                                                                                                 \
    {                                                                                                                  \
        cudaError_t err = cudaGetLastError();                                                                          \
        if (err != cudaSuccess)                                                                                        \
        {                                                                                                              \
            printf("%s.%s.%d: %s (error code %d).\n", __FILE__, __FUNCTION__, __LINE__, cudaGetErrorString(err), err); \
            fflush(stdout);                                                                                            \
            exit(1);                                                                                                   \
        }                                                                                                              \
    } while (0)
const int NUM_BLOCKS = 30;
void testSortCPU();
template <typename T> T * readFromFile(const char * const fileName, int & numElems);
void checkOutput(const int numUniqueWords, void * gpuCounts);
void runKernel0(const int numLines,
void * gpuData,
void * gpuG,
void * gpuT0,
void * gpuT1,
void * gpuT2,
void * gpuCounts,
const int numUniqueWords);
void runKernel1(const int numLines,
void * gpuData,
void * gpuG,
void * gpuCounts,
const int numUniqueWords);
void runKernel2(void * gpuData,
void * gpuG,
void * gpuCounts,
const int numUniqueWords);
void runKernel3(const int numLines,
void * gpuData,
void * gpuG,
void * gpuCounts,
const int numUniqueWords);
void runKernel4(void * gpuData,
void * gpuG,
const int numUniqueWords,
void * gpuCounts,
void * gpuBlockCounts);
void runKernel5(void * gpuData,
void * gpuG,
const int numUniqueWords,
void * gpuCounts,
void * gpuBlockCounts);
__constant__ unsigned int hashConstT0[1624];
__constant__ unsigned int hashConstT1[1624];
__constant__ unsigned int hashConstT2[1624];
// CHM-style perfect hash over one word. Walks `key` (passed by reference)
// forward until the first character <= ' ', so on return the caller's
// pointer sits on the terminating separator. Three running sums f0/f1/f2
// accumulate per-(position, character) table entries: i starts at -65 so
// 'A' (ASCII 65) indexes slot 0, and advances 58 slots per character
// position. The sums are folded through hashG modulo 52729 and combined
// modulo 42869 (the number of unique words) to give the histogram bucket.
__host__ __device__ int hashFunction(const char *& key,
                                     const int * const hashG,
                                     const unsigned int * const hashT0,
                                     const unsigned int * const hashT1,
                                     const unsigned int * const hashT2)
{
    unsigned int f0 = 0, f1 = 0, f2 = 0;
    int i = -65;
    while (*key > ' ')
    {
        f0 += hashT0[i + *key];
        f1 += hashT1[i + *key];
        f2 += hashT2[i + *key];
        i += 58;
        ++key;
    }
    f0 %= 52729;
    f1 %= 52729;
    f2 %= 52729;
    return (hashG[f0] + hashG[f1] + hashG[f2]) % 42869;
}
// Tiny init kernel zeroing two device-side counters (intended for a
// single-thread launch). Not launched from the code visible in this file --
// presumably a leftover from an earlier experiment; confirm before removal.
__global__ void kernel(int * numWords, int * globalOffset)
{
    *numWords = 0;
    *globalOffset = 0;
}
// Baseline word-count kernel: one thread per line, with the T0/T1/T2 hash
// tables read from plain global memory (hashT0/1/2 parameters) rather than
// __constant__ memory; every word bumps the shared histogram via atomicAdd.
// blockOffset supports chunked launches when the grid would exceed the
// maximum block count.
// Input layout: int[0] = word count, int[1] = line count, int[32] onward
// (byte offset 128) = per-line byte offsets into the buffer.
__global__ void wordCountKernel0(const int blockOffset,
                                 void * const data,
                                 const int * hashG,
                                 const unsigned int * hashT0,
                                 const unsigned int * hashT1,
                                 const unsigned int * hashT2,
                                 unsigned int * const counts)
{
    const int globalIndex = (blockOffset + blockIdx.x) * blockDim.x + threadIdx.x;
    // const int numWords = reinterpret_cast<const int * >(data)[0];
    const int numLines = reinterpret_cast<const int * >(data)[1];
    const int * const lineOffsets = reinterpret_cast<const int * >(data) + 32;
    // const int * const lineLengths = lineOffsets + numLines;
    // int wordCount = 0;
    if (globalIndex < numLines)
    {
        const char * wordData = reinterpret_cast<char * >(data) + lineOffsets[globalIndex];
        while (*wordData != '\n')
        {
            if (*wordData == ' ') ++wordData;
            else
            {
                // hashFunction advances wordData past the word (by-reference arg).
                const int wordIndex = hashFunction(wordData, hashG, hashT0, hashT1, hashT2);
                atomicAdd(counts + wordIndex, 1);
            }
        }
    }
}
// Same one-thread-per-line scheme as wordCountKernel0, but the T0/T1/T2
// hash tables are read from __constant__ memory (hashConstT0/1/2), leaving
// only hashG in global memory. blockOffset supports chunked launches.
__global__ void wordCountKernel1(const int blockOffset,
                                 void * const data,
                                 const int * hashG,
                                 unsigned int * const counts)
{
    const int globalIndex = (blockOffset + blockIdx.x) * blockDim.x + threadIdx.x;
    // const int numWords = reinterpret_cast<const int * >(data)[0];
    const int numLines = reinterpret_cast<const int * >(data)[1];
    const int * const lineOffsets = reinterpret_cast<const int * >(data) + 32;
    // const int * const lineLengths = lineOffsets + numLines;
    // int wordCount = 0;
    if (globalIndex < numLines)
    {
        const char * wordData = reinterpret_cast<char * >(data) + lineOffsets[globalIndex];
        while (*wordData != '\n')
        {
            if (*wordData == ' ') ++wordData;
            else
            {
                // hashFunction advances wordData past the word (by-reference arg).
                const int wordIndex = hashFunction(wordData, hashG, hashConstT0, hashConstT1, hashConstT2);
                atomicAdd(counts + wordIndex, 1);
            }
        }
    }
}
// Grid-stride word-count pass: each thread walks whole lines and, for every
// word found, bumps the shared global histogram bucket with atomicAdd.
// Input layout (set up by the host): int[0] = word count, int[1] = line
// count, int[32] onward (byte offset 128) = per-line byte offsets into the
// buffer. The T0/T1/T2 hash tables live in __constant__ memory.
__global__ void wordCountKernel2(void * const data,
                                 const int * hashG,
                                 unsigned int * const counts)
{
    // const int numWords = reinterpret_cast<const int * >(data)[0];
    const int numLines = reinterpret_cast<const int * >(data)[1];
    const int * const lineOffsets = reinterpret_cast<const int * >(data) + 32;
    // const int * const lineLengths = lineOffsets + numLines;
    // int wordCount = 0;
    // Grid-stride loop: one line per iteration, correct for any grid size.
    for (int globalIndex = blockIdx.x * blockDim.x + threadIdx.x; globalIndex < numLines; globalIndex += gridDim.x * blockDim.x)
    {
        const char * wordData = reinterpret_cast<char * >(data) + lineOffsets[globalIndex];
        while (*wordData != '\n')
        {
            if (*wordData == ' ') ++wordData;
            else
            {
                // hashFunction advances wordData past the word (by-reference arg).
                const int wordIndex = hashFunction(wordData, hashG, hashConstT0, hashConstT1, hashConstT2);
                atomicAdd(counts + wordIndex, 1);
            }
        }
    }
}
// Feeds one character into the incremental perfect-hash state.
// f0/f1/f2 accumulate per-(position, character) lookups from the
// __constant__ tables; i starts at -65 (so 'A' == 65 indexes slot 0) and
// advances 58 slots per character -- the same scheme as hashFunction.
// The first separator (<= ' ') after a word flushes the finished hash into
// the histogram via atomicAdd and resets the state for the next word.
__device__ void checkCharacterForHash(const char key, bool & inWord, unsigned int & f0, unsigned int & f1, unsigned int & f2, int & i, const int * hashG, unsigned int * const counts)
{
    if (key > ' ')
    {
        inWord = true;
        f0 += hashConstT0[i + key];
        f1 += hashConstT1[i + key];
        f2 += hashConstT2[i + key];
        i += 58;
    }
    else if (inWord)
    {
        inWord = false;
        // 52729 / 42869: intermediate table modulus and unique-word count,
        // matching the constants in hashFunction.
        atomicAdd(counts + (hashG[f0 % 52729] + hashG[f1 % 52729] + hashG[f2 % 52729]) % 42869, 1);
        f0 = f1 = f2 = 0;
        i = -65;
    }
}
// Character-streaming variant: one thread per line (chunked launches pass
// blockOffset), hashing characters incrementally via checkCharacterForHash
// instead of re-walking each word. The active path reads the line as char4
// vectors to widen the global-memory loads.
// NOTE(review): the char4 path assumes every line's byte offset is 4-byte
// aligned -- confirm the host-side file layout guarantees this.
// NOTE(review): all four bytes of the final char4 are processed even when
// '\n' is not the .w component, so bytes past the newline (start of the next
// line) are also hashed; presumably the input pads lines to avoid
// double-counting -- verify. The call after the loop re-feeds the already
// flushed '\n' and is therefore a no-op.
__global__ void wordCountKernel3(const int blockOffset,
                                 void * const data,
                                 const int * hashG,
                                 unsigned int * const counts)
{
    const int globalIndex = (blockOffset + blockIdx.x) * blockDim.x + threadIdx.x;
    // const int numWords = reinterpret_cast<const int * >(data)[0];
    const int numLines = reinterpret_cast<const int * >(data)[1];
    const int * const lineOffsets = reinterpret_cast<const int * >(data) + 32;
    // int wordCount = 0;
    if (globalIndex < numLines)
    {
        unsigned int f0 = 0, f1 = 0, f2 = 0;
        int i = -65;
        bool inWord = false;
#if 0
        // Scalar reference path: one byte at a time.
        const char * wordData = reinterpret_cast<const char * >(data) + lineOffsets[globalIndex];
        while (*wordData != '\n')
        {
            checkCharacterForHash(*(wordData++), inWord, f0, f1, f2, i, hashG, counts);
        }
        checkCharacterForHash(*wordData, inWord, f0, f1, f2, i, hashG, counts);
#else
        // Vectorized path: consume the line four bytes at a time.
        const char4 * wordData = reinterpret_cast<const char4 * >(reinterpret_cast<const char * >(data) + lineOffsets[globalIndex]);
        char4 patch, nextPatch = *(wordData++);
        do
        {
            patch = nextPatch;
            nextPatch = *(wordData++);
            checkCharacterForHash(patch.x, inWord, f0, f1, f2, i, hashG, counts);
            checkCharacterForHash(patch.y, inWord, f0, f1, f2, i, hashG, counts);
            checkCharacterForHash(patch.z, inWord, f0, f1, f2, i, hashG, counts);
            checkCharacterForHash(patch.w, inWord, f0, f1, f2, i, hashG, counts);
        }
        while (patch.w != '\n');
        checkCharacterForHash(patch.w, inWord, f0, f1, f2, i, hashG, counts);
#endif
    }
}
// Per-block-histogram variant: each block accumulates into its own private
// slice of `counts` (offset numUniqueWords * blockIdx.x) so no global
// atomics are needed; wordCountReduce later sums the slices.
// Each round every thread hashes the next word of its line; the shared
// `done` flags are summed with reduceAdd to detect when all lines in the
// current tile are exhausted.
// NOTE(review): the active #else path increments counts[...] non-atomically,
// so two threads of the same block hashing the same word in one round lose
// updates -- confirm this is an accepted trade-off of this experiment.
// NOTE(review): re-writing done[threadIdx.x] right after reading done[0] is
// only safe if reduceAdd ends with a block-wide barrier -- verify.
__global__ void wordCountKernel4(void * const data,
                                 const int * hashG,
                                 const int numUniqueWords,
                                 unsigned int * counts)
{
    __shared__ int wordIndices[THREADS_PER_BLOCK];
    __shared__ int done[THREADS_PER_BLOCK];
    const int numLines = reinterpret_cast<const int * >(data)[1];
    const int * const lineOffsets = reinterpret_cast<const int * >(data) + 32;
    // Point this block at its private histogram slice.
    counts += numUniqueWords * blockIdx.x;
    for (int globalOffset = blockIdx.x * blockDim.x; globalOffset < numLines; globalOffset += gridDim.x * blockDim.x)
    {
        const int globalIndex = globalOffset + threadIdx.x;
        const char * wordData;
        if (globalIndex < numLines) wordData = reinterpret_cast<char * >(data) + lineOffsets[globalIndex];
        do
        {
            if (globalIndex >= numLines || *wordData == '\n')
            {
                wordIndices[threadIdx.x] = -1;  // -1 marks "no word this round"
                done[threadIdx.x] = 1;
            }
            else
            {
                while (*wordData == ' ') ++wordData;
                if (*wordData == '\n')
                {
                    wordIndices[threadIdx.x] = -1;
                    done[threadIdx.x] = 1;
                }
                else
                {
                    wordIndices[threadIdx.x] = hashFunction(wordData, hashG, hashConstT0, hashConstT1, hashConstT2);
                    done[threadIdx.x] = 0;
                }
            }
#if 0
            // Alternative: sort the round's hashes so equal values are
            // adjacent, then let one thread per run write the whole count.
            bitonicSort<int, 1>(wordIndices);
            const int wordIndex = wordIndices[threadIdx.x];
            int count = 1;
            if (wordIndex != -1 && (threadIdx.x == 0 || wordIndices[threadIdx.x - 1] != wordIndex))
            {
                int index = threadIdx.x + 1;
                while (index < THREADS_PER_BLOCK && wordIndices[index] == wordIndex)
                {
                    ++index;
                    ++count;
                }
                counts[wordIndex] += count;
            }
#else
            if (wordIndices[threadIdx.x] != -1) counts[wordIndices[threadIdx.x]]++;
#endif
            reduceAdd<int>(done);
        }
        while (done[0] < THREADS_PER_BLOCK);
    }
    __syncthreads();
}
// Simplest per-block-histogram variant: grid-stride over lines, each block
// writing its own slice of `counts` (offset numUniqueWords * blockIdx.x),
// which wordCountReduce sums afterwards.
// NOTE(review): the increments are non-atomic, so threads of the same block
// counting the same word concurrently can lose updates -- confirm intent.
__global__ void wordCountKernel5(void * const data,
                                 const int * hashG,
                                 const int numUniqueWords,
                                 unsigned int * counts)
{
    const int numLines = reinterpret_cast<const int * >(data)[1];
    const int * const lineOffsets = reinterpret_cast<const int * >(data) + 32;
    // Point this block at its private histogram slice.
    counts += numUniqueWords * blockIdx.x;
    for (int globalIndex = blockIdx.x * blockDim.x + threadIdx.x; globalIndex < numLines; globalIndex += gridDim.x * blockDim.x)
    {
        const char * wordData = reinterpret_cast<char * >(data) + lineOffsets[globalIndex];
        while (*wordData != '\n')
        {
            if (*wordData == ' ') ++wordData;
            else counts[hashFunction(wordData, hashG, hashConstT0, hashConstT1, hashConstT2)]++;
        }
    }
}
// Folds ELEMS_PER_THREAD per-block partial histograms into the final one:
// counts[index] = sum_i blockCounts[index + numUniqueWords * i].
// No bounds check: the launch must cover exactly numUniqueWords entries,
// which the host guarantees by padding numUniqueWords up to a multiple of
// THREADS_PER_BLOCK and allocating the buffers at the padded size.
template <int ELEMS_PER_THREAD>
__global__ void wordCountReduce(const int numUniqueWords, unsigned int * const counts, const unsigned int * const blockCounts)
{
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int accum = 0;
    for (int i = 0; i < ELEMS_PER_THREAD; ++i)
    {
        accum += blockCounts[index + numUniqueWords * i];
    }
    counts[index] = accum;
}
// Debug kernel for the shared-memory bitonic sort: stages KEYS_PER_THREAD
// key/value pairs per thread into shared memory, sorts them by key with
// bitonicSort, and writes the result back in place. Single-block use only:
// the indices are built from threadIdx/blockDim with no block offset.
template <int THREADS, int KEYS_PER_THREAD>
__global__ void testSort(int * keys, int * vals)
{
    __shared__ int sharedKeys[THREADS * KEYS_PER_THREAD];
    __shared__ int sharedVals[THREADS * KEYS_PER_THREAD];
    for (int i = 0; i < KEYS_PER_THREAD; ++i)
    {
        sharedKeys[threadIdx.x + blockDim.x * i] = keys[threadIdx.x + blockDim.x * i];
        sharedVals[threadIdx.x + blockDim.x * i] = vals[threadIdx.x + blockDim.x * i];
    }
    bitonicSort<int, int, KEYS_PER_THREAD>(sharedKeys, sharedVals);
    for (int i = 0; i < KEYS_PER_THREAD; ++i)
    {
        keys[threadIdx.x + blockDim.x * i] = sharedKeys[threadIdx.x + blockDim.x * i];
        vals[threadIdx.x + blockDim.x * i] = sharedVals[threadIdx.x + blockDim.x * i];
    }
}
// Benchmark driver: loads the perfect-hash tables and the preprocessed input
// file (argv[1]), uploads everything to the GPU, then times each of the six
// word-count kernel variants with cudaEvents and validates its histogram.
int main(int argc, char ** argv)
{
    const int NUM_KERNELS = 6;
    cudaEvent_t beginEvents[NUM_KERNELS];
    cudaEvent_t endEvents[NUM_KERNELS];
    int numWords, numLines, len, numUniqueWords = 42869;
    char * inputFile, * ptr;
    int gElems, t0Elems, t1Elems, t2Elems;
    int * cpuG;
    unsigned int * cpuT0, * cpuT1, * cpuT2;
    // Guard against a missing command-line argument (the original
    // dereferenced argv[1] unconditionally).
    if (argc < 2)
    {
        fprintf(stderr, "usage: %s <preprocessed input file>\n", argv[0]);
        return 1;
    }
    cpuG = readFromFile<int> ("/home/stuart/pico/data/wordcount/wordlist.g", gElems);
    cpuT0 = readFromFile<unsigned int>("/home/stuart/pico/data/wordcount/wordlist.T0", t0Elems);
    cpuT1 = readFromFile<unsigned int>("/home/stuart/pico/data/wordcount/wordlist.T1", t1Elems);
    cpuT2 = readFromFile<unsigned int>("/home/stuart/pico/data/wordcount/wordlist.T2", t2Elems);
    FILE * fp = fopen(argv[1], "rb");
    if (fp == NULL)
    {
        fprintf(stderr, "cannot open '%s'.\n", argv[1]);
        return 1;
    }
    fseek(fp, 0, SEEK_END);
    len = ftell(fp);
    fseek(fp, 0, SEEK_SET);
    inputFile = new char[len];
    if (fread(inputFile, len, 1, fp) != 1)
    {
        fprintf(stderr, "short read of '%s'.\n", argv[1]);
        return 1;
    }
    fclose(fp);
    // File header: word count, line count, then 120 reserved bytes -- the
    // per-line offset table starts at byte 128 (int index 32 on the device).
    ptr = inputFile;
    numWords = *reinterpret_cast<int * >(ptr); ptr += sizeof(int);
    numLines = *reinterpret_cast<int * >(ptr); ptr += sizeof(int);
    ptr += 120;
    // Pad the histogram size to a block-size multiple so wordCountReduce
    // needs no bounds check; validate against the unpadded size.
    const int realNumUniqueWords = numUniqueWords;
    if (numUniqueWords % THREADS_PER_BLOCK != 0) numUniqueWords += THREADS_PER_BLOCK - numUniqueWords % THREADS_PER_BLOCK;
    printf("%d bytes, %d words and %d lines.\n", len, numWords, numLines);
    fflush(stdout);
    void * gpuData, * gpuG, * gpuCounts, * gpuT0, * gpuT1, * gpuT2, * gpuGlobalOffset, * gpuBlockCounts;
    cudaMalloc(&gpuData, len); CHECK_CUDA_ERROR();
    cudaMalloc(&gpuG, gElems * sizeof(int)); CHECK_CUDA_ERROR();
    cudaMalloc(&gpuT0, t0Elems * sizeof(unsigned int)); CHECK_CUDA_ERROR();
    cudaMalloc(&gpuT1, t1Elems * sizeof(unsigned int)); CHECK_CUDA_ERROR();
    cudaMalloc(&gpuT2, t2Elems * sizeof(unsigned int)); CHECK_CUDA_ERROR();
    cudaMalloc(&gpuCounts, numUniqueWords * sizeof(int)); CHECK_CUDA_ERROR();
    cudaMalloc(&gpuGlobalOffset, sizeof(int)); CHECK_CUDA_ERROR();
    cudaMalloc(&gpuBlockCounts, numUniqueWords * sizeof(int) * NUM_BLOCKS); CHECK_CUDA_ERROR();
    cudaMemcpy(gpuData, inputFile, len, cudaMemcpyHostToDevice); CHECK_CUDA_ERROR();
    cudaMemcpy(gpuG, cpuG, gElems * sizeof(int), cudaMemcpyHostToDevice); CHECK_CUDA_ERROR();
    cudaMemcpy(gpuT0, cpuT0, t0Elems * sizeof(unsigned int), cudaMemcpyHostToDevice); CHECK_CUDA_ERROR();
    cudaMemcpy(gpuT1, cpuT1, t1Elems * sizeof(unsigned int), cudaMemcpyHostToDevice); CHECK_CUDA_ERROR();
    cudaMemcpy(gpuT2, cpuT2, t2Elems * sizeof(unsigned int), cudaMemcpyHostToDevice); CHECK_CUDA_ERROR();
    // FIX: pass the __constant__ symbols themselves, not their names as
    // strings -- string-based symbol lookup was removed in CUDA 5.0 and the
    // copies would fail with cudaErrorInvalidSymbol, leaving the constant
    // tables zeroed.
    cudaMemcpyToSymbol(hashConstT0, cpuT0, t0Elems * sizeof(unsigned int), 0, cudaMemcpyHostToDevice); CHECK_CUDA_ERROR();
    cudaMemcpyToSymbol(hashConstT1, cpuT1, t1Elems * sizeof(unsigned int), 0, cudaMemcpyHostToDevice); CHECK_CUDA_ERROR();
    cudaMemcpyToSymbol(hashConstT2, cpuT2, t2Elems * sizeof(unsigned int), 0, cudaMemcpyHostToDevice); CHECK_CUDA_ERROR();
    cudaMemset(gpuGlobalOffset, 0, sizeof(int)); CHECK_CUDA_ERROR();
    for (int i = 0; i < NUM_KERNELS; ++i)
    {
        cudaEventCreate(beginEvents + i);
        cudaEventCreate(endEvents + i);
    }
    // Host copies are no longer needed once the device holds the data.
    delete [] cpuG;
    delete [] cpuT0;
    delete [] cpuT1;
    delete [] cpuT2;
    delete [] inputFile;
    // For each variant: clear the histogram, time the launch between two
    // events on the default stream, then validate the result on the host.
    int KERNEL_INDEX = 0;
    cudaMemset(gpuCounts, 0, sizeof(int) * numUniqueWords); CHECK_CUDA_ERROR();
    cudaStreamSynchronize(0); CHECK_CUDA_ERROR();
    cudaEventRecord(beginEvents[KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    runKernel0(numLines, gpuData, gpuG, gpuT0, gpuT1, gpuT2, gpuCounts, numUniqueWords); CHECK_CUDA_ERROR();
    cudaEventRecord(endEvents [KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    cudaEventSynchronize(endEvents[KERNEL_INDEX]); CHECK_CUDA_ERROR();
    checkOutput(realNumUniqueWords, gpuCounts);
    ++KERNEL_INDEX;
    cudaMemset(gpuCounts, 0, sizeof(int) * numUniqueWords); CHECK_CUDA_ERROR();
    cudaStreamSynchronize(0); CHECK_CUDA_ERROR();
    cudaEventRecord(beginEvents[KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    runKernel1(numLines, gpuData, gpuG, gpuCounts, numUniqueWords); CHECK_CUDA_ERROR();
    cudaEventRecord(endEvents [KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    cudaEventSynchronize(endEvents[KERNEL_INDEX]); CHECK_CUDA_ERROR();
    checkOutput(realNumUniqueWords, gpuCounts);
    ++KERNEL_INDEX;
    cudaMemset(gpuCounts, 0, sizeof(int) * numUniqueWords); CHECK_CUDA_ERROR();
    cudaStreamSynchronize(0); CHECK_CUDA_ERROR();
    cudaEventRecord(beginEvents[KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    runKernel2(gpuData, gpuG, gpuCounts, numUniqueWords); CHECK_CUDA_ERROR();
    cudaEventRecord(endEvents [KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    cudaEventSynchronize(endEvents[KERNEL_INDEX]); CHECK_CUDA_ERROR();
    checkOutput(realNumUniqueWords, gpuCounts);
    ++KERNEL_INDEX;
    cudaMemset(gpuCounts, 0, sizeof(int) * numUniqueWords); CHECK_CUDA_ERROR();
    cudaStreamSynchronize(0); CHECK_CUDA_ERROR();
    cudaEventRecord(beginEvents[KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    runKernel3(numLines, gpuData, gpuG, gpuCounts, numUniqueWords); CHECK_CUDA_ERROR();
    cudaEventRecord(endEvents [KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    cudaEventSynchronize(endEvents[KERNEL_INDEX]); CHECK_CUDA_ERROR();
    checkOutput(realNumUniqueWords, gpuCounts);
    ++KERNEL_INDEX;
    // Kernels 4 and 5 write per-block partials, so their scratch buffer is
    // cleared too.
    cudaMemset(gpuCounts, 0, sizeof(int) * numUniqueWords); CHECK_CUDA_ERROR();
    cudaMemset(gpuBlockCounts, 0, sizeof(int) * numUniqueWords * NUM_BLOCKS); CHECK_CUDA_ERROR();
    cudaStreamSynchronize(0); CHECK_CUDA_ERROR();
    cudaEventRecord(beginEvents[KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    runKernel4(gpuData, gpuG, numUniqueWords, gpuCounts, gpuBlockCounts); CHECK_CUDA_ERROR();
    cudaEventRecord(endEvents [KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    cudaEventSynchronize(endEvents[KERNEL_INDEX]); CHECK_CUDA_ERROR();
    checkOutput(realNumUniqueWords, gpuCounts);
    ++KERNEL_INDEX;
    cudaMemset(gpuCounts, 0, sizeof(int) * numUniqueWords); CHECK_CUDA_ERROR();
    cudaMemset(gpuBlockCounts, 0, sizeof(int) * numUniqueWords * NUM_BLOCKS); CHECK_CUDA_ERROR();
    cudaStreamSynchronize(0); CHECK_CUDA_ERROR();
    cudaEventRecord(beginEvents[KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    runKernel5(gpuData, gpuG, numUniqueWords, gpuCounts, gpuBlockCounts); CHECK_CUDA_ERROR();
    cudaEventRecord(endEvents [KERNEL_INDEX], 0); CHECK_CUDA_ERROR();
    cudaEventSynchronize(endEvents[KERNEL_INDEX]); CHECK_CUDA_ERROR();
    checkOutput(realNumUniqueWords, gpuCounts);
    ++KERNEL_INDEX;
    for (int i = 0; i < KERNEL_INDEX; ++i)
    {
        float ms;
        cudaEventElapsedTime(&ms, beginEvents[i], endEvents[i]);
        printf("kernel %d took %.3f ms.\n", i, ms);
    }
    // Release device resources (the original leaked them; harmless at
    // process exit but tidy for leak-checking tools).
    for (int i = 0; i < NUM_KERNELS; ++i)
    {
        cudaEventDestroy(beginEvents[i]);
        cudaEventDestroy(endEvents[i]);
    }
    cudaFree(gpuData); cudaFree(gpuG); cudaFree(gpuT0); cudaFree(gpuT1); cudaFree(gpuT2);
    cudaFree(gpuCounts); cudaFree(gpuGlobalOffset); cudaFree(gpuBlockCounts);
    return 0;
}
// Smoke-test for the block-level testSort kernel (defined elsewhere):
// builds a key/value permutation on the host, sorts it on the GPU with a
// single block, and prints the result for manual inspection.
// NOTE(review): no CUDA error checking here, unlike the rest of the file;
// `swap` is presumably std::swap brought in by a using-directive — confirm.
void testSortCPU()
{
    const int THREADS = THREADS_PER_BLOCK;
    const int KEYS_PER_THREAD = 4;
    int * keys = new int[THREADS * KEYS_PER_THREAD];
    int * vals = new int[THREADS * KEYS_PER_THREAD];
    int * gpuKeys, * gpuVals;
    cudaMalloc(reinterpret_cast<void ** >(&gpuKeys), sizeof(int) * THREADS * KEYS_PER_THREAD);
    cudaMalloc(reinterpret_cast<void ** >(&gpuVals), sizeof(int) * THREADS * KEYS_PER_THREAD);
    // keys ascending, vals descending, so after sorting by key the values
    // should read back in descending order.
    for (int i = 0; i < THREADS * KEYS_PER_THREAD; ++i)
    {
        keys[i] = i;
        vals[i] = THREADS * KEYS_PER_THREAD - i - 1;
    }
    // Shuffle both arrays with the same random swaps to keep pairs aligned.
    for (int i = 0; i < THREADS * KEYS_PER_THREAD; ++i)
    {
        const int ind0 = rand() % (THREADS * KEYS_PER_THREAD);
        const int ind1 = rand() % (THREADS * KEYS_PER_THREAD);
        swap(keys[ind0], keys[ind1]);
        swap(vals[ind0], vals[ind1]);
    }
    cudaMemcpy(gpuKeys, keys, sizeof(int) * THREADS * KEYS_PER_THREAD, cudaMemcpyHostToDevice);
    cudaMemcpy(gpuVals, vals, sizeof(int) * THREADS * KEYS_PER_THREAD, cudaMemcpyHostToDevice);
    // Single-block launch: testSort sorts THREADS*KEYS_PER_THREAD pairs in-place.
    testSort<THREADS, KEYS_PER_THREAD><<<1, THREADS>>>(gpuKeys, gpuVals);
    // cudaMemcpy is synchronizing, so no explicit cudaDeviceSynchronize needed.
    cudaMemcpy(keys, gpuKeys, sizeof(int) * THREADS * KEYS_PER_THREAD, cudaMemcpyDeviceToHost);
    cudaMemcpy(vals, gpuVals, sizeof(int) * THREADS * KEYS_PER_THREAD, cudaMemcpyDeviceToHost);
    for (int i = 0; i < THREADS * KEYS_PER_THREAD; ++i)
    {
        printf("%4d: %4d %4d\n", i, keys[i], vals[i]);
    }
    fflush(stdout);
    cudaFree(gpuKeys);
    cudaFree(gpuVals);
    delete [] keys;
    delete [] vals;
}
// Reads a length-prefixed binary file: a leading int holds the payload size
// in BYTES, followed by the payload itself.  On success returns a heap array
// of numElems T's (caller releases with delete[]).  On any I/O failure,
// returns NULL and sets numElems to 0 (the original dereferenced a NULL
// FILE* on a missing file and ignored fread's return value).
template <typename T>
T * readFromFile(const char * const fileName, int & numElems)
{
    numElems = 0;
    FILE * fp = fopen(fileName, "rb");
    if (fp == NULL) return NULL;                       // missing/unreadable file
    int numBytes = 0;
    if (fread(&numBytes, sizeof(numBytes), 1, fp) != 1)
    {
        fclose(fp);
        return NULL;                                   // truncated header
    }
    numElems = numBytes / static_cast<int>(sizeof(T)); // byte count -> element count
    T * ret = new T[numElems];
    if (numElems > 0 && fread(ret, numElems * sizeof(T), 1, fp) != 1)
    {
        delete [] ret;
        fclose(fp);
        numElems = 0;
        return NULL;                                   // truncated payload
    }
    fclose(fp);
    return ret;
}
// Drives wordCountKernel0 over all input lines.  The grid is split into
// successive launches of at most MAX_BLOCKS blocks; the number of blocks
// already processed is passed to the kernel so it can offset its indexing.
void runKernel0(const int numLines,
                void * gpuData,
                void * gpuG,
                void * gpuT0,
                void * gpuT1,
                void * gpuT2,
                void * gpuCounts,
                const int numUniqueWords)
{
    const int totalBlocks = (numLines + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    for (int launched = 0; launched < totalBlocks; )
    {
        const int remaining = totalBlocks - launched;
        const int numBlocks = (remaining > MAX_BLOCKS) ? MAX_BLOCKS : remaining;
        printf("running %d blocks and %d threads with %d blocks before this.\n", numBlocks, THREADS_PER_BLOCK, launched);
        wordCountKernel0<<<numBlocks, THREADS_PER_BLOCK, 0, 0>>>(launched,
                                                                 gpuData,
                                                                 reinterpret_cast<int * >(gpuG),
                                                                 reinterpret_cast<unsigned int * >(gpuT0),
                                                                 reinterpret_cast<unsigned int * >(gpuT1),
                                                                 reinterpret_cast<unsigned int * >(gpuT2),
                                                                 reinterpret_cast<unsigned int * >(gpuCounts));
        CHECK_CUDA_ERROR();
        launched += numBlocks;
    }
}
// Drives wordCountKernel1 over all input lines, chunking the grid so no
// single launch exceeds MAX_BLOCKS blocks.
void runKernel1(const int numLines,
                void * gpuData,
                void * gpuG,
                void * gpuCounts,
                const int numUniqueWords)
{
    const int totalBlocks = (numLines + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    for (int launched = 0; launched < totalBlocks; )
    {
        const int remaining = totalBlocks - launched;
        const int numBlocks = (remaining > MAX_BLOCKS) ? MAX_BLOCKS : remaining;
        printf("running %d blocks and %d threads with %d blocks before this.\n", numBlocks, THREADS_PER_BLOCK, launched);
        wordCountKernel1<<<numBlocks, THREADS_PER_BLOCK, 0, 0>>>(launched,
                                                                 gpuData,
                                                                 reinterpret_cast<int * >(gpuG),
                                                                 reinterpret_cast<unsigned int * >(gpuCounts));
        CHECK_CUDA_ERROR();
        launched += numBlocks;
    }
}
// Single fixed-size launch of wordCountKernel2 (NUM_BLOCKS blocks); the
// kernel is expected to stride over the whole input itself.
void runKernel2(void * gpuData,
                void * gpuG,
                void * gpuCounts,
                const int numUniqueWords)
{
    const int numBlocks = NUM_BLOCKS;
    printf("running %d blocks and %d threads.\n", numBlocks, THREADS_PER_BLOCK);
    wordCountKernel2<<<numBlocks, THREADS_PER_BLOCK, 0, 0>>>(gpuData,
                                                             reinterpret_cast<int * >(gpuG),
                                                             reinterpret_cast<unsigned int * >(gpuCounts));
    CHECK_CUDA_ERROR();
}
// Drives wordCountKernel3 over all input lines, chunking the grid so no
// single launch exceeds MAX_BLOCKS blocks.
void runKernel3(const int numLines,
                void * gpuData,
                void * gpuG,
                void * gpuCounts,
                const int numUniqueWords)
{
    const int totalBlocks = (numLines + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    for (int launched = 0; launched < totalBlocks; )
    {
        const int remaining = totalBlocks - launched;
        const int numBlocks = (remaining > MAX_BLOCKS) ? MAX_BLOCKS : remaining;
        printf("running %d blocks and %d threads with %d blocks before this.\n", numBlocks, THREADS_PER_BLOCK, launched);
        wordCountKernel3<<<numBlocks, THREADS_PER_BLOCK, 0, 0>>>(launched,
                                                                 gpuData,
                                                                 reinterpret_cast<int * >(gpuG),
                                                                 reinterpret_cast<unsigned int * >(gpuCounts));
        CHECK_CUDA_ERROR();
        launched += numBlocks;
    }
}
// Two-phase count: wordCountKernel4 accumulates per-block partial counts into
// gpuBlockCounts, then wordCountReduce folds the NUM_BLOCKS partials for each
// unique word into gpuCounts.  Both launches go to the default stream, so
// they execute in order.
void runKernel4(void * gpuData,
                void * gpuG,
                int numUniqueWords,
                void * gpuCounts,
                void * gpuBlockCounts)
{
    printf("running %d blocks and %d threads.\n", NUM_BLOCKS, THREADS_PER_BLOCK);
    wordCountKernel4<<<NUM_BLOCKS, THREADS_PER_BLOCK, 0, 0>>>(gpuData,
                                                              reinterpret_cast<int * >(gpuG),
                                                              numUniqueWords,
                                                              reinterpret_cast<unsigned int * >(gpuBlockCounts));
    // cudaMemcpyAsync(gpuCounts, gpuBlockCounts, sizeof(int) * numUniqueWords, cudaMemcpyDeviceToDevice, 0);
    const int reduceBlocks = (numUniqueWords + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    wordCountReduce<NUM_BLOCKS><<<reduceBlocks, THREADS_PER_BLOCK, 0, 0>>>(numUniqueWords,
                                                                           reinterpret_cast<unsigned int * >(gpuCounts),
                                                                           reinterpret_cast<unsigned int * >(gpuBlockCounts));
    CHECK_CUDA_ERROR();
}
// Same two-phase structure as runKernel4 but using wordCountKernel5 for the
// per-block accumulation phase.
void runKernel5(void * gpuData,
                void * gpuG,
                int numUniqueWords,
                void * gpuCounts,
                void * gpuBlockCounts)
{
    printf("running %d blocks and %d threads.\n", NUM_BLOCKS, THREADS_PER_BLOCK);
    wordCountKernel5<<<NUM_BLOCKS, THREADS_PER_BLOCK, 0, 0>>>(gpuData,
                                                              reinterpret_cast<int * >(gpuG),
                                                              numUniqueWords,
                                                              reinterpret_cast<unsigned int * >(gpuBlockCounts));
    // cudaMemcpyAsync(gpuCounts, gpuBlockCounts, sizeof(int) * numUniqueWords, cudaMemcpyDeviceToDevice, 0);
    const int reduceBlocks = (numUniqueWords + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    wordCountReduce<NUM_BLOCKS><<<reduceBlocks, THREADS_PER_BLOCK, 0, 0>>>(numUniqueWords,
                                                                           reinterpret_cast<unsigned int * >(gpuCounts),
                                                                           reinterpret_cast<unsigned int * >(gpuBlockCounts));
    CHECK_CUDA_ERROR();
}
// Copies the per-word counts back to the host and reports entries that are
// still zero (every unique word is expected to occur at least once); also
// echoes the first few counts as a spot check.
void checkOutput(const int numUniqueWords, void * gpuCounts)
{
    int * cpuCounts = new int[numUniqueWords];
    cudaMemcpy(cpuCounts, gpuCounts, sizeof(int) * numUniqueWords, cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR();
    int zero = 0;
    for (int i = 0; i < numUniqueWords; ++i)
    {
        if (cpuCounts[i] == 0)
        {
            ++zero;
            printf("%d: %d\n", i, cpuCounts[i]);
        }
    }
    // Bug fix: the preview loop used a fixed bound of 10 and read past the
    // end of cpuCounts whenever numUniqueWords < 10.
    const int preview = (numUniqueWords < 10) ? numUniqueWords : 10;
    for (int i = 0; i < preview; ++i) printf("%d: %d\n", i, cpuCounts[i]);
    printf("%d zero-valued entries.\n", zero);
    delete [] cpuCounts;
}
|
bf20a83122aca367b1120cc0353dba2171646144.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "KernelFunctionDefinitions.h"
// Element-wise vector add over global memory: output[i] = input1[i] + input2[i].
// Expects a 1-D launch; threads with an index past `count` return untouched.
__global__ void globalMemAdd(int* output, const int* input1, const int* input2, const size_t count)
{
    const unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= count) {
        return;
    }
    output[idx] = input1[idx] + input2[idx];
}
// Element-wise vector subtract over global memory: output[i] = input1[i] - input2[i].
// Expects a 1-D launch; threads with an index past `count` return untouched.
__global__ void globalMemSub(int* output, const int* input1, const int* input2, const size_t count)
{
    const unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= count) {
        return;
    }
    output[idx] = input1[idx] - input2[idx];
}
// Element-wise vector multiply over global memory: output[i] = input1[i] * input2[i].
// Expects a 1-D launch; threads with an index past `count` return untouched.
__global__ void globalMemMult(int* output, const int* input1, const int* input2, const size_t count)
{
    const unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= count) {
        return;
    }
    output[idx] = input1[idx] * input2[idx];
}
// Element-wise modulo over global memory: output[tid] = input1[tid] % input2[tid]
// for tid < count.  NOTE(review): a zero anywhere in input2 is integer
// division by zero on the device (undefined result) — confirm callers
// guarantee non-zero divisors.
__global__ void globalMemMod(int* output, const int* input1, const int* input2, const size_t count)
{
    const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid < count) {
        output[tid] = input1[tid] % input2[tid];
    }
}
| bf20a83122aca367b1120cc0353dba2171646144.cu | #include "KernelFunctionDefinitions.h"
// Element-wise vector add: output[tid] = input1[tid] + input2[tid] for tid < count.
__global__ void globalMemAdd(int* output, const int* input1, const int* input2, const size_t count)
{
    const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid < count) {
        output[tid] = input1[tid] + input2[tid];
    }
}
// Element-wise vector subtract: output[tid] = input1[tid] - input2[tid] for tid < count.
__global__ void globalMemSub(int* output, const int* input1, const int* input2, const size_t count)
{
    const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid < count) {
        output[tid] = input1[tid] - input2[tid];
    }
}
// Element-wise vector multiply: output[tid] = input1[tid] * input2[tid] for tid < count.
__global__ void globalMemMult(int* output, const int* input1, const int* input2, const size_t count)
{
    const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid < count) {
        output[tid] = input1[tid] * input2[tid];
    }
}
// Element-wise modulo: output[tid] = input1[tid] % input2[tid] for tid < count.
// NOTE(review): zero divisors in input2 are undefined on the device — confirm
// callers guarantee non-zero values.
__global__ void globalMemMod(int* output, const int* input1, const int* input2, const size_t count)
{
    const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (tid < count) {
        output[tid] = input1[tid] % input2[tid];
    }
}
|
a6b9744731275b6bd016e5ab6b41573ad441bbdb.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
// Kernel definition
// Kernel definition
// C[i] = A[i] + B[i] for one block of threads.  There is no bounds guard:
// the launch must use exactly one block with one thread per element.
__global__ void VecAdd(float* A, float* B, float* C)
{
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}
#include <cuda.h>
// Kernel definition
// Kernel definition
// C[i] = A[i] + B[i] for one block of threads.  There is no bounds guard:
// the launch must use exactly one block with one thread per element.
__global__ void VecAdd(float* A, float* B, float* C)
{
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}
b71f64991b09fbb3b09a6edd08724258c18f9b2b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <stdio.h>
#include <math.h>
#include <omp.h>
#include "utils.h"
// Single-channel image container (despite the RGB name, A holds one float
// per pixel, row-major, Xsize*Ysize entries; NULL when empty).
struct RGBImage {
  long Xsize;
  long Ysize;
  float* A;
};
// Allocates an N x N image and zero-fills it (placeholder for loading real
// image data from disk).
void read_image(const int N, RGBImage* I) {
  I->Xsize = N;
  I->Ysize = N;
  const long total = (long)N * N;
  I->A = (float*) malloc(total * sizeof(float));
  for (long idx = 0; idx < total; idx++) I->A[idx] = 0.0f;
}
// Releases the pixel buffer; safe to call on an image that was never
// allocated (zero-sized images keep A untouched, then A is cleared).
void free_image(RGBImage* I) {
  if (I->Xsize * I->Ysize != 0) {
    free(I->A);
  }
  I->A = NULL;
}
#define FWIDTH 3
// filter for jacobi
// 3x3 stencil: averages the four edge-neighbours (weight 0.25 each); the
// centre and the corners are zero.
float filter[FWIDTH][FWIDTH] = {
    0, 0.25, 0,
    0.25, 0, 0.25,
    0, 0.25, 0};
// Reference "valid"-region convolution of I0 with the global 3x3 `filter`,
// writing |result| + h2 into the interior of I (border of FWIDTH_HALF pixels
// is left untouched).  h2 plays the role of a constant source term added to
// every output pixel.  Parallelised over output pixels with OpenMP.
void CPU_convolution(float* I, const float* I0, long Xsize, long Ysize) {
  constexpr long FWIDTH_HALF = (FWIDTH-1)/2;
  float h2 = 0.25 / ((Xsize + 1)*(Ysize + 1));
  #pragma omp parallel for collapse(2) schedule(static)
  for (long i0 = 0; i0 <= Xsize-FWIDTH; i0++) {
    for (long i1 = 0; i1 <= Ysize-FWIDTH; i1++) {
      // Accumulate the 3x3 weighted neighbourhood anchored at (i0, i1).
      float sum = 0;
      for (long j0 = 0; j0 < FWIDTH; j0++) {
        for (long j1 = 0; j1 < FWIDTH; j1++) {
          sum += I0[(i0+j0)*Ysize + (i1+j1)] * filter[j0][j1];
        }
      }
      // Output is shifted by the filter half-width so it lands on the
      // stencil's centre pixel.
      I[(i0+FWIDTH_HALF)*Ysize + (i1+FWIDTH_HALF)] = (float)fabs(sum) + h2;
    }
  }
}
#define BLOCK_DIM 32
// Device-side copy of the 3x3 stencil, uploaded via cudaMemcpyToSymbol.
__constant__ float filter_gpu[FWIDTH][FWIDTH];
// Convolution reading I0 straight from global memory (no shared-memory tile).
// Blocks overlap by FWIDTH pixels so every output pixel has its full halo;
// only the inner (BLOCK_DIM-FWIDTH)^2 threads of a block write output.
// NOTE(review): unlike GPU_convolution and CPU_convolution this variant does
// not add the h2 term — confirm that is intentional.
__global__ void GPU_convolution_no_smem(float* I, const float* I0, long Xsize, long Ysize) {
  constexpr long FWIDTH_HALF = (FWIDTH-1)/2;
  long offset_x = blockIdx.x * (BLOCK_DIM-FWIDTH);
  long offset_y = blockIdx.y * (BLOCK_DIM-FWIDTH);
  float sum = 0;
  for (long j0 = 0; j0 < FWIDTH; j0++) {
    for (long j1 = 0; j1 < FWIDTH; j1++) {
      sum += I0[(offset_x + threadIdx.x + j0)*Ysize + (offset_y + threadIdx.y + j1)] * filter_gpu[j0][j1];
    }
  }
  if (threadIdx.x+FWIDTH < BLOCK_DIM && threadIdx.y+FWIDTH < BLOCK_DIM)
    if (offset_x+threadIdx.x+FWIDTH <= Xsize && offset_y+threadIdx.y+FWIDTH <= Ysize)
      I[(offset_x+threadIdx.x+FWIDTH_HALF)*Ysize + (offset_y+threadIdx.y+FWIDTH_HALF)] = (float)fabs(sum);
}
// Tiled convolution: each block stages a BLOCK_DIM x BLOCK_DIM patch of I0
// into shared memory, then every interior thread applies the 3x3 stencil.
// Blocks overlap by FWIDTH pixels (grid stride BLOCK_DIM-FWIDTH) so halos
// are covered; output is |sum| + h2, matching CPU_convolution.
__global__ void GPU_convolution(float* I, const float* I0, long Xsize, long Ysize) {
  constexpr long FWIDTH_HALF = (FWIDTH-1)/2;
  __shared__ float smem[BLOCK_DIM+FWIDTH][BLOCK_DIM+FWIDTH];
  long offset_x = blockIdx.x * (BLOCK_DIM-FWIDTH);
  long offset_y = blockIdx.y * (BLOCK_DIM-FWIDTH);
  float h2 = 0.25 / ((Xsize + 1)*(Ysize + 1));
  // Zero first so out-of-range tile cells act as zero padding.
  smem[threadIdx.x][threadIdx.y] = 0;
  if (offset_x + threadIdx.x < Xsize && offset_y + threadIdx.y < Ysize)
    smem[threadIdx.x][threadIdx.y] = I0[(offset_x + threadIdx.x)*Ysize + (offset_y + threadIdx.y)];
  // Barrier between the cooperative tile load and the stencil reads.
  __syncthreads();
  float sum = 0;
  for (long j0 = 0; j0 < FWIDTH; j0++) {
    for (long j1 = 0; j1 < FWIDTH; j1++) {
      sum += smem[threadIdx.x+j0][threadIdx.y+j1] * filter_gpu[j0][j1];
    }
  }
  if (threadIdx.x+FWIDTH < BLOCK_DIM && threadIdx.y+FWIDTH < BLOCK_DIM)
    if (offset_x+threadIdx.x+FWIDTH <= Xsize && offset_y+threadIdx.y+FWIDTH <= Ysize)
      I[(offset_x+threadIdx.x+FWIDTH_HALF)*Ysize + (offset_y+threadIdx.y+FWIDTH_HALF)] = (float)fabs(sum) + h2;
}
// Benchmark driver: runs the CPU reference convolution and the shared-memory
// GPU version `repeat` times each, reports wall time / GFlop/s for both, and
// prints the max absolute difference between results.
int main() {
  long repeat = 1000;
  long N = 1000;
  // Load image from file
  RGBImage I0, I1, I1_ref;
  read_image(N, &I0);
  read_image(N, &I1);
  read_image(N, &I1_ref);
  long Xsize = I0.Xsize;
  long Ysize = I0.Ysize;
  // Filter on CPU
  Timer t;
  t.tic();
  for (long i = 0; i < repeat; i++) CPU_convolution(I1_ref.A, I0.A, Xsize, Ysize);
  double tt = t.toc();
  printf("CPU time = %fs\n", tt);
  printf("CPU flops = %fGFlop/s\n", repeat * 2*(Xsize-FWIDTH)*(Ysize-FWIDTH)*FWIDTH*FWIDTH/tt*1e-9);
  // Allocate GPU memory
  float *I0gpu, *I1gpu;
  hipMalloc(&I0gpu, Xsize*Ysize*sizeof(float));
  hipMalloc(&I1gpu, Xsize*Ysize*sizeof(float));
  hipMemcpy(I0gpu, I0.A, Xsize*Ysize*sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(I1gpu, I1.A, Xsize*Ysize*sizeof(float), hipMemcpyHostToDevice);
  hipMemcpyToSymbol(filter_gpu, filter, sizeof(filter_gpu)); // Initialize filter_gpu
  // Create streams
  hipStream_t streams[1];
  hipStreamCreate(&streams[0]);
  // Dry run (warm-up launch excluded from timing)
  dim3 blockDim(BLOCK_DIM, BLOCK_DIM);
  dim3 gridDim(Xsize/(BLOCK_DIM-FWIDTH)+1, Ysize/(BLOCK_DIM-FWIDTH)+1);
  hipLaunchKernelGGL(( GPU_convolution), dim3(gridDim),dim3(blockDim), 0, streams[0], I1gpu+0*Xsize*Ysize, I0gpu+0*Xsize*Ysize, Xsize, Ysize);
  // Filter on GPU: synchronize before and after so the timer covers only
  // the kernel launches in the loop.
  hipDeviceSynchronize();
  t.tic();
  for (long i = 0; i < repeat; i++) {
    hipLaunchKernelGGL(( GPU_convolution), dim3(gridDim),dim3(blockDim), 0, streams[0], I1gpu+0*Xsize*Ysize, I0gpu+0*Xsize*Ysize, Xsize, Ysize);
  }
  hipDeviceSynchronize();
  tt = t.toc();
  printf("GPU time = %fs\n", tt);
  printf("GPU flops = %fGFlop/s\n", repeat * 2*(Xsize-FWIDTH)*(Ysize-FWIDTH)*FWIDTH*FWIDTH/tt*1e-9);
  // Print error (max absolute deviation from the CPU reference)
  float err = 0;
  hipMemcpy(I1.A, I1gpu, Xsize*Ysize*sizeof(float), hipMemcpyDeviceToHost);
  for (long i = 0; i < Xsize*Ysize; i++) err = ::max(err, fabs(I1.A[i] - I1_ref.A[i]));
  printf("Error = %e\n", err);
  // Free memory
  hipStreamDestroy(streams[0]);
  hipFree(I0gpu);
  hipFree(I1gpu);
  free_image(&I0);
  free_image(&I1);
  free_image(&I1_ref);
  return 0;
}
| b71f64991b09fbb3b09a6edd08724258c18f9b2b.cu | #include <algorithm>
#include <stdio.h>
#include <math.h>
#include <omp.h>
#include "utils.h"
// Single-channel image container (A holds one float per pixel, row-major).
struct RGBImage {
  long Xsize;
  long Ysize;
  float* A;
};
// Allocates an N x N image and zero-fills it (placeholder for real loading).
void read_image(const int N, RGBImage* I) {
  I->Xsize = N;
  I->Ysize = N;
  I->A = NULL;
  I->A = (float*) malloc(N*N*sizeof(float));
  for (long i = 0; i < N*N; i++) {
    I->A[i] = 0.;
  }
}
// Releases the pixel buffer (skipped for zero-sized images) and clears A.
void free_image(RGBImage* I) {
  long N = I->Xsize * I->Ysize;
  if (N) free(I->A);
  I->A = NULL;
}
#define FWIDTH 3
// filter for jacobi
// 3x3 stencil: four edge-neighbours weighted 0.25 each; centre/corners zero.
float filter[FWIDTH][FWIDTH] = {
    0, 0.25, 0,
    0.25, 0, 0.25,
    0, 0.25, 0};
// Reference "valid"-region convolution with the global 3x3 `filter`; writes
// |sum| + h2 into the interior of I (FWIDTH_HALF border left untouched).
void CPU_convolution(float* I, const float* I0, long Xsize, long Ysize) {
  constexpr long FWIDTH_HALF = (FWIDTH-1)/2;
  float h2 = 0.25 / ((Xsize + 1)*(Ysize + 1));
  #pragma omp parallel for collapse(2) schedule(static)
  for (long i0 = 0; i0 <= Xsize-FWIDTH; i0++) {
    for (long i1 = 0; i1 <= Ysize-FWIDTH; i1++) {
      float sum = 0;
      for (long j0 = 0; j0 < FWIDTH; j0++) {
        for (long j1 = 0; j1 < FWIDTH; j1++) {
          sum += I0[(i0+j0)*Ysize + (i1+j1)] * filter[j0][j1];
        }
      }
      // Output shifted to the stencil centre.
      I[(i0+FWIDTH_HALF)*Ysize + (i1+FWIDTH_HALF)] = (float)fabs(sum) + h2;
    }
  }
}
#define BLOCK_DIM 32
// Device-side copy of the stencil, uploaded via cudaMemcpyToSymbol.
__constant__ float filter_gpu[FWIDTH][FWIDTH];
// Convolution reading I0 directly from global memory (no shared-mem tile);
// blocks overlap by FWIDTH pixels, only interior threads write output.
// NOTE(review): does not add h2 unlike the other two variants — confirm.
__global__ void GPU_convolution_no_smem(float* I, const float* I0, long Xsize, long Ysize) {
  constexpr long FWIDTH_HALF = (FWIDTH-1)/2;
  long offset_x = blockIdx.x * (BLOCK_DIM-FWIDTH);
  long offset_y = blockIdx.y * (BLOCK_DIM-FWIDTH);
  float sum = 0;
  for (long j0 = 0; j0 < FWIDTH; j0++) {
    for (long j1 = 0; j1 < FWIDTH; j1++) {
      sum += I0[(offset_x + threadIdx.x + j0)*Ysize + (offset_y + threadIdx.y + j1)] * filter_gpu[j0][j1];
    }
  }
  if (threadIdx.x+FWIDTH < BLOCK_DIM && threadIdx.y+FWIDTH < BLOCK_DIM)
    if (offset_x+threadIdx.x+FWIDTH <= Xsize && offset_y+threadIdx.y+FWIDTH <= Ysize)
      I[(offset_x+threadIdx.x+FWIDTH_HALF)*Ysize + (offset_y+threadIdx.y+FWIDTH_HALF)] = (float)fabs(sum);
}
// Tiled convolution: each block stages a BLOCK_DIM x BLOCK_DIM patch of I0
// into shared memory (zero-padded out of range), barriers, then interior
// threads apply the 3x3 stencil and write |sum| + h2.
__global__ void GPU_convolution(float* I, const float* I0, long Xsize, long Ysize) {
  constexpr long FWIDTH_HALF = (FWIDTH-1)/2;
  __shared__ float smem[BLOCK_DIM+FWIDTH][BLOCK_DIM+FWIDTH];
  long offset_x = blockIdx.x * (BLOCK_DIM-FWIDTH);
  long offset_y = blockIdx.y * (BLOCK_DIM-FWIDTH);
  float h2 = 0.25 / ((Xsize + 1)*(Ysize + 1));
  smem[threadIdx.x][threadIdx.y] = 0;
  if (offset_x + threadIdx.x < Xsize && offset_y + threadIdx.y < Ysize)
    smem[threadIdx.x][threadIdx.y] = I0[(offset_x + threadIdx.x)*Ysize + (offset_y + threadIdx.y)];
  // Barrier between cooperative tile load and stencil reads.
  __syncthreads();
  float sum = 0;
  for (long j0 = 0; j0 < FWIDTH; j0++) {
    for (long j1 = 0; j1 < FWIDTH; j1++) {
      sum += smem[threadIdx.x+j0][threadIdx.y+j1] * filter_gpu[j0][j1];
    }
  }
  if (threadIdx.x+FWIDTH < BLOCK_DIM && threadIdx.y+FWIDTH < BLOCK_DIM)
    if (offset_x+threadIdx.x+FWIDTH <= Xsize && offset_y+threadIdx.y+FWIDTH <= Ysize)
      I[(offset_x+threadIdx.x+FWIDTH_HALF)*Ysize + (offset_y+threadIdx.y+FWIDTH_HALF)] = (float)fabs(sum) + h2;
}
// Benchmark driver: times CPU vs GPU (shared-memory) convolution over
// `repeat` iterations and prints the max absolute deviation between them.
int main() {
  long repeat = 1000;
  long N = 1000;
  // Load image from file
  RGBImage I0, I1, I1_ref;
  read_image(N, &I0);
  read_image(N, &I1);
  read_image(N, &I1_ref);
  long Xsize = I0.Xsize;
  long Ysize = I0.Ysize;
  // Filter on CPU
  Timer t;
  t.tic();
  for (long i = 0; i < repeat; i++) CPU_convolution(I1_ref.A, I0.A, Xsize, Ysize);
  double tt = t.toc();
  printf("CPU time = %fs\n", tt);
  printf("CPU flops = %fGFlop/s\n", repeat * 2*(Xsize-FWIDTH)*(Ysize-FWIDTH)*FWIDTH*FWIDTH/tt*1e-9);
  // Allocate GPU memory
  float *I0gpu, *I1gpu;
  cudaMalloc(&I0gpu, Xsize*Ysize*sizeof(float));
  cudaMalloc(&I1gpu, Xsize*Ysize*sizeof(float));
  cudaMemcpy(I0gpu, I0.A, Xsize*Ysize*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(I1gpu, I1.A, Xsize*Ysize*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpyToSymbol(filter_gpu, filter, sizeof(filter_gpu)); // Initialize filter_gpu
  // Create streams
  cudaStream_t streams[1];
  cudaStreamCreate(&streams[0]);
  // Dry run (warm-up launch excluded from timing)
  dim3 blockDim(BLOCK_DIM, BLOCK_DIM);
  dim3 gridDim(Xsize/(BLOCK_DIM-FWIDTH)+1, Ysize/(BLOCK_DIM-FWIDTH)+1);
  GPU_convolution<<<gridDim,blockDim, 0, streams[0]>>>(I1gpu+0*Xsize*Ysize, I0gpu+0*Xsize*Ysize, Xsize, Ysize);
  // Filter on GPU: sync before and after so the timer covers only the loop.
  cudaDeviceSynchronize();
  t.tic();
  for (long i = 0; i < repeat; i++) {
    GPU_convolution<<<gridDim,blockDim, 0, streams[0]>>>(I1gpu+0*Xsize*Ysize, I0gpu+0*Xsize*Ysize, Xsize, Ysize);
  }
  cudaDeviceSynchronize();
  tt = t.toc();
  printf("GPU time = %fs\n", tt);
  printf("GPU flops = %fGFlop/s\n", repeat * 2*(Xsize-FWIDTH)*(Ysize-FWIDTH)*FWIDTH*FWIDTH/tt*1e-9);
  // Print error
  float err = 0;
  cudaMemcpy(I1.A, I1gpu, Xsize*Ysize*sizeof(float), cudaMemcpyDeviceToHost);
  for (long i = 0; i < Xsize*Ysize; i++) err = std::max(err, fabs(I1.A[i] - I1_ref.A[i]));
  printf("Error = %e\n", err);
  // Free memory
  cudaStreamDestroy(streams[0]);
  cudaFree(I0gpu);
  cudaFree(I1gpu);
  free_image(&I0);
  free_image(&I1);
  free_image(&I1_ref);
  return 0;
}
|
ef8596c0b5f09cccf4a89db72f5bf618ebdce5bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@precisions normal z -> s d c
*/
#include "common_magma.h"
#define BLOCK_SIZE 64
/*********************************************************/
/*
* Blocked version: swap several pairs of lines
*/
// Kernel argument bundle for the blocked row-swap kernels: up to BLOCK_SIZE
// pivot steps are applied between matrices A and B.  ipiv[i] == -1 marks a
// no-op swap for step i.  Passed by value so it lives in kernel parameter
// space (hence the small fixed-size pivot table).
typedef struct {
    magmaDoubleComplex *A;
    magmaDoubleComplex *B;
    int n, ldda, lddb, npivots;
    short ipiv[BLOCK_SIZE];
} magmagpu_zswapblk_params_t;
// Row-major swap: thread y handles one of the n columns, stepping down
// params.npivots rows of A (stride ldda) and exchanging each element with
// row ipiv[i] of B; ipiv[i] == -1 skips that step.
__global__ void magmagpu_zswapblkrm( magmagpu_zswapblk_params_t params )
{
    unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
    if( y < params.n )
    {
        // Pre-decrement by one row stride; the first loop iteration adds it back.
        magmaDoubleComplex *A = params.A + y - params.ldda;
        magmaDoubleComplex *B = params.B + y;
        for( int i = 0; i < params.npivots; i++ )
        {
            A += params.ldda;
            if ( params.ipiv[i] == -1 )
                continue;   // identity pivot: nothing to exchange
            magmaDoubleComplex tmp1 = *A;
            magmaDoubleComplex *tmp2 = B + params.ipiv[i]*params.lddb;
            *A = *tmp2;
            *tmp2 = tmp1;
        }
    }
}
// Column-major swap: thread y handles one of the n columns (column offsets
// y*ldda in A and y*lddb in B), stepping down params.npivots consecutive
// rows of A and exchanging each element with row ipiv[i] of B's column;
// ipiv[i] == -1 skips that step.
__global__ void magmagpu_zswapblkcm( magmagpu_zswapblk_params_t params )
{
    unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
    unsigned int offset1 = y*params.ldda;
    unsigned int offset2 = y*params.lddb;
    if( y < params.n )
    {
        // Pre-decrement by one element; the first loop iteration adds it back.
        magmaDoubleComplex *A = params.A + offset1 - 1;
        magmaDoubleComplex *B = params.B + offset2;
        for( int i = 0; i < params.npivots; i++ )
        {
            A++;
            if ( params.ipiv[i] == -1 )
                continue;   // identity pivot: nothing to exchange
            magmaDoubleComplex tmp1 = *A;
            magmaDoubleComplex *tmp2 = B + params.ipiv[i];
            *A = *tmp2;
            *tmp2 = tmp1;
        }
    }
    // NOTE(review): this barrier is outside the guard (all threads reach it)
    // but at kernel end it synchronizes nothing — looks redundant.
    __syncthreads();
}
/**
@ingroup magma_zblas2
********************************************************************/
// Applies LAPACK-style pivot interchanges ipiv[i1-1 .. i2-1] between dA and
// dB on the given queue, processing BLOCK_SIZE pivots per kernel launch.
// Pivots are converted from 1-based Fortran indexing; a pivot that maps a
// row to itself is encoded as -1 so the kernel skips it.  Chooses the
// column-major or row-major kernel based on `order`.
extern "C" void
magmablas_zswapblk_q(
    magma_order_t order, magma_int_t n,
    magmaDoubleComplex_ptr dA, magma_int_t ldda,
    magmaDoubleComplex_ptr dB, magma_int_t lddb,
    magma_int_t i1, magma_int_t i2,
    const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset,
    magma_queue_t queue )
{
    magma_int_t  blocksize = 64;
    dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
    magma_int_t  k, im;
    
    /* Quick return */
    if ( n == 0 )
        return;
    
    if ( order == MagmaColMajor ) {
        for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
        {
            magma_int_t sb = min(BLOCK_SIZE, i2-k);
            magmagpu_zswapblk_params_t params = { dA+k, dB, n, ldda, lddb, sb };
            for( magma_int_t j = 0; j < sb; j++ )
            {
                im = ipiv[(k+j)*inci] - 1;   // to 0-based row index
                if ( (k+j) == im )
                    params.ipiv[j] = -1;     // self-swap: mark as no-op
                else
                    params.ipiv[j] = im - offset;
            }
            hipLaunchKernelGGL(( magmagpu_zswapblkcm), dim3(blocks), dim3(blocksize), 0, queue , params );
        }
    }
    else {
        for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
        {
            magma_int_t sb = min(BLOCK_SIZE, i2-k);
            magmagpu_zswapblk_params_t params = { dA+k*ldda, dB, n, ldda, lddb, sb };
            for( magma_int_t j = 0; j < sb; j++ )
            {
                im = ipiv[(k+j)*inci] - 1;   // to 0-based row index
                if ( (k+j) == im )
                    params.ipiv[j] = -1;     // self-swap: mark as no-op
                else
                    params.ipiv[j] = im - offset;
            }
            hipLaunchKernelGGL(( magmagpu_zswapblkrm), dim3(blocks), dim3(blocksize), 0, queue , params );
        }
    }
}
/**
@see magmablas_zswapblk_q
@ingroup magma_zblas2
********************************************************************/
// Convenience wrapper: same as magmablas_zswapblk_q on the default MAGMA stream.
extern "C" void
magmablas_zswapblk(
    magma_order_t order, magma_int_t n,
    magmaDoubleComplex_ptr dA, magma_int_t ldda,
    magmaDoubleComplex_ptr dB, magma_int_t lddb,
    magma_int_t i1, magma_int_t i2,
    const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset )
{
    magmablas_zswapblk_q(
        order, n, dA, ldda, dB, lddb, i1, i2, ipiv, inci, offset, magma_stream );
}
| ef8596c0b5f09cccf4a89db72f5bf618ebdce5bf.cu | /*
-- MAGMA (version 1.6.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2015
@precisions normal z -> s d c
*/
#include "common_magma.h"
#define BLOCK_SIZE 64
/*********************************************************/
/*
* Blocked version: swap several pairs of lines
*/
// Kernel argument bundle for the blocked row-swap kernels; ipiv[i] == -1
// marks a no-op swap.  Passed by value (kernel parameter space).
typedef struct {
    magmaDoubleComplex *A;
    magmaDoubleComplex *B;
    int n, ldda, lddb, npivots;
    short ipiv[BLOCK_SIZE];
} magmagpu_zswapblk_params_t;
// Row-major swap kernel: thread y walks npivots rows of A (stride ldda),
// exchanging elements with row ipiv[i] of B; -1 skips the step.
__global__ void magmagpu_zswapblkrm( magmagpu_zswapblk_params_t params )
{
    unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
    if( y < params.n )
    {
        // Pre-decremented; first loop iteration adds the stride back.
        magmaDoubleComplex *A = params.A + y - params.ldda;
        magmaDoubleComplex *B = params.B + y;
        for( int i = 0; i < params.npivots; i++ )
        {
            A += params.ldda;
            if ( params.ipiv[i] == -1 )
                continue;
            magmaDoubleComplex tmp1 = *A;
            magmaDoubleComplex *tmp2 = B + params.ipiv[i]*params.lddb;
            *A = *tmp2;
            *tmp2 = tmp1;
        }
    }
}
// Column-major swap kernel: thread y handles column y (offsets y*ldda /
// y*lddb), stepping down consecutive rows; -1 skips the step.
__global__ void magmagpu_zswapblkcm( magmagpu_zswapblk_params_t params )
{
    unsigned int y = threadIdx.x + blockDim.x*blockIdx.x;
    unsigned int offset1 = y*params.ldda;
    unsigned int offset2 = y*params.lddb;
    if( y < params.n )
    {
        // Pre-decremented; first loop iteration increments back.
        magmaDoubleComplex *A = params.A + offset1 - 1;
        magmaDoubleComplex *B = params.B + offset2;
        for( int i = 0; i < params.npivots; i++ )
        {
            A++;
            if ( params.ipiv[i] == -1 )
                continue;
            magmaDoubleComplex tmp1 = *A;
            magmaDoubleComplex *tmp2 = B + params.ipiv[i];
            *A = *tmp2;
            *tmp2 = tmp1;
        }
    }
    // NOTE(review): barrier at kernel end appears redundant (synchronizes nothing).
    __syncthreads();
}
/**
@ingroup magma_zblas2
********************************************************************/
// Applies pivot interchanges ipiv[i1-1 .. i2-1] between dA and dB on the
// given queue, BLOCK_SIZE pivots per launch; pivots are converted from
// 1-based Fortran indexing and self-swaps are encoded as -1.
extern "C" void
magmablas_zswapblk_q(
    magma_order_t order, magma_int_t n,
    magmaDoubleComplex_ptr dA, magma_int_t ldda,
    magmaDoubleComplex_ptr dB, magma_int_t lddb,
    magma_int_t i1, magma_int_t i2,
    const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset,
    magma_queue_t queue )
{
    magma_int_t  blocksize = 64;
    dim3 blocks( (n+blocksize-1) / blocksize, 1, 1);
    magma_int_t  k, im;
    
    /* Quick return */
    if ( n == 0 )
        return;
    
    if ( order == MagmaColMajor ) {
        for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
        {
            magma_int_t sb = min(BLOCK_SIZE, i2-k);
            magmagpu_zswapblk_params_t params = { dA+k, dB, n, ldda, lddb, sb };
            for( magma_int_t j = 0; j < sb; j++ )
            {
                im = ipiv[(k+j)*inci] - 1;
                if ( (k+j) == im )
                    params.ipiv[j] = -1;
                else
                    params.ipiv[j] = im - offset;
            }
            magmagpu_zswapblkcm<<< blocks, blocksize, 0, queue >>>( params );
        }
    }
    else {
        for( k=(i1-1); k<i2; k+=BLOCK_SIZE )
        {
            magma_int_t sb = min(BLOCK_SIZE, i2-k);
            magmagpu_zswapblk_params_t params = { dA+k*ldda, dB, n, ldda, lddb, sb };
            for( magma_int_t j = 0; j < sb; j++ )
            {
                im = ipiv[(k+j)*inci] - 1;
                if ( (k+j) == im )
                    params.ipiv[j] = -1;
                else
                    params.ipiv[j] = im - offset;
            }
            magmagpu_zswapblkrm<<< blocks, blocksize, 0, queue >>>( params );
        }
    }
}
/**
@see magmablas_zswapblk_q
@ingroup magma_zblas2
********************************************************************/
// Convenience wrapper: same as magmablas_zswapblk_q on the default MAGMA stream.
extern "C" void
magmablas_zswapblk(
    magma_order_t order, magma_int_t n,
    magmaDoubleComplex_ptr dA, magma_int_t ldda,
    magmaDoubleComplex_ptr dB, magma_int_t lddb,
    magma_int_t i1, magma_int_t i2,
    const magma_int_t *ipiv, magma_int_t inci, magma_int_t offset )
{
    magmablas_zswapblk_q(
        order, n, dA, ldda, dB, lddb, i1, i2, ipiv, inci, offset, magma_stream );
}
|
1ea49ffc491ec9f8c3f948e65318810668220db2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Translates a 2-D point set stored as two planes of `total = dim_x*dim_y`
// floats: coords[0..total) holds the y components (shifted by seg_y) and
// coords[total..2*total) holds the x components (shifted by seg_x).
// Fix: the original called __syncthreads() inside the divergent
// `index < total` branch — threads past the bound skip the barrier, which is
// undefined behaviour; the kernel uses no shared memory, so the barrier is
// simply removed.
__global__ void translate_2D(float* coords, size_t dim_y, size_t dim_x, float seg_y, float seg_x){
    size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    size_t total = dim_x * dim_y;
    if(index < total){
        coords[index] += seg_y;
        coords[index + total] += seg_x;
    }
}
// Translates a 2-D point set stored as two planes of `total = dim_x*dim_y`
// floats: y components in coords[0..total) (shifted by seg_y), x components
// in coords[total..2*total) (shifted by seg_x).
// Fix: removed the __syncthreads() that sat inside the divergent
// `index < total` branch (undefined behaviour when some threads skip the
// barrier); no shared memory is used, so it synchronized nothing.
__global__ void translate_2D(float* coords, size_t dim_y, size_t dim_x, float seg_y, float seg_x){
    size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    size_t total = dim_x * dim_y;
    if(index < total){
        coords[index] += seg_y;
        coords[index + total] += seg_x;
    }
}
18ba7bffa315af94791f29a5d16a253308b2b0e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Azzam Haidar
@author Tingxing Dong
@generated from magmablas/zgetf2_kernels.cu normal z -> s, Tue Feb 9 16:05:38 2016
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
#include "magma_templates.h"
/*
Purpose
-------
These are internal routines that might have many assumption.
They are used in sgetf2_batched.cpp
No documentation is available today.
@ingroup magma_sgesv_aux
*/
#define PRECISION_s
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ float shared_data[];
extern __shared__ float sdata[];
extern __shared__ int int_sdata[];
//////////////////////////////////////////////////////////////////////////////////////////
// Device helper: returns the index of the element of x (stride incx) with the
// largest |Re| + |Im| magnitude, scanning `length` elements in chunks of
// `zamax` threads.  shared_x / shared_idx must each hold zamax entries and
// are used as the reduction scratchpad; the winning index is left in
// shared_idx[0].  Only the first zamax threads of the block participate.
__device__ int
isamax_devfunc(int length, const float *x, int incx, float *shared_x, int *shared_idx)
{
    int tx = threadIdx.x;
    float res;
    float  res1;
    int nchunk = magma_ceildiv( length, zamax );
    if ( tx < zamax ) {
        shared_x[tx]   = 0.0;
        shared_idx[tx] = tx; //-1;// -1 will crash the code in case matrix is singular, better is to put =tx and make check info at output
    }
    __syncthreads();
    // Each thread keeps the running max over its strided chunk of x.
    for (int s =0; s < nchunk; s++)
    {
        if ( (tx + s * zamax < length) && (tx < zamax) )
        {
            res = x[(tx + s * zamax) * incx];
            res1 = fabs(MAGMA_S_REAL(res)) + fabs(MAGMA_S_IMAG(res));
            if ( res1  > shared_x[tx] )
            {
                shared_x[tx]   = res1;
                shared_idx[tx] = tx + s * zamax;
            }
        }
        __syncthreads();
    }
    if (length >= zamax) // there are more than 128 threads working ==> all shared_x shared_idx are initialized here so I can call the fixed getidmax
        magma_getidmax<zamax>(tx, shared_x, shared_idx);
    else
        magma_getidmax_n(min(zamax,length), tx, shared_x, shared_idx);
    return shared_idx[0];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Batched pivot search: one z-block per matrix.  Finds the max-magnitude
// element of the current panel column (starting at A(step, step)) and stores
// its 1-based (Fortran) row index into ipiv[step].  A zero pivot flags
// singularity in info_array.  `chunk` is unused here.
__global__ void
isamax_kernel_batched(int length, int chunk, float **x_array, int incx,
                   int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep)
{
    float *x_start = x_array[blockIdx.z];
    const float *x = &(x_start[step + step * lda]);
    magma_int_t *ipiv = ipiv_array[blockIdx.z];
    int tx = threadIdx.x;
    float *shared_x = sdata;                 // dynamic shared mem: zamax floats...
    int *shared_idx = (int*)(shared_x + zamax);  // ...followed by zamax ints
    isamax_devfunc(length, x, incx, shared_x, shared_idx);
    if (tx == 0) {
        ipiv[step]  = shared_idx[0] + step + 1; // Fortran Indexing
        if (shared_x[0] == MAGMA_D_ZERO) {
            info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1;
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// First level of the two-level (tree) batched pivot search for long columns:
// each x-block reduces its zamax-wide slice of the column.  With a single
// block the result is final (ipiv/info written directly); with several
// blocks each block deposits its local max and index into the per-matrix
// data_pool/id_pool workspaces for the second-level kernel.
__global__ void
tree_isamax_kernel_batched(int length, float **x_array, int incx,
                   int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep,
                   float** data_pool_array, magma_int_t** id_pool_array)
{
    float *x_start = x_array[blockIdx.z];
    const float *x = &(x_start[step + step * lda]);
    float *data_pool = data_pool_array[blockIdx.z];
    magma_int_t *id_pool = id_pool_array[blockIdx.z];
    magma_int_t *ipiv = ipiv_array[blockIdx.z];
    int tx = threadIdx.x;
    int local_max_id;
    __shared__ float shared_x[zamax];
    __shared__ int     shared_idx[zamax];
    x += zamax * blockIdx.x * incx;   // this block's slice of the column
    isamax_devfunc(min(zamax, length-blockIdx.x * zamax), x, incx, shared_x, shared_idx);
    if (tx == 0)
    {
        local_max_id = shared_idx[0] + zamax * blockIdx.x; // add the offset
        if (gridDim.x == 1)
        {
            ipiv[step]  = local_max_id + step + 1; // Fortran Indexing
            if (shared_x[0] == MAGMA_D_ZERO)
                info_array[blockIdx.z] = local_max_id + step + gbstep + 1;
        }
        else
        {
            // put each thread block local max and its index in workspace
            data_pool[blockIdx.x] = shared_x[0];
            id_pool[blockIdx.x] = local_max_id;
        }
    }
}
// Second level of the tree pivot search: one block per matrix reads the n
// per-block partial maxima from data_pool/id_pool, reduces them, and writes
// the final 1-based pivot into ipiv[step] (flagging a zero pivot in
// info_array).  Assumes blockDim.x == zamax so unused slots are padded.
__global__ void
tree_isamax_kernel2_batched(int n, int step,  magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep, float** data_pool_array, magma_int_t** id_pool_array)
{
    __shared__ float shared_x[zamax];
    __shared__ int     shared_idx[zamax];
    magma_int_t *ipiv = ipiv_array[blockIdx.z];
    float *data_pool = data_pool_array[blockIdx.z];
    magma_int_t *id_pool = id_pool_array[blockIdx.z];
    int tx = threadIdx.x;
    //read data
    if ( tx < n)
    {
        shared_x[tx] = data_pool[tx];
        shared_idx[tx] = id_pool[tx];
    }
    else
    {
        shared_x[tx] = 0.0;
        shared_idx[tx] = -2;   // sentinel: padded slot, never a valid index
    }
    __syncthreads();
    // compute local result inside each thread block
    magma_getidmax<zamax>(tx, shared_x, shared_idx);
    if (tx == 0 )
    {
        ipiv[step]  = shared_idx[0] + step + 1; // Fortran Indexing
        if (shared_x[0] == MAGMA_D_ZERO)
            info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1;
    }
}
// Host driver for the two-level batched pivot search on long columns:
// allocates per-matrix workspaces for the per-block partial maxima, launches
// the first-level reduction, and (when more than one block was needed) the
// second-level reduction.  Fails loudly if the column needs more than zamax
// blocks.  NOTE(review): allocation results are not checked, and the
// num_blocks > zamax path still returns 0 — confirm callers rely on the
// printed message only.
magma_int_t magma_isamax_lg_batched(magma_int_t length, float **x_array, magma_int_t incx, magma_int_t step,  magma_int_t lda,
        magma_int_t** ipiv_array, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
    if (length == 1) return 0;
    if (incx < 0) return 1;
    
    float* data_pool;
    magma_int_t* id_pool;
    float** data_pool_array = NULL;
    magma_int_t** id_pool_array = NULL;
    magma_int_t num_blocks = (length-1)/(zamax) + 1;
    // creat pools(data and index) to store the result of each thread blocks
    magma_smalloc(&data_pool, num_blocks * batchCount);
    magma_imalloc(&id_pool, num_blocks * batchCount);
    magma_malloc((void**)&data_pool_array, batchCount * sizeof(*data_pool_array));
    magma_malloc((void**)&id_pool_array, batchCount * sizeof(*id_pool_array));
#if defined(PRECISION_z) || defined(PRECISION_d)
    magma_sset_pointer( data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue );
#else
    magma_sset_pointer( data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue );
#endif
    magma_iset_pointer( id_pool_array, id_pool, 1, 0, 0, num_blocks, batchCount, queue );
    if ( num_blocks > zamax)
    {
        fprintf( stderr, "%s: length(=%d), num_blocks(=%d) is too big > zamax(=%d), the second layer reduction can not be launched, Plz incread zamax \n",
                 __func__, int(length), int(num_blocks), int(zamax));
    }
    else
    {
        // first level tree reduction
        dim3 grid(num_blocks, 1, batchCount);
        dim3 threads(zamax, 1, 1);
        hipLaunchKernelGGL(( tree_isamax_kernel_batched)
            , dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
            length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array);
        if ( num_blocks > 1)
        {
            // second level tree reduction
            dim3 grid2(1, 1, batchCount);
            hipLaunchKernelGGL(( tree_isamax_kernel2_batched)
                , dim3(grid2), dim3(threads), 0, queue->cuda_stream() ,
                num_blocks, step, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array);
        }
    }
    magma_free(data_pool);
    magma_free(id_pool);
    magma_free(data_pool_array);
    magma_free(id_pool_array);
    return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
ISAMAX find the index of max absolute value of elements in x and store the index in ipiv
This is an internal routine that might have many assumption.
Arguments
---------
@param[in]
length INTEGER
On entry, length specifies the size of vector x. length >= 0.
@param[in]
x_array Array of pointers, dimension (batchCount).
Each is a REAL array of dimension
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
step INTEGER
the offset of ipiv
@param[in]
lda INTEGER
The leading dimension of each array A, internal use to find the starting position of x.
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occured, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
gbstep INTEGER
the offset of info, internal use
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_sgesv_aux
********************************************************************/
extern "C"
magma_int_t magma_isamax_batched(magma_int_t length,
        float **x_array, magma_int_t incx,
        magma_int_t step, magma_int_t lda,
        magma_int_t** ipiv_array, magma_int_t *info_array,
        magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
    // Batched ISAMAX driver: for each batch entry, find the pivot of column
    // x(step:, step) and write its Fortran (1-based) index into ipiv[step];
    // a zero pivot is reported through info_array. One thread block of zamax
    // threads per batch entry (blockIdx.z indexes the batch).
    if (length == 0 ) return 0;   // empty vector: nothing to search
    dim3 grid(1, 1, batchCount);
    dim3 threads(zamax, 1, 1);
#if 1
    // single-kernel path: each thread strides over ceil(length/zamax) chunks;
    // dynamic shared memory holds zamax (value, index) reduction slots
    int chunk = magma_ceildiv( length, zamax );
    hipLaunchKernelGGL(( isamax_kernel_batched), dim3(grid), dim3(threads), zamax * (sizeof(float) + sizeof(int)), queue->cuda_stream() ,
        length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep);
#else
    // the magma_isamax_lg_batched is faster but when cuda launch it as 2 kernels the white space time between these 2 kernels and the next kernel is larger than using the isamax_kernel for that today we are using only isamax_kernel
    if ( length <= 10 * zamax )
    {
        int chunk = magma_ceildiv( length, zamax );
        hipLaunchKernelGGL(( isamax_kernel_batched), dim3(grid), dim3(threads), zamax * (sizeof(float) + sizeof(magma_int_t)), queue->cuda_stream() ,
            length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep);
    }
    else
    {
        // BUG FIX: this call was missing the required trailing 'queue'
        // argument (see the magma_isamax_lg_batched signature) and would not
        // compile if this #else branch were ever enabled.
        magma_isamax_lg_batched(length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, batchCount, queue);
    }
#endif
    return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void sswap_kernel_batched(magma_int_t n, float **x_array, magma_int_t incx, magma_int_t step, magma_int_t** ipiv_array)
{
    // Exchange rows 'step' and (ipiv[step]-1) of this batch entry's vector.
    // Thread 0 fetches the pivot row once into shared memory and broadcasts
    // it to the block; each thread then swaps one of the n elements.
    const int tid = threadIdx.x;
    float       *x    = x_array[blockIdx.z];
    magma_int_t *ipiv = ipiv_array[blockIdx.z];

    __shared__ int jp;
    if (tid == 0) {
        jp = ipiv[step] - 1;   // convert Fortran (1-based) pivot to 0-based row
    }
    __syncthreads();

    if (jp == step) return;    // pivot is already on the diagonal: nothing to do

    if (tid < n) {
        const int a = jp   + incx * tid;
        const int b = step + incx * tid;
        float tmp = x[a];
        x[a] = x[b];
        x[b] = tmp;
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
sswap two row in x. index (ipiv[step]-1)-th and index step -th
This is an internal routine that might have many assumption.
Arguments
---------
@param[in]
n INTEGER
On entry, n specifies the size of vector x. n >= 0.
@param[in]
x_array Array of pointers, dimension (batchCount).
Each is a REAL array of dimension
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
    step    INTEGER
            The pivot step: row step and row (ipiv[step]-1) of x are exchanged.
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_sgesv_aux
********************************************************************/
extern "C"
magma_int_t magma_sswap_batched(magma_int_t n, float **x_array, magma_int_t incx,
                                magma_int_t step, magma_int_t** ipiv_array,
                                magma_int_t batchCount, magma_queue_t queue)
{
    /* Row swap for each matrix in the batch: exchanges row (ipiv[step]-1)
       with row step. All n elements must fit in one thread block. */
    if (n > MAX_NTHREADS) {
        fprintf( stderr, "%s nb=%d > %d, not supported\n", __func__, int(n), int(MAX_NTHREADS) );
        return -15;
    }
    const dim3 blocks(1, 1, batchCount);   // one block per batch entry
    const dim3 nthread(zamax, 1, 1);
    hipLaunchKernelGGL(sswap_kernel_batched, blocks, nthread, 0, queue->cuda_stream(),
                       n, x_array, incx, step, ipiv_array);
    return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void sscal_sger_kernel_batched(int m, int n, int step, float **dA_array, int lda, magma_int_t *info_array, int gbstep)
{
    // Fused SSCAL + SGER for one LU elimination step on the trailing block
    // A(step:, step:): scale the sub-diagonal of the current column by
    // 1/A(step,step), then apply the rank-1 update to the n-1 columns to the
    // right. Rows are split over gridDim.x blocks of MAX_NTHREADS threads;
    // the current row A(step, step:step+n) is staged in dynamic shared memory.
    // checkinfo: skip the computation if this matrix was already flagged singular
    if (info_array[blockIdx.z] != 0 ) return;
    float *A_start = dA_array[blockIdx.z];
    float *A = &(A_start[step + step * lda]);   // trailing block, (0,0) = pivot
    float *shared_y = shared_data;              // row A(step, step:step+n), n floats
    int tx = threadIdx.x;
    int gbidx = blockIdx.x*MAX_NTHREADS + threadIdx.x;   // global row within the block
    if (tx < n) {
        shared_y[tx] = A[lda * tx];
    }
    __syncthreads();
    if (shared_y[0] == MAGMA_S_ZERO) {
        // zero pivot: record the singular column (1-based, global offset) and stop
        info_array[blockIdx.z] = step + gbstep + 1;
        return;
    }
    // row 0 is the pivot row itself; rows 1..m-1 are scaled and updated
    if (gbidx < m && gbidx > 0) {
        float reg = MAGMA_S_ZERO;
        reg = A[gbidx];
        reg *= MAGMA_S_DIV(MAGMA_S_ONE, shared_y[0]);   // scale by 1/pivot
        A[gbidx] = reg;
        #pragma unroll
        for (int i=1; i < n; i++) {
            //A[gbidx + i*lda] = A[gbidx + i*lda] - shared_y[i] * reg; //cuda gives wrong results with this form
            //A[gbidx + i*lda] -= shared_y[i] * reg; //cuda gives wrong results with this form
            A[gbidx + i*lda] += (MAGMA_S_NEG_ONE) * shared_y[i] * reg;
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
magma_int_t magma_sscal_sger_batched(magma_int_t m, magma_int_t n, magma_int_t step,
                                     float **dA_array, magma_int_t lda,
                                     magma_int_t *info_array, magma_int_t gbstep,
                                     magma_int_t batchCount, magma_queue_t queue)
{
    /*
      Launch the fused sscal+sger kernel:
        1) scale the column vector A(1:M-1,0) by 1/A(0,0);
        2) rank-1 update of the trailing matrix A(1:M-1,1:N-1) += alpha*x*y**T
           with alpha := -1.0, x := A(1:M-1,0), y := A(0,1:N-1).
      The m rows are split over ceil(m/MAX_NTHREADS) thread blocks; each block
      caches the n-wide current row of the panel in shared memory.
    */
    if (n == 0) return 0;
    if (n > MAX_NTHREADS) {
        fprintf( stderr, "%s nb=%d, > %d, not supported\n", __func__, int(n), int(MAX_NTHREADS) );
        return -15;
    }
    const int    nchunk = magma_ceildiv(m, MAX_NTHREADS);
    const size_t shmem  = sizeof(float) * n;   // one panel row per block
    const dim3 blocks(nchunk, 1, batchCount);
    const dim3 nthread(min(m, MAX_NTHREADS), 1, 1);
    hipLaunchKernelGGL(sscal_sger_kernel_batched, blocks, nthread, shmem, queue->cuda_stream(),
                       m, n, step, dA_array, lda, info_array, gbstep);
    return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void sgetf2trsm_kernel_batched(int ib, int n, float **dA_array, int step, int lda)
{
    /*
        Safe non-blocked TRSM for the panel: B = A^-1 * B, where A is the
        ib x ib (unit-lower-triangular) block at (step, step) and B is the
        ib x n block immediately to its right. Both are staged in shared
        memory, solved by forward substitution, and B is written back.
    */
    float *A_start = dA_array[blockIdx.z];
    float *A = &(A_start[step + step * lda]);        // ib x ib triangular block
    float *B = &(A_start[step + (step+ib) * lda]);   // ib x n right-hand sides
    float *shared_a = shared_data;                   // copy of A (ib x ib, column-major, ld = ib)
    float *shared_b = shared_data+ib*ib;             // copy of B (ib x n)
    int tid = threadIdx.x;
    int i,d;
    // Read A and B together into shared memory: shared_b = shared_a + ib*ib
    // is contiguous, so one loop over n+ib columns loads both.
    if ( tid < ib) {
        #pragma unroll
        for (i=0; i < n+ib; i++) {
            shared_a[tid + i*ib] = A[tid + i*lda];
        }
    }
    __syncthreads();
    // forward substitution, one thread per right-hand-side column
    if (tid < n) {
        #pragma unroll
        for (d=0; d < ib-1; d++) {
            for (i=d+1; i < ib; i++) {
                shared_b[i+tid*ib] += (MAGMA_S_NEG_ONE) * shared_a[i+d*ib] * shared_b[d+tid*ib];
            }
        }
    }
    __syncthreads();
    // write back B
    if ( tid < ib) {
        #pragma unroll
        for (i=0; i < n; i++) {
            B[tid + i*lda] = shared_b[tid + i*ib];
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
sgetf2trsm solves one of the matrix equations on gpu
B = C^-1 * B
where C, B are part of the matrix A in dA_array,
This version load C, B into shared memory and solve it
and copy back to GPU device memory.
This is an internal routine that might have many assumption.
Arguments
---------
@param[in]
ib INTEGER
The number of rows/columns of each matrix C, and rows of B. ib >= 0.
@param[in]
n INTEGER
The number of columns of each matrix B. n >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[in]
step INTEGER
The starting address of matrix C in A. LDDA >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_sgesv_aux
********************************************************************/
extern "C" void
magma_sgetf2trsm_batched(magma_int_t ib, magma_int_t n, float **dA_array,
                         magma_int_t step, magma_int_t ldda,
                         magma_int_t batchCount, magma_queue_t queue)
{
    // Batched in-shared-memory triangular solve B = C^-1 * B for the panel
    // blocks C (ib x ib) and B (ib x n) inside each matrix of dA_array.
    // The whole ib x (ib+n) panel is loaded into shared memory, so sizes
    // past the shared-memory cap are refused.
    if (n == 0 || ib == 0) return;
    const size_t shmem = sizeof(float) * ib * (ib + n);
    // TODO TODO TODO
    if (shmem > (MAX_SHARED_ALLOWED * 1024)) {   // keep below the per-block shared-memory limit
        fprintf( stderr, "%s: error out of shared memory\n", __func__ );
        return;
    }
    const dim3 blocks(1, 1, batchCount);
    const dim3 nthread(max(n, ib), 1, 1);
    hipLaunchKernelGGL(sgetf2trsm_kernel_batched, blocks, nthread, shmem, queue->cuda_stream(),
                       ib, n, dA_array, step, ldda);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
zupdate_device(int m, int step, float* x, int ldx, float *A, int lda)
{
    // Apply the eliminations of the previous 'step' columns to the column A:
    // for each prior column i, subtract A[i] * x[row + i*ldx] from every row
    // below i. Threads stride over the m rows MAX_NTHREADS at a time, with a
    // block-wide barrier after each column because iteration i+1 reads A[i+1],
    // which iteration i may have just written.
    // NOTE(review): the 'lda' parameter is unused in this body.
    int tid = threadIdx.x;
    int nchunk = magma_ceildiv( m, MAX_NTHREADS );
    int indx;
    // update the current column by all the previous ones
    #pragma unroll
    for (int i=0; i < step; i++) {
        for (int s=0; s < nchunk; s++)
        {
            indx = tid + s * MAX_NTHREADS;
            if ( indx > i && indx < m ) {
                A[indx] -= A[i] * x[indx + i*ldx];
            }
        }
        __syncthreads();
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
static __device__ void
sscal5_device(int m, float* x, float alpha)
{
    // Divide all m entries of x by alpha, each thread handling indices
    // tid, tid + MAX_NTHREADS, tid + 2*MAX_NTHREADS, ...  Ends with a
    // block-wide barrier so the scaled vector is visible to every thread.
    const int nchunk = magma_ceildiv(m, MAX_NTHREADS);
    for (int s = 0; s < nchunk; s++) {
        const int idx = threadIdx.x + s * MAX_NTHREADS;
        if (idx < m)
            x[idx] = x[idx] / alpha;
    }
    __syncthreads();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
zcomputecolumn_kernel_shared_batched(int m, int paneloffset, int step, float **dA_array, int lda, magma_int_t **ipiv_array, magma_int_t *info_array, int gbstep)
{
    // One panel-factorization step done entirely in shared memory:
    // load column (paneloffset+step), apply the updates of the previous
    // 'step' columns, find the pivot (ISAMAX), scale the sub-column by the
    // pivot, and write the column back. One thread block per batch entry.
    int gboff = paneloffset+step;             // global offset of this step's column
    magma_int_t *ipiv = ipiv_array[blockIdx.z];
    float *A_start = dA_array[blockIdx.z];
    float *A0j = &(A_start[paneloffset + (paneloffset+step) * lda]);   // current column
    float *A00 = &(A_start[paneloffset + paneloffset * lda]);          // top-left of the panel
    float *shared_A = shared_data;            // the column, m floats (dynamic shared)
    __shared__ float shared_x[zamax];         // ISAMAX scratch: values
    __shared__ int shared_idx[zamax];         // ISAMAX scratch: indices
    __shared__ float alpha;                   // pivot value, broadcast by thread 0
    int tid = threadIdx.x;
    // checkinfo: skip the computation if this matrix was already flagged singular
    if (info_array[blockIdx.z] != 0 ) return;
    int nchunk = magma_ceildiv( m, MAX_NTHREADS );
    // read the current column from device memory into shared memory
    for (int s=0; s < nchunk; s++)
    {
        if ( (tid + s * MAX_NTHREADS) < m ) shared_A[tid + s * MAX_NTHREADS] = A0j[tid + s * MAX_NTHREADS];
    }
    __syncthreads();
    // apply the eliminations of the previous 'step' columns to this column
    if ( step > 0 ) {
        zupdate_device( m, step, A00, lda, shared_A, 1);
        __syncthreads();
    }
    // DO NOT guard this call with "if (tid < m-step)": isamax_devfunc contains
    // __syncthreads(), so every thread must call it; it only needs zamax
    // threads and handles the size internally.
    isamax_devfunc(m-step, shared_A+step, 1, shared_x, shared_idx);
    if (tid == 0) {
        ipiv[gboff] = shared_idx[0] + gboff + 1; // Fortran Indexing
        alpha = shared_A[shared_idx[0]+step];    // pivot value for the whole block
        if (shared_x[0] == MAGMA_D_ZERO) {
            // zero pivot: record the singular column (1-based, global offset)
            info_array[blockIdx.z] = shared_idx[0] + gboff + gbstep + 1;
        }
    }
    __syncthreads();
    // shared_x[0] is uniform across the block, so this early exit is safe
    if (shared_x[0] == MAGMA_D_ZERO) return;
    __syncthreads();
    // scale the sub-column by 1/alpha (sscal5_device has its own barrier)
    sscal5_device( m-step, shared_A+step, alpha);
    // restore the pivot entry, which the scaling just turned into 1
    if (tid == 0) shared_A[shared_idx[0] + step] = alpha;
    __syncthreads();
    // write the updated column back from shared to device memory
    for (int s=0; s < nchunk; s++)
    {
        if ( (tid + s * MAX_NTHREADS) < m )
        {
            A0j[tid + s * MAX_NTHREADS] = shared_A[tid + s * MAX_NTHREADS];
        }
    }
    __syncthreads();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
magma_int_t magma_scomputecolumn_batched(magma_int_t m, magma_int_t paneloffset, magma_int_t step,
        float **dA_array, magma_int_t lda,
        magma_int_t **ipiv_array,
        magma_int_t *info_array, magma_int_t gbstep,
        magma_int_t batchCount, magma_queue_t queue)
{
    /*
        Compute one column of the LU panel factorization in shared memory:
        update column (paneloffset+step) with the previous columns, find and
        record its pivot in ipiv, and scale it by the pivot.
        (The previous comment here described the sscal/sger kernel and was a
        copy-paste mistake.)
    */
    if ( m == 0) return 0;
    // total shared usage of the kernel: zamax (value,index) reduction slots,
    // statics (alpha etc.), and the m-float dynamic column buffer
    size_t all_shmem_size = zamax*(sizeof(float)+sizeof(int)) + (m+2)*sizeof(float);
    if ( all_shmem_size > (MAX_SHARED_ALLOWED*1024) ) // keep below the per-block shared-memory limit
    {
        fprintf( stderr, "%s error out of shared memory\n", __func__ );
        return -20;
    }
    size_t shared_size = sizeof(float)*m;   // dynamic portion: the column itself
    dim3 grid(1, 1, batchCount);
    dim3 threads(min(m, MAX_NTHREADS), 1, 1);
    hipLaunchKernelGGL(( zcomputecolumn_kernel_shared_batched)
    , dim3(grid), dim3(threads), shared_size, queue->cuda_stream() ,
    m, paneloffset, step, dA_array, lda, ipiv_array, info_array, gbstep);
    return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
kernel_sgetf2_sm_batched(
    magma_int_t m, magma_int_t ib,
    float **dA_array, magma_int_t lda,
    magma_int_t **ipiv_array,
    magma_int_t *info_array)
{
    // LU factorization with partial pivoting of one m x ib panel, entirely in
    // shared memory: load the panel, then for each column j find the pivot,
    // swap rows, scale, and rank-1-update the trailing columns; finally write
    // the factored panel back. One thread block per batch entry (blockIdx.z).
    // NOTE(review): a zero pivot only triggers a printf here — info_array is
    // declared but never written in this kernel; confirm whether that is
    // intended.
    magma_int_t *ipiv = ipiv_array[blockIdx.z];
    int tx = threadIdx.x;
    float *shared_A = shared_data;                    // the m x ib panel (dynamic shared)
    float *A = dA_array[blockIdx.z];
    float *shared_x = (float*)(shared_A + m * ib);    // zamax pivot-search values
    int *shared_idx = (int*)(shared_x + zamax);       // zamax pivot-search indices
    float res;
    int length;
    __shared__ int jp;                                // pivot row of the current column
    // load the panel into shared memory
    if (tx < m)
    {
        #pragma unroll 8
        for (int i=0; i < ib; i++)
        {
            shared_A[tx + i * m] = A[tx + i * lda];
        }
    }
    __syncthreads();
    for (int j=0; j < ib; j++)
    {
        length = m - j;           // rows at and below the diagonal of column j
        int offset = j + j*m;     // linear index of the diagonal element (j,j)
        //======================================
        // find the pivot: max |value| in column j, rows j..m-1
        if (tx < zamax)
        {
            if ( tx < length)
            {
                res = shared_A[tx + offset];
                shared_x[tx] = fabs(MAGMA_S_REAL(res)) + fabs(MAGMA_S_IMAG(res));
                shared_idx[tx] = tx;
            }
            else
            {
                // neutral padding so the fixed-size reduction is safe
                shared_x[tx] = 0.0;
                shared_idx[tx] = 0;
            }
        }
        __syncthreads();
        if (length >= zamax) // all zamax slots are initialized above, so the fixed-size reduction is safe
            magma_getidmax<zamax>(tx, shared_x, shared_idx);
        else
            magma_getidmax_n(min(zamax,length), tx, shared_x, shared_idx);
        if (tx == 0)
        {
            jp = shared_idx[0];   // pivot row, relative to row j
            if (shared_A[jp + offset] == 0.0) printf("error, A(jp,j) == 0.0\n");
            ipiv[j] = j + (jp + 1); // Fortran Indexing
        }
        __syncthreads();
        //======================================
        // swap rows j and j+jp (jp == 0 means the pivot is already in place)
        if ( jp != 0)
        {
            if (tx < ib) {
                float tmp = shared_A[jp + j + tx*m];
                shared_A[jp + j + tx*m] = shared_A[j + tx*m];
                shared_A[j + tx*m] = tmp;
            }
        }
        __syncthreads();
        //======================================
        // scale column j below the diagonal, then rank-1 update of the
        // trailing columns (ger); one thread per row
        if (tx < length && tx > 0)
        {
            res = shared_A[tx + offset];
            res *= MAGMA_S_DIV(MAGMA_S_ONE, shared_A[0 + offset]); // scaling by 1/pivot
            shared_A[tx + offset] = res;
            #pragma unroll 8
            for (int i=1; i < ib-j; i++)
            {
                shared_A[tx + i*m + offset] += (MAGMA_S_NEG_ONE) * shared_A[i*m + offset] * res;
            }
        }
        __syncthreads();
    } // end of j
    //======================================
    // write the factored panel back to device memory
    if (tx < m)
    {
        #pragma unroll 8
        for (int i=0; i < ib; i++)
        {
            A[tx + i * lda] = shared_A[tx + i * m];
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SGETF2_SM computes an LU factorization of a general M-by-N matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 3 BLAS version of the algorithm.
This is a batched version that factors batchCount M-by-N matrices in parallel.
dA, ipiv, and info become arrays with one entry per matrix.
This version load entire matrix (m*ib) into shared memory and factorize it
with pivoting and copy back to GPU device memory.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix A. M >= 0.
@param[in]
ib INTEGER
The number of columns of each matrix A. ib >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occured, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_sgesv_aux
********************************************************************/
extern "C"
magma_int_t magma_sgetf2_sm_batched(
    magma_int_t m, magma_int_t ib,
    float **dA_array, magma_int_t ldda,
    magma_int_t **ipiv_array,
    magma_int_t *info_array,
    magma_int_t batchCount, magma_queue_t queue)
{
    // Factorize each m x ib panel entirely in shared memory (with pivoting)
    // and copy it back. Shared usage: the panel itself, the zamax-wide
    // (value, index) pivot-search scratch, and one int for the pivot row.
    const size_t shared_size = sizeof(float) * m * ib
                             + (zamax) * (sizeof(float) + sizeof(int))
                             + sizeof(int);
    if (shared_size > 47000) {   // conservative cap just under the 48K limit
        fprintf( stderr, "%s: shared memory = %d, exceeds 48K, kernel cannot run\n", __func__, int(shared_size) );
        return 1;
    }
    const dim3 blocks(1, 1, batchCount);
    const dim3 nthread(max(max(zamax, m), ib), 1, 1);
    hipLaunchKernelGGL(kernel_sgetf2_sm_batched, blocks, nthread, shared_size, queue->cuda_stream(),
                       m, ib, dA_array, ldda, ipiv_array, info_array);
    return 0;
}
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@author Azzam Haidar
@author Tingxing Dong
@generated from magmablas/zgetf2_kernels.cu normal z -> s, Tue Feb 9 16:05:38 2016
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
#include "magma_templates.h"
/*
Purpose
-------
These are internal routines that might have many assumption.
They are used in sgetf2_batched.cpp
No documentation is available today.
@ingroup magma_sgesv_aux
*/
#define PRECISION_s
#define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ float shared_data[];
extern __shared__ float sdata[];
extern __shared__ int int_sdata[];
//////////////////////////////////////////////////////////////////////////////////////////
__device__ int
isamax_devfunc(int length, const float *x, int incx, float *shared_x, int *shared_idx)
{
    // Block-level ISAMAX: returns the 0-based index of the element of x with
    // the largest |Re| + |Im| among 'length' entries with stride incx.
    // The first zamax threads cooperate; each scans ceil(length/zamax)
    // strided chunks, then a shared-memory reduction picks the winner.
    // Contains __syncthreads(): every thread of the block must call this.
    int tx = threadIdx.x;
    float res;
    float res1;
    int nchunk = magma_ceildiv( length, zamax );
    if ( tx < zamax ) {
        shared_x[tx] = 0.0;
        shared_idx[tx] = tx; //-1; // -1 would crash on a singular matrix; tx keeps the index valid, with an info check at output
    }
    __syncthreads();
    for (int s =0; s < nchunk; s++)
    {
        if ( (tx + s * zamax < length) && (tx < zamax) )
        {
            res = x[(tx + s * zamax) * incx];
            res1 = fabs(MAGMA_S_REAL(res)) + fabs(MAGMA_S_IMAG(res));   // comparison norm
            if ( res1 > shared_x[tx] )
            {
                shared_x[tx] = res1;
                shared_idx[tx] = tx + s * zamax;
            }
        }
        __syncthreads();
    }
    if (length >= zamax) // all zamax slots were written above, so the fixed-size reduction is safe
        magma_getidmax<zamax>(tx, shared_x, shared_idx);
    else
        magma_getidmax_n(min(zamax,length), tx, shared_x, shared_idx);
    return shared_idx[0];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
isamax_kernel_batched(int length, int chunk, float **x_array, int incx,
                      int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep)
{
    // Single-kernel batched ISAMAX over the column tail starting at (step,step):
    // one thread block per batch entry finds the pivot via isamax_devfunc;
    // thread 0 stores the Fortran-indexed result in ipiv[step] and flags a
    // zero pivot through info_array.
    // NOTE(review): the 'chunk' parameter is unused here — the chunking is
    // recomputed inside isamax_devfunc.
    float *x_start = x_array[blockIdx.z];
    const float *x = &(x_start[step + step * lda]);   // column tail from the diagonal down
    magma_int_t *ipiv = ipiv_array[blockIdx.z];
    int tx = threadIdx.x;
    float *shared_x = sdata;                      // dynamic shared: zamax floats ...
    int *shared_idx = (int*)(shared_x + zamax);   // ... followed by zamax ints
    isamax_devfunc(length, x, incx, shared_x, shared_idx);
    if (tx == 0) {
        ipiv[step] = shared_idx[0] + step + 1; // Fortran Indexing
        if (shared_x[0] == MAGMA_D_ZERO) {
            info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1;
        }
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void
tree_isamax_kernel_batched(int length, float **x_array, int incx,
                           int step, int lda, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep,
                           float** data_pool_array, magma_int_t** id_pool_array)
{
    // First level of the two-level batched ISAMAX tree: each thread block
    // reduces one zamax-sized chunk of the column tail at (step,step).
    // With a single block (gridDim.x == 1) the pivot is final and written to
    // ipiv directly; otherwise each block parks its (max, argmax) pair in the
    // workspace pools for the second-level kernel.
    float *x_start = x_array[blockIdx.z];
    const float *x = &(x_start[step + step * lda]);
    float *data_pool = data_pool_array[blockIdx.z];
    magma_int_t *id_pool = id_pool_array[blockIdx.z];
    magma_int_t *ipiv = ipiv_array[blockIdx.z];
    int tx = threadIdx.x;
    int local_max_id;
    __shared__ float shared_x[zamax];
    __shared__ int shared_idx[zamax];
    x += zamax * blockIdx.x * incx;   // advance to this block's chunk
    isamax_devfunc(min(zamax, length-blockIdx.x * zamax), x, incx, shared_x, shared_idx);
    if (tx == 0)
    {
        local_max_id = shared_idx[0] + zamax * blockIdx.x; // add the chunk offset
        if (gridDim.x == 1)
        {
            ipiv[step] = local_max_id + step + 1; // Fortran Indexing
            if (shared_x[0] == MAGMA_D_ZERO)
                info_array[blockIdx.z] = local_max_id + step + gbstep + 1;
        }
        else
        {
            // park this block's local max and its index in the workspace pools
            data_pool[blockIdx.x] = shared_x[0];
            id_pool[blockIdx.x] = local_max_id;
        }
    }
}
__global__ void
tree_isamax_kernel2_batched(int n, int step, magma_int_t** ipiv_array, magma_int_t *info_array, int gbstep, float** data_pool_array, magma_int_t** id_pool_array)
{
    // Second level of the batched ISAMAX tree: reduce the n per-block
    // (max, argmax) pairs produced by the first level to the final pivot.
    // Thread 0 writes the Fortran-indexed result into ipiv[step] and flags a
    // zero pivot through info_array. Launched with zamax threads; requires
    // n <= zamax (checked by the host wrapper).
    __shared__ float shared_x[zamax];
    __shared__ int shared_idx[zamax];
    magma_int_t *ipiv = ipiv_array[blockIdx.z];
    float *data_pool = data_pool_array[blockIdx.z];
    magma_int_t *id_pool = id_pool_array[blockIdx.z];
    int tx = threadIdx.x;
    // read the first-level results
    if ( tx < n)
    {
        shared_x[tx] = data_pool[tx];
        shared_idx[tx] = id_pool[tx];
    }
    else
    {
        // neutral padding so the fixed-size reduction can use all zamax slots
        shared_x[tx] = 0.0;
        shared_idx[tx] = -2;
    }
    __syncthreads();
    // block-wide max/argmax reduction
    magma_getidmax<zamax>(tx, shared_x, shared_idx);
    if (tx == 0 )
    {
        ipiv[step] = shared_idx[0] + step + 1; // Fortran Indexing
        if (shared_x[0] == MAGMA_D_ZERO)
            info_array[blockIdx.z] = shared_idx[0] + step + gbstep + 1;
    }
}
// Two-level ("large") batched ISAMAX:
//   level 1: one thread block per zamax-sized chunk of the column reduces its
//            chunk to a (max, argmax) pair stored in per-batch workspace pools;
//   level 2: when more than one chunk was needed, a second kernel reduces the
//            pools to the final pivot, written Fortran-indexed into ipiv[step].
// NOTE(review): the magma_*malloc return codes are not checked, and the
// num_blocks > zamax error path still returns 0 — callers cannot detect that
// failure from the return value. TODO confirm whether callers rely on this.
magma_int_t magma_isamax_lg_batched(magma_int_t length, float **x_array, magma_int_t incx, magma_int_t step, magma_int_t lda,
        magma_int_t** ipiv_array, magma_int_t *info_array, magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
    if (length == 1) return 0;   // single element: the pivot is trivially element 0
    if (incx < 0) return 1;      // negative increments are not supported
    float* data_pool;            // per-block partial maxima (num_blocks per batch entry)
    magma_int_t* id_pool;        // per-block argmax indices
    float** data_pool_array = NULL;   // batched pointer views into the pools
    magma_int_t** id_pool_array = NULL;
    magma_int_t num_blocks = (length-1)/(zamax) + 1;   // ceil(length / zamax)
    // create pools (data and index) to store the result of each thread block
    magma_smalloc(&data_pool, num_blocks * batchCount);
    magma_imalloc(&id_pool, num_blocks * batchCount);
    magma_malloc((void**)&data_pool_array, batchCount * sizeof(*data_pool_array));
    magma_malloc((void**)&id_pool_array, batchCount * sizeof(*id_pool_array));
    // NOTE(review): both preprocessor branches below are identical — an
    // artifact of the precision-generation script.
#if defined(PRECISION_z) || defined(PRECISION_d)
    magma_sset_pointer( data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue );
#else
    magma_sset_pointer( data_pool_array, data_pool, 1, 0, 0, num_blocks, batchCount, queue );
#endif
    magma_iset_pointer( id_pool_array, id_pool, 1, 0, 0, num_blocks, batchCount, queue );
    if ( num_blocks > zamax)
    {
        // the second-level reduction runs in a single block of zamax threads,
        // so more than zamax first-level results cannot be handled
        fprintf( stderr, "%s: length(=%d), num_blocks(=%d) is too big > zamax(=%d), the second layer reduction can not be launched, Plz incread zamax\n",
                __func__, int(length), int(num_blocks), int(zamax));
    }
    else
    {
        // first level tree reduction: gridDim.x = chunks, gridDim.z = batch
        dim3 grid(num_blocks, 1, batchCount);
        dim3 threads(zamax, 1, 1);
        tree_isamax_kernel_batched
            <<< grid, threads, 0, queue->cuda_stream() >>>
            (length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array);
        if ( num_blocks > 1)
        {
            // second level tree reduction over the num_blocks partial results
            dim3 grid2(1, 1, batchCount);
            tree_isamax_kernel2_batched
                <<< grid2, threads, 0, queue->cuda_stream() >>>
                (num_blocks, step, ipiv_array, info_array, gbstep, data_pool_array, id_pool_array);
        }
    }
    magma_free(data_pool);
    magma_free(id_pool);
    magma_free(data_pool_array);
    magma_free(id_pool_array);
    return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
ISAMAX find the index of max absolute value of elements in x and store the index in ipiv
This is an internal routine that might have many assumption.
Arguments
---------
@param[in]
length INTEGER
On entry, length specifies the size of vector x. length >= 0.
@param[in]
x_array Array of pointers, dimension (batchCount).
Each is a REAL array of dimension
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
step INTEGER
the offset of ipiv
@param[in]
lda INTEGER
The leading dimension of each array A, internal use to find the starting position of x.
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occured, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
gbstep INTEGER
the offset of info, internal use
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_sgesv_aux
********************************************************************/
extern "C"
magma_int_t magma_isamax_batched(magma_int_t length,
        float **x_array, magma_int_t incx,
        magma_int_t step, magma_int_t lda,
        magma_int_t** ipiv_array, magma_int_t *info_array,
        magma_int_t gbstep, magma_int_t batchCount, magma_queue_t queue)
{
    // Batched ISAMAX driver: for each batch entry, find the pivot of column
    // x(step:, step) and write its Fortran (1-based) index into ipiv[step];
    // a zero pivot is reported through info_array. One thread block of zamax
    // threads per batch entry (blockIdx.z indexes the batch).
    if (length == 0 ) return 0;   // empty vector: nothing to search
    dim3 grid(1, 1, batchCount);
    dim3 threads(zamax, 1, 1);
#if 1
    // single-kernel path: each thread strides over ceil(length/zamax) chunks;
    // dynamic shared memory holds zamax (value, index) reduction slots
    int chunk = magma_ceildiv( length, zamax );
    isamax_kernel_batched<<< grid, threads, zamax * (sizeof(float) + sizeof(int)), queue->cuda_stream() >>>
        (length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep);
#else
    // the magma_isamax_lg_batched is faster but when cuda launch it as 2 kernels the white space time between these 2 kernels and the next kernel is larger than using the isamax_kernel for that today we are using only isamax_kernel
    if ( length <= 10 * zamax )
    {
        int chunk = magma_ceildiv( length, zamax );
        isamax_kernel_batched<<< grid, threads, zamax * (sizeof(float) + sizeof(magma_int_t)), queue->cuda_stream() >>>
            (length, chunk, x_array, incx, step, lda, ipiv_array, info_array, gbstep);
    }
    else
    {
        // BUG FIX: this call was missing the required trailing 'queue'
        // argument (see the magma_isamax_lg_batched signature) and would not
        // compile if this #else branch were ever enabled.
        magma_isamax_lg_batched(length, x_array, incx, step, lda, ipiv_array, info_array, gbstep, batchCount, queue);
    }
#endif
    return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__
void sswap_kernel_batched(magma_int_t n, float **x_array, magma_int_t incx, magma_int_t step, magma_int_t** ipiv_array)
{
    // Exchange rows 'step' and (ipiv[step]-1) of this batch entry's vector.
    // Thread 0 fetches the pivot row once into shared memory and broadcasts
    // it to the block; each thread then swaps one of the n elements.
    const int tid = threadIdx.x;
    float       *x    = x_array[blockIdx.z];
    magma_int_t *ipiv = ipiv_array[blockIdx.z];

    __shared__ int jp;
    if (tid == 0) {
        jp = ipiv[step] - 1;   // convert Fortran (1-based) pivot to 0-based row
    }
    __syncthreads();

    if (jp == step) return;    // pivot is already on the diagonal: nothing to do

    if (tid < n) {
        const int a = jp   + incx * tid;
        const int b = step + incx * tid;
        float tmp = x[a];
        x[a] = x[b];
        x[b] = tmp;
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
sswap two row in x. index (ipiv[step]-1)-th and index step -th
This is an internal routine that might have many assumption.
Arguments
---------
@param[in]
n INTEGER
On entry, n specifies the size of vector x. n >= 0.
@param[in]
x_array Array of pointers, dimension (batchCount).
Each is a REAL array of dimension
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
step INTEGER
The starting address of matrix C in A. LDDA >= max(1,M).
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_sgesv_aux
********************************************************************/
/**
 * Batched SSWAP helper for LU panel factorization: for each matrix in the
 * batch, swaps row `step` with row ipiv[step]-1 across n columns (row
 * elements are `incx` apart in memory).
 *
 * @param n           number of elements to swap per row; must be <= MAX_NTHREADS
 * @param x_array     batchCount device pointers to the matrices
 * @param incx        stride between consecutive elements of a row
 * @param step        0-based row index to swap with the pivot row
 * @param ipiv_array  batchCount device pointers to 1-based pivot index arrays
 * @param batchCount  number of matrices to operate on
 * @param queue       magma queue (stream) to execute in
 * @return 0 on success, -15 when n exceeds the per-block thread limit
 */
extern "C"
magma_int_t magma_sswap_batched(magma_int_t n, float **x_array, magma_int_t incx,
magma_int_t step, magma_int_t** ipiv_array,
magma_int_t batchCount, magma_queue_t queue)
{
/*
sswap two rows: the (ipiv[step]-1)-th and the step-th
*/
if ( n == 0 ) return 0; // nothing to swap; also avoids a zero-thread launch below
if ( n > MAX_NTHREADS)
{
fprintf( stderr, "%s nb=%d > %d, not supported\n", __func__, int(n), int(MAX_NTHREADS) );
return -15;
}
dim3 grid(1,1, batchCount);
// BUGFIX: size the block by n so every element is covered. The previous
// code always launched `zamax` threads, which silently left elements
// [zamax, n) unswapped whenever zamax < n <= MAX_NTHREADS.
dim3 threads(min(n, MAX_NTHREADS), 1, 1);
sswap_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
(n, x_array, incx, step, ipiv_array);
return 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////
// Fused "scale column + rank-1 update" kernel for one LU panel step.
// For each matrix in the batch (blockIdx.z), working on the m x n trailing
// block A(step:, step:):
//   1) scales column 0 below the diagonal by 1/A(0,0);
//   2) applies the rank-1 update A(1:,1:) += (-1) * A(1:,0) * A(0,1:).
// The pivot row A(0,0:n-1) is cached in dynamic shared memory (n floats).
// NOTE(review): only threads tx < n load the row, and blockDim.x =
// min(m, MAX_NTHREADS) — confirm callers guarantee m >= n, otherwise the
// cache would be partially uninitialized.
__global__
void sscal_sger_kernel_batched(int m, int n, int step, float **dA_array, int lda, magma_int_t *info_array, int gbstep)
{
// checkinfo to avoid computation on an already-singular matrix
if (info_array[blockIdx.z] != 0 ) return;
float *A_start = dA_array[blockIdx.z];
float *A = &(A_start[step + step * lda]);
float *shared_y = shared_data; // cached pivot row A(0, 0:n-1)
int tx = threadIdx.x;
int gbidx = blockIdx.x*MAX_NTHREADS + threadIdx.x; // global row index within the panel
if (tx < n) {
shared_y[tx] = A[lda * tx];
}
__syncthreads();
// zero pivot: record the 1-based global failing position and bail out
if (shared_y[0] == MAGMA_S_ZERO) {
info_array[blockIdx.z] = step + gbstep + 1;
return;
}
// rows 1..m-1: scale by the pivot, then update the trailing columns
if (gbidx < m && gbidx > 0) {
float reg = MAGMA_S_ZERO;
reg = A[gbidx];
reg *= MAGMA_S_DIV(MAGMA_S_ONE, shared_y[0]);
A[gbidx] = reg;
#pragma unroll
for (int i=1; i < n; i++) {
//A[gbidx + i*lda] = A[gbidx + i*lda] - shared_y[i] * reg; //cuda give wrong results with this one
//A[gbidx + i*lda] -= shared_y[i] * reg; //cuda give wrong results with this one
A[gbidx + i*lda] += (MAGMA_S_NEG_ONE) * shared_y[i] * reg;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Host wrapper for sscal_sger_kernel_batched: merged scal + ger panel step.
// Scales A(1:m-1,0) by 1/A(0,0) and then performs the rank-1 update
// A(1:m-1,1:n-1) += (-1) * A(1:m-1,0) * A(0,1:n-1) for every matrix in the
// batch. Returns 0 on success, -15 when n exceeds the per-block thread limit.
extern "C"
magma_int_t magma_sscal_sger_batched(magma_int_t m, magma_int_t n, magma_int_t step,
                                     float **dA_array, magma_int_t lda,
                                     magma_int_t *info_array, magma_int_t gbstep,
                                     magma_int_t batchCount, magma_queue_t queue)
{
    if ( n == 0) return 0;
    if ( n > MAX_NTHREADS )
    {
        // the kernel caches one full row of n pivots per block
        fprintf( stderr, "%s nb=%d, > %d, not supported\n", __func__, int(n), int(MAX_NTHREADS) );
        return -15;
    }

    const int row_chunks = magma_ceildiv( m, MAX_NTHREADS ); // blocks needed to cover m rows
    const size_t smem    = sizeof(float) * n;                // shared buffer for the pivot row

    dim3 blocks(row_chunks, 1, batchCount);
    dim3 nthreads(min(m, MAX_NTHREADS), 1, 1);
    sscal_sger_kernel_batched
        <<< blocks, nthreads, smem, queue->cuda_stream() >>>
        (m, n, step, dA_array, lda, info_array, gbstep);
    return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Batched non-blocked TRSM in shared memory: B = A^-1 * B, where A is the
// ib x ib unit-lower-triangular block at (step, step) of each batch matrix
// and B is the ib x n block immediately to its right. Both blocks are staged
// in dynamic shared memory (ib*(ib+n) floats), solved by forward
// substitution, and written back. One thread block per matrix (blockIdx.z);
// requires blockDim.x >= max(ib, n).
__global__
void sgetf2trsm_kernel_batched(int ib, int n, float **dA_array, int step, int lda)
{
/*
this kernel does the safe nonblocked TRSM operation
B = A^-1 * B
*/
float *A_start = dA_array[blockIdx.z];
float *A = &(A_start[step + step * lda]);
float *B = &(A_start[step + (step+ib) * lda]);
float *shared_a = shared_data;          // ib x ib triangular block
float *shared_b = shared_data+ib*ib;    // ib x n right-hand-side block
int tid = threadIdx.x;
int i,d;
// Read A and B together into shared memory (shared_a, shared_b);
// shared_b = shared_a + ib*ib, so both fit in one contiguous copy loop.
if ( tid < ib) {
#pragma unroll
for (i=0; i < n+ib; i++) {
shared_a[tid + i*ib] = A[tid + i*lda];
}
}
__syncthreads();
// forward substitution: one thread per column of B, eliminating row d
// from rows d+1..ib-1 (A has a unit diagonal, so no division is needed)
if (tid < n) {
#pragma unroll
for (d=0; d < ib-1; d++) {
for (i=d+1; i < ib; i++) {
shared_b[i+tid*ib] += (MAGMA_S_NEG_ONE) * shared_a[i+d*ib] * shared_b[d+tid*ib];
}
}
}
__syncthreads();
// write back B
if ( tid < ib) {
#pragma unroll
for (i=0; i < n; i++) {
B[tid + i*lda] = shared_b[tid + i*ib];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
sgetf2trsm solves one of the matrix equations on gpu
B = C^-1 * B
where C, B are part of the matrix A in dA_array,
This version load C, B into shared memory and solve it
and copy back to GPU device memory.
This is an internal routine that may rely on many assumptions.
Arguments
---------
@param[in]
ib INTEGER
The number of rows/columns of each matrix C, and rows of B. ib >= 0.
@param[in]
n INTEGER
The number of columns of each matrix B. n >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[in]
step INTEGER
The starting address of matrix C in A. LDDA >= max(1,M).
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_sgesv_aux
********************************************************************/
// Host wrapper: launches the batched shared-memory TRSM kernel that solves
// B = C^-1 * B for the ib x ib unit-lower-triangular block C at (step, step)
// and the ib x n block B next to it, for every matrix in dA_array.
// Silently returns on empty input or when the blocks do not fit in shared
// memory (an error is printed in the latter case).
extern "C" void
magma_sgetf2trsm_batched(magma_int_t ib, magma_int_t n, float **dA_array,
                         magma_int_t step, magma_int_t ldda,
                         magma_int_t batchCount, magma_queue_t queue)
{
    // nothing to solve for an empty block
    if ( ib == 0 || n == 0 ) return;

    // the kernel stages C (ib x ib) and B (ib x n) in shared memory
    const size_t smem = sizeof(float) * (ib * (ib + n));
    if ( smem > (MAX_SHARED_ALLOWED*1024) )
    {
        fprintf( stderr, "%s: error out of shared memory\n", __func__ );
        return;
    }

    dim3 blocks(1, 1, batchCount);      // one thread block per matrix
    dim3 nthreads(max(n,ib), 1, 1);     // enough lanes for both rows and columns
    sgetf2trsm_kernel_batched
        <<< blocks, nthreads, smem, queue->cuda_stream() >>>
        (ib, n, dA_array, step, ldda);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Eliminate the contributions of the `step` previously factored columns
// (stored in x, leading dimension ldx) from the current column A (length m,
// unit stride): A[indx] -= A[i] * x[indx + i*ldx] for rows indx > i.
// Rows are processed MAX_NTHREADS at a time; a block-wide barrier follows
// every column i because iteration i+1 reads the A[i+1] value just written.
// NOTE(review): parameter `lda` is unused here — presumably kept for call-site
// symmetry; confirm before removing.
static __device__ void
zupdate_device(int m, int step, float* x, int ldx, float *A, int lda)
{
int tid = threadIdx.x;
int nchunk = magma_ceildiv( m, MAX_NTHREADS );
int indx;
//float reg = MAGMA_S_ZERO;
// update the current column by all the previous ones
#pragma unroll
for (int i=0; i < step; i++) {
for (int s=0; s < nchunk; s++)
{
indx = tid + s * MAX_NTHREADS;
if ( indx > i && indx < m ) {
A[indx] -= A[i] * x[indx + i*ldx];
//printf(" @ step %d tid %d updating x[tid]*y[i]=A %5.3f %5.3f = %5.3f at i %d\n", step, tid, x[tid + i*ldx], A[i], A[tid],i);
}
}
__syncthreads();
}
//printf(" @ step %d tid %d adding %5.3f to A %5.3f make it %5.3f\n",step,tid,-reg,A[tid],A[tid]-reg);
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Divide the first m entries of x by alpha, processing MAX_NTHREADS entries
// per pass; ends with a block-wide barrier so callers can immediately reuse x.
static __device__ void
sscal5_device(int m, float* x, float alpha)
{
    const int nchunk = magma_ceildiv( m, MAX_NTHREADS );
    for (int chunk = 0; chunk < nchunk; chunk++)
    {
        const int idx = threadIdx.x + chunk * MAX_NTHREADS;
        if (idx < m)
            x[idx] = x[idx] / alpha;
    }
    __syncthreads();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Factor one column of an LU panel entirely in shared memory, for each matrix
// in the batch (blockIdx.z). Steps: load column (paneloffset+step) into
// shared memory, apply the updates from the `step` previously factored panel
// columns, find the pivot (max |value|), record it in ipiv (1-based), scale
// the sub-column by the pivot, and write the column back to device memory.
// Dynamic shared memory must hold m floats; one thread block per matrix.
__global__ void
zcomputecolumn_kernel_shared_batched(int m, int paneloffset, int step, float **dA_array, int lda, magma_int_t **ipiv_array, magma_int_t *info_array, int gbstep)
{
int gboff = paneloffset+step;
magma_int_t *ipiv = ipiv_array[blockIdx.z];
float *A_start = dA_array[blockIdx.z];
float *A0j = &(A_start[paneloffset + (paneloffset+step) * lda]); // current column
float *A00 = &(A_start[paneloffset + paneloffset * lda]);        // top-left of the panel
float *shared_A = shared_data;
__shared__ float shared_x[zamax];   // |value| workspace for the pivot search
__shared__ int shared_idx[zamax];   // index workspace for the pivot search
__shared__ float alpha;             // the pivot value, broadcast to the block
int tid = threadIdx.x;
// checkinfo to avoid computation on an already-singular matrix
if (info_array[blockIdx.z] != 0 ) return;
int nchunk = magma_ceildiv( m, MAX_NTHREADS );
// read the current column from dev to shared memory
for (int s=0; s < nchunk; s++)
{
if ( (tid + s * MAX_NTHREADS) < m ) shared_A[tid + s * MAX_NTHREADS] = A0j[tid + s * MAX_NTHREADS];
}
__syncthreads();
// update this column with the previously factored columns
if ( step > 0 ) {
zupdate_device( m, step, A00, lda, shared_A, 1);
__syncthreads();
}
// if ( tid < (m-step) ) // DO NOT PUT THE IF CONDITION HERE SINCE isamax_devfunc HAS __syncthreads INSIDE.
// So let all threads call this routine; it handles sizes correctly itself.
isamax_devfunc(m-step, shared_A+step, 1, shared_x, shared_idx);
if (tid == 0) {
ipiv[gboff] = shared_idx[0] + gboff + 1; // Fortran Indexing
alpha = shared_A[shared_idx[0]+step];
//printf("@ step %d ipiv=%d where gboff=%d shared_idx %d alpha %5.3f\n",step,ipiv[gboff],gboff,shared_idx[0],alpha);
// zero pivot: record the 1-based global failing position
if (shared_x[0] == MAGMA_D_ZERO) {
info_array[blockIdx.z] = shared_idx[0] + gboff + gbstep + 1;
}
}
__syncthreads();
if (shared_x[0] == MAGMA_D_ZERO) return;
__syncthreads();
// DO NOT PUT AN IF CONDITION HERE SINCE sscal5_device HAS __syncthreads INSIDE.
sscal5_device( m-step, shared_A+step, alpha);
// restore the pivot element, which was just divided by itself (became 1)
if (tid == 0) shared_A[shared_idx[0] + step] = alpha;
__syncthreads();
// write back from shared to dev memory
for (int s=0; s < nchunk; s++)
{
if ( (tid + s * MAX_NTHREADS) < m )
{
A0j[tid + s * MAX_NTHREADS] = shared_A[tid + s * MAX_NTHREADS];
//printf("@ step %d tid %d updating A=x*alpha after A= %5.3f\n",step,tid,shared_A[tid]);
}
}
__syncthreads();
}
////////////////////////////////////////////////////////////////////////////////////////////////////
// Host wrapper for zcomputecolumn_kernel_shared_batched: factors one panel
// column per matrix (update by previous columns, pivot search, scaling)
// entirely in shared memory. Returns 0 on success, -20 when the column does
// not fit into the allowed shared-memory budget.
extern "C"
magma_int_t magma_scomputecolumn_batched(magma_int_t m, magma_int_t paneloffset, magma_int_t step,
                                         float **dA_array, magma_int_t lda,
                                         magma_int_t **ipiv_array,
                                         magma_int_t *info_array, magma_int_t gbstep,
                                         magma_int_t batchCount, magma_queue_t queue)
{
    if ( m == 0) return 0;

    // total static + dynamic shared usage: zamax (value, index) pairs for the
    // pivot search plus the m-element column (and a little slack)
    const size_t all_shmem_size = zamax*(sizeof(float)+sizeof(int)) + (m+2)*sizeof(float);
    if ( all_shmem_size > (MAX_SHARED_ALLOWED*1024) )
    {
        fprintf( stderr, "%s error out of shared memory\n", __func__ );
        return -20;
    }

    const size_t dyn_smem = sizeof(float) * m;  // dynamic part: the column itself
    dim3 blocks(1, 1, batchCount);
    dim3 nthreads(min(m, MAX_NTHREADS), 1, 1);
    zcomputecolumn_kernel_shared_batched
        <<< blocks, nthreads, dyn_smem, queue->cuda_stream() >>>
        (m, paneloffset, step, dA_array, lda, ipiv_array, info_array, gbstep);
    return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
// Batched LU factorization with partial pivoting of an m x ib matrix,
// performed entirely in shared memory. One thread block per matrix
// (blockIdx.z); dynamic shared memory layout: m*ib floats for the matrix,
// zamax floats + zamax ints for the pivot search.
// Requires blockDim.x >= max(zamax, m, ib) (enforced by the host wrapper).
__global__ void
kernel_sgetf2_sm_batched(
magma_int_t m, magma_int_t ib,
float **dA_array, magma_int_t lda,
magma_int_t **ipiv_array,
magma_int_t *info_array)
{
magma_int_t *ipiv = ipiv_array[blockIdx.z];
int tx = threadIdx.x;
float *shared_A = shared_data;                    // the whole m x ib panel
float *A = dA_array[blockIdx.z];
float *shared_x = (float*)(shared_A + m * ib);    // |value| workspace for pivoting
int *shared_idx = (int*)(shared_x + zamax);       // index workspace for pivoting
float res;
int length;
__shared__ int jp;                                // pivot offset within the sub-column
// load data to shared memory
if (tx < m)
{
#pragma unroll 8
for (int i=0; i < ib; i++)
{
shared_A[tx + i * m] = A[tx + i * lda];
//printf("shared_A=%f ", shared_A[tx + i * m]);
}
}
__syncthreads();
// right-looking factorization, one column j at a time
for (int j=0; j < ib; j++)
{
length = m - j;            // rows below (and including) the diagonal
int offset = j + j*m;      // shared-memory index of the diagonal element
//======================================
// find the pivot: max |value| in column j, rows j..m-1
if (tx < zamax)
{
if ( tx < length)
{
res = shared_A[tx + offset];
// printf("res=%f\n", res);
shared_x[tx] = fabs(MAGMA_S_REAL(res)) + fabs(MAGMA_S_IMAG(res));
shared_idx[tx] = tx;
}
else
{
shared_x[tx] = 0.0;
shared_idx[tx] = 0;
}
}
__syncthreads();
if (length >= zamax) // more than zamax rows: all shared_x/shared_idx slots are initialized, so the fixed-size getidmax can be used
magma_getidmax<zamax>(tx, shared_x, shared_idx);
else
magma_getidmax_n(min(zamax,length), tx, shared_x, shared_idx);
if (tx == 0)
{
jp = shared_idx[0];
if (shared_A[jp + offset] == 0.0) printf("error, A(jp,j) == 0.0\n");
ipiv[j] = j + (jp + 1); // Fortran Indexing
//if (blockIdx.x == 1) printf("jp=%d ", jp + j + 1);
}
__syncthreads();
//======================================
if ( jp != 0) //swap rows j and j+jp across all ib columns
{
if (tx < ib) {
//printf("A[jp]= %f, A[j]=%f, jp=%d\n", shared_A[jp + j + tx*m], shared_A[j + tx*m], jp);
float tmp = shared_A[jp + j + tx*m];
shared_A[jp + j + tx*m] = shared_A[j + tx*m];
shared_A[j + tx*m] = tmp;
}
}
__syncthreads();
//======================================
// Ger: scale the sub-column by the pivot, then rank-1 update the trailing block
if (tx < length && tx > 0)
{
res = shared_A[tx + offset];
res *= MAGMA_S_DIV(MAGMA_S_ONE, shared_A[0 + offset]); // scaling
shared_A[tx + offset] = res;
#pragma unroll 8
for (int i=1; i < ib-j; i++)
{
shared_A[tx + i*m + offset] += (MAGMA_S_NEG_ONE) * shared_A[i*m + offset] * res;
//printf("res= %f, shared_A=%f\n", res, shared_A[i*m + offset]);
}
}
__syncthreads();
} // end of j
//======================================
// write back the factored panel
if (tx < m)
{
#pragma unroll 8
for (int i=0; i < ib; i++)
{
A[tx + i * lda] = shared_A[tx + i * m];
}
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
SGETF2_SM computes an LU factorization of a general M-by-N matrix A
using partial pivoting with row interchanges.
The factorization has the form
A = P * L * U
where P is a permutation matrix, L is lower triangular with unit
diagonal elements (lower trapezoidal if m > n), and U is upper
triangular (upper trapezoidal if m < n).
This is the right-looking Level 3 BLAS version of the algorithm.
This is a batched version that factors batchCount M-by-N matrices in parallel.
dA, ipiv, and info become arrays with one entry per matrix.
This version load entire matrix (m*ib) into shared memory and factorize it
with pivoting and copy back to GPU device memory.
Arguments
---------
@param[in]
m INTEGER
The number of rows of each matrix A. M >= 0.
@param[in]
ib INTEGER
The number of columns of each matrix A. ib >= 0.
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array on the GPU, dimension (LDDA,N).
On entry, each pointer is an M-by-N matrix to be factored.
On exit, the factors L and U from the factorization
A = P*L*U; the unit diagonal elements of L are not stored.
@param[in]
ldda INTEGER
The leading dimension of each array A. LDDA >= max(1,M).
@param[out]
ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices.
Each is an INTEGER array, dimension (min(M,N))
The pivot indices; for 1 <= i <= min(M,N), row i of the
matrix was interchanged with row IPIV(i).
@param[out]
info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices.
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value
or another error occured, such as memory allocation failed.
- > 0: if INFO = i, U(i,i) is exactly zero. The factorization
has been completed, but the factor U is exactly
singular, and division by zero will occur if it is used
to solve a system of equations.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_sgesv_aux
********************************************************************/
// Host wrapper for kernel_sgetf2_sm_batched: factors each m x ib matrix of
// the batch entirely in shared memory (LU with partial pivoting).
// Returns 0 on success, 1 when the required workspace exceeds the
// shared-memory cap (47000 bytes, leaving headroom below the 48K limit).
extern "C"
magma_int_t magma_sgetf2_sm_batched(
    magma_int_t m, magma_int_t ib,
    float **dA_array, magma_int_t ldda,
    magma_int_t **ipiv_array,
    magma_int_t *info_array,
    magma_int_t batchCount, magma_queue_t queue)
{
    // workspace: the m x ib panel, zamax (value, index) pairs for the pivot
    // search, and one int for the broadcast pivot row
    const size_t panel_bytes  = sizeof(float) * m * ib;
    const size_t search_bytes = (zamax) * (sizeof(float) + sizeof(int)) + sizeof(int);
    const size_t shared_size  = panel_bytes + search_bytes;
    if (shared_size > 47000)
    {
        fprintf( stderr, "%s: shared memory = %d, exceeds 48K, kernel cannot run\n", __func__, int(shared_size) );
        return 1;
    }

    dim3 blocks(1,1, batchCount);
    dim3 nthreads(max(max(zamax, m), ib), 1, 1);
    kernel_sgetf2_sm_batched<<< blocks, nthreads, shared_size, queue->cuda_stream() >>>
        ( m, ib, dA_array, ldda, ipiv_array, info_array);
    return 0;
}
|
502657dab2c6d5638f3e71f0ca5c9068b2fe3898.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <hip/hip_runtime.h>
#include "mpi.h"
// index into the halo-padded local block (+1 ghost layer on every side)
#define xyz2i(x, y, z) (((z) + 1) * (bx + 2) * (by + 2) + ((y) + 1) * (bx + 2) + (x) + 1)
// map a 3D process-grid coordinate to an MPI rank and back
#define ijk2i(i, j, k) ((k) * px * py + (j) * px + (i))
#define i2k(i) ((i) / (px * py))
#define i2j(i) (((i) - i2k(i) * px * py) / px)
#define i2i(i) (((i) - i2k(i) * px * py) % px)
#define max(x, y) ((x) > (y) ? (x) : (y))
// domain-boundary predicates for the current rank `id`
#define is_left (i2i(id) == 0)
#define is_right (i2i(id) == px - 1)
#define is_front (i2j(id) == 0)
#define is_back (i2j(id) == py - 1)
#define is_down (i2k(id) == 0)
#define is_up (i2k(id) == pz - 1)
// fixed width of one formatted double in the output file (incl. separator)
#define N_CHARS_IN_DOUBLE 14
// debug helper: print n elements of vec with the given printf format
#define vector_print(vec, n, format) \
{ \
int i; \
for (i = 0; i < (n); i++) \
{ \
printf(#format" ", (vec)[i]); \
} \
printf("\n"); \
}
#define vector_print_64F(vec, n) vector_print(vec, n, %7le)
#define vector_print_32S(vec, n) vector_print(vec, n, %d)
// HIP error check: on failure, report, release MEM_GPU/MEM_CPU and return
// from the enclosing function (only usable inside main)
#define CSC(call) \
do \
{ \
hipError_t res = call; \
if (res != hipSuccess) \
{ \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, hipGetErrorString(res)); \
fflush(stderr); \
hipFree(MEM_GPU); \
free(MEM_CPU); \
return 0; \
} \
} while(0)
// One Jacobi iteration: recompute every interior cell of the bx x by x bz
// block from the 6-neighbour stencil of the previous iterate. The +1 halo
// layer addressed through xyz2i holds boundary / neighbour-rank values.
// Grid-stride loops in all three dimensions, so any launch geometry works.
__global__ void kernel(const double *data_prev, double *data,
int bx, int by, int bz,
double hx, double hy, double hz)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x,
idy = blockIdx.y * blockDim.y + threadIdx.y,
idz = blockIdx.z * blockDim.z + threadIdx.z,
offsetx = blockDim.x * gridDim.x,
offsety = blockDim.y * gridDim.y,
offsetz = blockDim.z * gridDim.z,
x, y, z;
for(z = idz; z < bz; z += offsetz)
for(y = idy; y < by; y += offsety)
for(x = idx; x < bx; x += offsetx)
data[xyz2i(x, y, z)] = ((data_prev[xyz2i(x + 1, y, z)] + data_prev[xyz2i(x - 1, y, z)]) / (hx * hx) +
(data_prev[xyz2i(x, y + 1, z)] + data_prev[xyz2i(x, y - 1, z)]) / (hy * hy) +
(data_prev[xyz2i(x, y, z + 1)] + data_prev[xyz2i(x, y, z - 1)]) / (hz * hz)) /
(2 / (hx * hx) + 2 / (hy * hy) + 2 / (hz * hz));
}
// Fill the first n elements of dst with val (1D grid-stride loop).
__global__ void vector_set(double *dst, int n,
double val)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    while (i < n)
    {
        dst[i] = val;
        i += stride;
    }
}
// c = |a - b| element-wise over the block interior. a and b are halo-padded
// (indexed via xyz2i, which uses bx/by from this scope); c is densely packed
// (bx*by*bz, no halo) so it can be fed directly to the max-reduction.
__global__ void data_diff_and_fabs(const double *a, const double *b, double *c,
int bx, int by, int bz)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x,
idy = blockIdx.y * blockDim.y + threadIdx.y,
idz = blockIdx.z * blockDim.z + threadIdx.z,
offsetx = blockDim.x * gridDim.x,
offsety = blockDim.y * gridDim.y,
offsetz = blockDim.z * gridDim.z,
x, y, z;
for(z = idz; z < bz; z += offsetz)
for(y = idy; y < by; y += offsety)
for(x = idx; x < bx; x += offsetx)
c[z * bx * by + y * bx + x] = fabs(a[xyz2i(x, y, z)] - b[xyz2i(x, y, z)]);
}
// Set an n x m strided sub-matrix of dst to val; element (x, y) lives at
// dst[y * lddst + x * stepdst] (stepdst = element stride, lddst = leading
// dimension). 2D grid-stride loops.
__global__ void mat_set(double *dst, int stepdst, int lddst,
int n, int m,
double val)
{
    const int x0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int y0 = blockIdx.y * blockDim.y + threadIdx.y;
    const int dx = blockDim.x * gridDim.x;
    const int dy = blockDim.y * gridDim.y;
    for (int y = y0; y < m; y += dy)
    {
        for (int x = x0; x < n; x += dx)
            dst[y * lddst + x * stepdst] = val;
    }
}
// Copy an n x m strided sub-matrix from src to dst; element (x, y) of either
// operand lives at base[y * ld + x * step]. 2D grid-stride loops; src and dst
// must not overlap.
__global__ void mat_copy(const double *src, int stepsrc, int ldsrc,
double *dst, int stepdst, int lddst,
int n, int m)
{
    const int x0 = blockIdx.x * blockDim.x + threadIdx.x;
    const int y0 = blockIdx.y * blockDim.y + threadIdx.y;
    const int dx = blockDim.x * gridDim.x;
    const int dy = blockDim.y * gridDim.y;
    for (int y = y0; y < m; y += dy)
    {
        for (int x = x0; x < n; x += dx)
            dst[y * lddst + x * stepdst] = src[y * ldsrc + x * stepsrc];
    }
}
// MPI + GPU Jacobi solver for the 3D Laplace equation on a px*py*pz process
// grid, each rank owning a bx*by*bz block with a 1-cell halo. Rank 0 reads
// the configuration from stdin and broadcasts it; iterations exchange halo
// faces via MPI, run the stencil on the GPU, and stop when the global max
// update falls below eps; the result is written with collective MPI-IO.
int main(int argc, char *argv[])
{
int id, root = 0, numproc,
i,
x, y, z,
device_count,
buf_size, block_size,
px, py, pz, // process-grid dimensions
bx, by, bz; // per-process block dimensions
void *MEM_CPU = NULL, *MEM_GPU = NULL;
double *data, *data_prev, *data_tmp, *data_reduce, *buf_dev,
*data_host,
*buf_left, *buf_right, *buf_front, *buf_back, *buf_up, *buf_down;
char *buf_char;
thrust::device_ptr<double> data_ptr;
double eps, // convergence threshold
dx, dy, dz, // physical domain size
ud, uu, ul, ur, uf, ub, // boundary values: down, up, left, right, front, back
u0, // initial guess for the interior
hx, hy, hz, // grid spacing per axis
max_u, max_global; // local / global max update per iteration
int *blocklens;
MPI_Aint *indicies;
MPI_File fp;
MPI_Datatype filetype;
dim3 blocks3D = dim3(8, 8, 8),
threads3D = dim3(8, 8, 8),
blocks2D = dim3(256, 256),
threads2D = dim3(32, 8),
blocks1D = dim3(256),
threads1D = dim3(256);
char name_out[128];
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numproc);
MPI_Comm_rank(MPI_COMM_WORLD, &id);
hipGetDeviceCount(&device_count); // distribute ranks over the available GPUs
hipSetDevice(id % device_count); // round-robin device assignment
if (id == root)
{
scanf("%d %d %d\n", &px, &py, &pz);
scanf("%d %d %d\n", &bx, &by, &bz);
scanf("%s\n", name_out);
scanf("%le\n", &eps);
scanf("%lf %lf %lf\n", &dx, &dy, &dz);
scanf("%lf %lf %lf %lf %lf %lf\n",
&ud, &uu, &ul, &ur, &uf, &ub);
scanf("%lf", &u0);
hx = dx / (px * bx);
hy = dy / (py * by);
hz = dz / (pz * bz);
}
MPI_Bcast(&bx, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&by, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&bz, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&px, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&py, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&pz, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&ul, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&ur, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&uf, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&ub, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&ud, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&uu, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&u0, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&hx, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&hy, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&hz, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&eps, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(name_out, 128, MPI_CHAR, root, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
// host allocation: halo-padded block, six face buffers, MPI-IO metadata
// (blocklens/indicies) and the text output buffer, all in one malloc
block_size = (bx + 2) * (by + 2) * (bz + 2);
MEM_CPU = malloc((block_size + 2 * (by * bx + bz * by + bz * bx)) * sizeof(double) +
by * bz * sizeof(int) +
by * bz * sizeof(MPI_Aint) +
bx * by * bz * N_CHARS_IN_DOUBLE * sizeof(char));
if(MEM_CPU == NULL)
{
fprintf(stderr, "Error: Not Enough memory on CPU%d\n", id);
fflush(stderr);
return 0;
}
data_host = (double *)MEM_CPU;
buf_left = data_host + block_size;
buf_right = buf_left + bz * by;
buf_front = buf_right + bz * by;
buf_back = buf_front + bz * bx;
buf_up = buf_back + bz * bx;
buf_down = buf_up + by * bx;
blocklens = (int *)(buf_down + by * bx);
indicies = (MPI_Aint *)(blocklens + by * bz);
buf_char = (char *)(indicies + by * bz);
// device allocation: two halo-padded iterates, the packed |diff| array for
// the reduction, and one staging buffer big enough for any face
buf_size = max(max(bx * by, by * bz), max(bx * bz, px * py * pz));
CSC(hipMalloc(&MEM_GPU, (2 * block_size + bx * by * bz + buf_size) * sizeof(double)));
data = (double *)MEM_GPU;
data_prev = data + block_size;
data_reduce = data_prev + block_size;
buf_dev = data_reduce + bx * by * bz;
hipLaunchKernelGGL(( vector_set), dim3(blocks1D), dim3(threads1D), 0, 0, data, block_size, u0);
hipLaunchKernelGGL(( vector_set), dim3(blocks1D), dim3(threads1D), 0, 0, data_prev, block_size, u0);
if (is_left) // left
{
hipLaunchKernelGGL(( mat_set), dim3(blocks2D), dim3(threads2D), 0, 0, data + xyz2i(-1, 0, 0), bx + 2, (bx + 2) * (by + 2),
by, bz,
ul);
hipLaunchKernelGGL(( mat_set), dim3(blocks2D), dim3(threads2D), 0, 0, data_prev + xyz2i(-1, 0, 0), bx + 2, (bx + 2) * (by + 2),
by, bz,
ul);
}
if (is_right) // right
{
hipLaunchKernelGGL(( mat_set), dim3(blocks2D), dim3(threads2D), 0, 0, data + xyz2i(bx, 0, 0), bx + 2, (bx + 2) * (by + 2),
by, bz,
ur);
hipLaunchKernelGGL(( mat_set), dim3(blocks2D), dim3(threads2D), 0, 0, data_prev + xyz2i(bx, 0, 0), bx + 2, (bx + 2) * (by + 2),
by, bz,
ur);
}
if (is_front) // front
{
hipLaunchKernelGGL(( mat_set), dim3(blocks2D), dim3(threads2D), 0, 0, data + xyz2i(0, -1, 0), 1, (bx + 2) * (by + 2),
bx, bz,
uf);
hipLaunchKernelGGL(( mat_set), dim3(blocks2D), dim3(threads2D), 0, 0, data_prev + xyz2i(0, -1, 0), 1, (bx + 2) * (by + 2),
bx, bz,
uf);
}
if (is_back) // back
{
hipLaunchKernelGGL(( mat_set), dim3(blocks2D), dim3(threads2D), 0, 0, data + xyz2i(0, by, 0), 1, (bx + 2) * (by + 2),
bx, bz,
ub);
hipLaunchKernelGGL(( mat_set), dim3(blocks2D), dim3(threads2D), 0, 0, data_prev + xyz2i(0, by, 0), 1, (bx + 2) * (by + 2),
bx, bz,
ub);
}
if (is_down) // down
{
hipLaunchKernelGGL(( mat_set), dim3(blocks2D), dim3(threads2D), 0, 0, data + xyz2i(0, 0, -1), 1, bx + 2,
bx, by,
ud);
hipLaunchKernelGGL(( mat_set), dim3(blocks2D), dim3(threads2D), 0, 0, data_prev + xyz2i(0, 0, -1), 1, bx + 2,
bx, by,
ud);
}
if (is_up) // up
{
hipLaunchKernelGGL(( mat_set), dim3(blocks2D), dim3(threads2D), 0, 0, data + xyz2i(0, 0, bz), 1, bx + 2,
bx, by,
uu);
hipLaunchKernelGGL(( mat_set), dim3(blocks2D), dim3(threads2D), 0, 0, data_prev + xyz2i(0, 0, bz), 1, bx + 2,
bx, by,
uu);
}
do // Jacobi iterations until the global residual drops below eps
{
/* halo exchange, phase 1: send the left/front/down faces to the
lower-index neighbours (blocking sends, matched by the receives below) */
if (!is_left)
{
/* left face -> buffer */
hipLaunchKernelGGL(( mat_copy), dim3(blocks2D), dim3(threads2D), 0, 0, data + xyz2i(0, 0, 0), bx + 2, (bx + 2) * (by + 2),
buf_dev, 1, by,
by, bz);
hipMemcpy(buf_left, buf_dev, by * bz * sizeof(double), hipMemcpyDeviceToHost);
MPI_Send(buf_left, by * bz, MPI_DOUBLE, ijk2i(i2i(id) - 1, i2j(id), i2k(id)), 1, MPI_COMM_WORLD);
}
if (!is_front)
{
/* front face -> buffer */
hipLaunchKernelGGL(( mat_copy), dim3(blocks2D), dim3(threads2D), 0, 0, data + xyz2i(0, 0, 0), 1, (bx + 2) * (by + 2),
buf_dev, 1, bx,
bx, bz);
hipMemcpy(buf_front, buf_dev, bx * bz * sizeof(double), hipMemcpyDeviceToHost);
MPI_Send(buf_front, bx * bz, MPI_DOUBLE, ijk2i(i2i(id), i2j(id) - 1, i2k(id)), 1, MPI_COMM_WORLD);
}
if (!is_down)
{
/* down face -> buffer */
hipLaunchKernelGGL(( mat_copy), dim3(blocks2D), dim3(threads2D), 0, 0, data + xyz2i(0, 0, 0), 1, bx + 2,
buf_dev, 1, bx,
bx, by);
hipMemcpy(buf_down, buf_dev, bx * by * sizeof(double), hipMemcpyDeviceToHost);
MPI_Send(buf_down, bx * by, MPI_DOUBLE, ijk2i(i2i(id), i2j(id), i2k(id) - 1), 1, MPI_COMM_WORLD);
}
//============ receive from the higher-index neighbours ============
if (!is_right)
{
MPI_Recv(buf_right, by * bz, MPI_DOUBLE, ijk2i(i2i(id) + 1, i2j(id), i2k(id)), 1, MPI_COMM_WORLD, &status);
/* right halo update */
hipMemcpy(buf_dev, buf_right, by * bz * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( mat_copy), dim3(blocks2D), dim3(threads2D), 0, 0, buf_dev, 1, by,
data + xyz2i(bx, 0, 0), bx + 2, (bx + 2) * (by + 2),
by, bz);
}
if (!is_back)
{
MPI_Recv(buf_back, bx * bz, MPI_DOUBLE, ijk2i(i2i(id), i2j(id) + 1, i2k(id)), 1, MPI_COMM_WORLD, &status);
/* back halo update */
hipMemcpy(buf_dev, buf_back, bx * bz * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( mat_copy), dim3(blocks2D), dim3(threads2D), 0, 0, buf_dev, 1, bx,
data + xyz2i(0, by, 0), 1, (bx + 2) * (by + 2),
bx, bz);
}
if (!is_up)
{
MPI_Recv(buf_up, bx * by, MPI_DOUBLE, ijk2i(i2i(id), i2j(id), i2k(id) + 1), 1, MPI_COMM_WORLD, &status);
/* up halo update */
hipMemcpy(buf_dev, buf_up, bx * by * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( mat_copy), dim3(blocks2D), dim3(threads2D), 0, 0, buf_dev, 1, bx,
data + xyz2i(0, 0, bz), 1, bx + 2,
bx, by);
}
//====== phase 2: send the right/back/up faces in the opposite direction ======
if (!is_right)
{
/* right face -> buffer */
hipLaunchKernelGGL(( mat_copy), dim3(blocks2D), dim3(threads2D), 0, 0, data + xyz2i(bx - 1, 0, 0), bx + 2, (bx + 2) * (by + 2),
buf_dev, 1, by,
by, bz);
hipMemcpy(buf_right, buf_dev, by * bz * sizeof(double), hipMemcpyDeviceToHost);
MPI_Send(buf_right, by * bz, MPI_DOUBLE, ijk2i(i2i(id) + 1, i2j(id), i2k(id)), 1, MPI_COMM_WORLD);
}
if (!is_back)
{
/* back face -> buffer */
hipLaunchKernelGGL(( mat_copy), dim3(blocks2D), dim3(threads2D), 0, 0, data + xyz2i(0, by - 1, 0), 1, (bx + 2) * (by + 2),
buf_dev, 1, bx,
bx, bz);
hipMemcpy(buf_back, buf_dev, bx * bz * sizeof(double), hipMemcpyDeviceToHost);
MPI_Send(buf_back, bx * bz, MPI_DOUBLE, ijk2i(i2i(id), i2j(id) + 1, i2k(id)), 1, MPI_COMM_WORLD);
}
if (!is_up)
{
/* up face -> buffer */
hipLaunchKernelGGL(( mat_copy), dim3(blocks2D), dim3(threads2D), 0, 0, data + xyz2i(0, 0, bz - 1), 1, bx + 2,
buf_dev, 1, bx,
bx, by);
hipMemcpy(buf_up, buf_dev, bx * by * sizeof(double), hipMemcpyDeviceToHost);
MPI_Send(buf_up, bx * by, MPI_DOUBLE, ijk2i(i2i(id), i2j(id), i2k(id) + 1), 1, MPI_COMM_WORLD);
}
//============ receive from the lower-index neighbours ============
if (!is_left)
{
MPI_Recv(buf_left, by * bz, MPI_DOUBLE, ijk2i(i2i(id) - 1, i2j(id), i2k(id)), 1, MPI_COMM_WORLD, &status);
/* left halo update */
hipMemcpy(buf_dev, buf_left, by * bz * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( mat_copy), dim3(blocks2D), dim3(threads2D), 0, 0, buf_dev, 1, by,
data + xyz2i(-1, 0, 0), bx + 2, (bx + 2) * (by + 2),
by, bz);
}
if (!is_front)
{
MPI_Recv(buf_front, bx * bz, MPI_DOUBLE, ijk2i(i2i(id), i2j(id) - 1, i2k(id)), 1, MPI_COMM_WORLD, &status);
/* front halo update */
hipMemcpy(buf_dev, buf_front, bx * bz * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( mat_copy), dim3(blocks2D), dim3(threads2D), 0, 0, buf_dev, 1, bx,
data + xyz2i(0, -1, 0), 1, (bx + 2) * (by + 2),
bx, bz);
}
if (!is_down)
{
MPI_Recv(buf_down, bx * by, MPI_DOUBLE, ijk2i(i2i(id), i2j(id), i2k(id) - 1), 1, MPI_COMM_WORLD, &status);
/* down halo update */
hipMemcpy(buf_dev, buf_down, bx * by * sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( mat_copy), dim3(blocks2D), dim3(threads2D), 0, 0, buf_dev, 1, bx,
data + xyz2i(0, 0, -1), 1, bx + 2,
bx, by);
}
// swap iterates, then compute the new one from the old
data_tmp = data;
data = data_prev;
data_prev = data_tmp;
hipLaunchKernelGGL(( kernel), dim3(blocks3D), dim3(threads3D), 0, 0, data_prev, data,
bx, by, bz,
hx, hy, hz);
CSC(hipGetLastError());
hipLaunchKernelGGL(( data_diff_and_fabs), dim3(blocks3D), dim3(threads3D), 0, 0, data_prev, data, data_reduce,
bx, by, bz);
CSC(hipGetLastError());
// max-reduction of |new - old| on the GPU via Thrust
data_ptr = thrust::device_pointer_cast(data_reduce);
max_u = *thrust::max_element(data_ptr, data_ptr + bx * by * bz);
// global maximum across all ranks decides convergence
MPI_Allreduce(&max_u, &max_global, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
}
while(max_global >= eps);
MPI_Barrier(MPI_COMM_WORLD);
CSC(hipMemcpy(data_host, data, block_size * sizeof(double), hipMemcpyDeviceToHost));
/* format the local block as fixed-width text for the collective write */
memset(buf_char, ' ', bx * by * bz * N_CHARS_IN_DOUBLE * sizeof(char));
for (z = 0; z < bz; z++)
{
for (y = 0; y < by; y++)
{
for (x = 0; x < bx; x++)
{
sprintf(buf_char + (z * bx * by + y * bx + x) * N_CHARS_IN_DOUBLE,
"%7le ", data_host[xyz2i(x, y, z)]);
}
if (is_right)
{
// the rightmost rank terminates each global row with a newline
buf_char[(z * bx * by + y * bx + x) * N_CHARS_IN_DOUBLE - 1] = '\n';
}
}
}
// sprintf wrote NUL terminators; replace them with spaces so the file
// contains no stray '\0' bytes
for (i = 0; i < bx * by * bz * N_CHARS_IN_DOUBLE; i++)
{
if (buf_char[i] == '\0')
buf_char[i] = ' ';
}
for(i = 0; i < by * bz; i++)
blocklens[i] = bx * N_CHARS_IN_DOUBLE;
// byte offsets of this rank's rows inside the global interleaved file
for (z = 0; z < bz; z++)
{
for (y = 0; y < by; y++)
{
indicies[z * by + y] = i2k(id) * px * py * bx * by * bz * N_CHARS_IN_DOUBLE + // global offset
z * px * py * bx * by * N_CHARS_IN_DOUBLE +
i2j(id) * px * bx * by * N_CHARS_IN_DOUBLE +
y * px * bx * N_CHARS_IN_DOUBLE +
i2i(id) * bx * N_CHARS_IN_DOUBLE;
}
}
MPI_Type_create_hindexed(by * bz, blocklens, indicies, MPI_CHAR, &filetype);
MPI_Type_commit(&filetype);
MPI_File_delete(name_out, MPI_INFO_NULL);
MPI_File_open(MPI_COMM_WORLD, name_out, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fp);
MPI_File_set_view(fp, 0, MPI_CHAR, filetype, "native", MPI_INFO_NULL);
MPI_File_write_all(fp, buf_char, bx * by * bz * N_CHARS_IN_DOUBLE, MPI_CHAR, &status); // collective write: each rank writes its interleaved rows
MPI_File_close(&fp);
free(MEM_CPU);
hipFree(MEM_GPU);
MPI_Finalize();
return 0;
}
| 502657dab2c6d5638f3e71f0ca5c9068b2fe3898.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include <cuda_runtime.h>
#include "mpi.h"
// индексация внутри блока
#define xyz2i(x, y, z) (((z) + 1) * (bx + 2) * (by + 2) + ((y) + 1) * (bx + 2) + (x) + 1)
// индексация по процессам
#define ijk2i(i, j, k) ((k) * px * py + (j) * px + (i))
#define i2k(i) ((i) / (px * py))
#define i2j(i) (((i) - i2k(i) * px * py) / px)
#define i2i(i) (((i) - i2k(i) * px * py) % px)
#define max(x, y) ((x) > (y) ? (x) : (y))
#define is_left (i2i(id) == 0)
#define is_right (i2i(id) == px - 1)
#define is_front (i2j(id) == 0)
#define is_back (i2j(id) == py - 1)
#define is_down (i2k(id) == 0)
#define is_up (i2k(id) == pz - 1)
#define N_CHARS_IN_DOUBLE 14
#define vector_print(vec, n, format) \
{ \
int i; \
for (i = 0; i < (n); i++) \
{ \
printf(#format" ", (vec)[i]); \
} \
printf("\n"); \
}
#define vector_print_64F(vec, n) vector_print(vec, n, %7le)
#define vector_print_32S(vec, n) vector_print(vec, n, %d)
#define CSC(call) \
do \
{ \
cudaError_t res = call; \
if (res != cudaSuccess) \
{ \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
fflush(stderr); \
cudaFree(MEM_GPU); \
free(MEM_CPU); \
return 0; \
} \
} while(0)
// Шаг алгоритма
__global__ void kernel(const double *data_prev, double *data,
int bx, int by, int bz,
double hx, double hy, double hz)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x,
idy = blockIdx.y * blockDim.y + threadIdx.y,
idz = blockIdx.z * blockDim.z + threadIdx.z,
offsetx = blockDim.x * gridDim.x,
offsety = blockDim.y * gridDim.y,
offsetz = blockDim.z * gridDim.z,
x, y, z;
for(z = idz; z < bz; z += offsetz)
for(y = idy; y < by; y += offsety)
for(x = idx; x < bx; x += offsetx)
data[xyz2i(x, y, z)] = ((data_prev[xyz2i(x + 1, y, z)] + data_prev[xyz2i(x - 1, y, z)]) / (hx * hx) +
(data_prev[xyz2i(x, y + 1, z)] + data_prev[xyz2i(x, y - 1, z)]) / (hy * hy) +
(data_prev[xyz2i(x, y, z + 1)] + data_prev[xyz2i(x, y, z - 1)]) / (hz * hz)) /
(2 / (hx * hx) + 2 / (hy * hy) + 2 / (hz * hz));
}
// Инициализация вектора значение val
__global__ void vector_set(double *dst, int n,
double val)
{
int id = blockIdx.x * blockDim.x + threadIdx.x,
offset = blockDim.x * gridDim.x,
i;
for(i = id; i < n; i += offset)
dst[i] = val;
}
// Создание тензора погрешностей
__global__ void data_diff_and_fabs(const double *a, const double *b, double *c,
int bx, int by, int bz)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x,
idy = blockIdx.y * blockDim.y + threadIdx.y,
idz = blockIdx.z * blockDim.z + threadIdx.z,
offsetx = blockDim.x * gridDim.x,
offsety = blockDim.y * gridDim.y,
offsetz = blockDim.z * gridDim.z,
x, y, z;
for(z = idz; z < bz; z += offsetz)
for(y = idy; y < by; y += offsety)
for(x = idx; x < bx; x += offsetx)
c[z * bx * by + y * bx + x] = fabs(a[xyz2i(x, y, z)] - b[xyz2i(x, y, z)]);
}
// Инициализация матрицы dst значением val со сдвигами
__global__ void mat_set(double *dst, int stepdst, int lddst,
int n, int m,
double val)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x,
idy = blockIdx.y * blockDim.y + threadIdx.y,
offsetx = blockDim.x * gridDim.x,
offsety = blockDim.y * gridDim.y,
x, y;
for(y = idy; y < m; y += offsety)
for(x = idx; x < n; x += offsetx)
{
dst[y * lddst + x * stepdst] = val;
}
}
// Копирование матрицы src в матрицу dst со сдвигами
__global__ void mat_copy(const double *src, int stepsrc, int ldsrc,
double *dst, int stepdst, int lddst,
int n, int m)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x,
idy = blockIdx.y * blockDim.y + threadIdx.y,
offsetx = blockDim.x * gridDim.x,
offsety = blockDim.y * gridDim.y,
x, y;
for(y = idy; y < m; y += offsety)
for(x = idx; x < n; x += offsetx)
{
dst[y * lddst + x * stepdst] = src[y * ldsrc + x * stepsrc];
}
}
int main(int argc, char *argv[])
{
int id, root = 0, numproc,
i,
x, y, z,
device_count,
buf_size, block_size,
px, py, pz, // размер сетки процессов
bx, by, bz; // размер блока на один процесс
void *MEM_CPU = NULL, *MEM_GPU = NULL;
double *data, *data_prev, *data_tmp, *data_reduce, *buf_dev,
*data_host,
*buf_left, *buf_right, *buf_front, *buf_back, *buf_up, *buf_down;
char *buf_char;
thrust::device_ptr<double> data_ptr;
double eps,
dx, dy, dz, // размер области
ud, uu, ul, ur, uf, ub, // границы
u0, // начальное распределение
hx, hy, hz, // шаги
max_u, max_global; // максимальные значения
int *blocklens;
MPI_Aint *indicies;
MPI_File fp;
MPI_Datatype filetype;
dim3 blocks3D = dim3(8, 8, 8),
threads3D = dim3(8, 8, 8),
blocks2D = dim3(256, 256),
threads2D = dim3(32, 8),
blocks1D = dim3(256),
threads1D = dim3(256);
char name_out[128];
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numproc);
MPI_Comm_rank(MPI_COMM_WORLD, &id);
cudaGetDeviceCount(&device_count); // каждый процесс использует
cudaSetDevice(id % device_count); // одну из доступных видеокарт
if (id == root)
{
scanf("%d %d %d\n", &px, &py, &pz);
scanf("%d %d %d\n", &bx, &by, &bz);
scanf("%s\n", name_out);
scanf("%le\n", &eps);
scanf("%lf %lf %lf\n", &dx, &dy, &dz);
scanf("%lf %lf %lf %lf %lf %lf\n",
&ud, &uu, &ul, &ur, &uf, &ub);
scanf("%lf", &u0);
hx = dx / (px * bx);
hy = dy / (py * by);
hz = dz / (pz * bz);
}
MPI_Bcast(&bx, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&by, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&bz, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&px, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&py, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&pz, 1, MPI_INT, root, MPI_COMM_WORLD);
MPI_Bcast(&ul, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&ur, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&uf, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&ub, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&ud, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&uu, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&u0, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&hx, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&hy, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&hz, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(&eps, 1, MPI_DOUBLE, root, MPI_COMM_WORLD);
MPI_Bcast(name_out, 128, MPI_CHAR, root, MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
// размер блока с виртуальными ячейками
block_size = (bx + 2) * (by + 2) * (bz + 2);
MEM_CPU = malloc((block_size + 2 * (by * bx + bz * by + bz * bx)) * sizeof(double) +
by * bz * sizeof(int) +
by * bz * sizeof(MPI_Aint) +
bx * by * bz * N_CHARS_IN_DOUBLE * sizeof(char));
if(MEM_CPU == NULL)
{
fprintf(stderr, "Error: Not Enough memory on CPU%d\n", id);
fflush(stderr);
return 0;
}
data_host = (double *)MEM_CPU;
buf_left = data_host + block_size;
buf_right = buf_left + bz * by;
buf_front = buf_right + bz * by;
buf_back = buf_front + bz * bx;
buf_up = buf_back + bz * bx;
buf_down = buf_up + by * bx;
blocklens = (int *)(buf_down + by * bx);
indicies = (MPI_Aint *)(blocklens + by * bz);
buf_char = (char *)(indicies + by * bz);
// размер буфера на видеокарте
buf_size = max(max(bx * by, by * bz), max(bx * bz, px * py * pz));
CSC(cudaMalloc(&MEM_GPU, (2 * block_size + bx * by * bz + buf_size) * sizeof(double)));
data = (double *)MEM_GPU;
data_prev = data + block_size;
data_reduce = data_prev + block_size;
buf_dev = data_reduce + bx * by * bz;
vector_set<<<blocks1D, threads1D>>>(data, block_size, u0);
vector_set<<<blocks1D, threads1D>>>(data_prev, block_size, u0);
if (is_left) // left
{
mat_set<<<blocks2D, threads2D>>>(data + xyz2i(-1, 0, 0), bx + 2, (bx + 2) * (by + 2),
by, bz,
ul);
mat_set<<<blocks2D, threads2D>>>(data_prev + xyz2i(-1, 0, 0), bx + 2, (bx + 2) * (by + 2),
by, bz,
ul);
}
if (is_right) // right
{
mat_set<<<blocks2D, threads2D>>>(data + xyz2i(bx, 0, 0), bx + 2, (bx + 2) * (by + 2),
by, bz,
ur);
mat_set<<<blocks2D, threads2D>>>(data_prev + xyz2i(bx, 0, 0), bx + 2, (bx + 2) * (by + 2),
by, bz,
ur);
}
if (is_front) // front
{
mat_set<<<blocks2D, threads2D>>>(data + xyz2i(0, -1, 0), 1, (bx + 2) * (by + 2),
bx, bz,
uf);
mat_set<<<blocks2D, threads2D>>>(data_prev + xyz2i(0, -1, 0), 1, (bx + 2) * (by + 2),
bx, bz,
uf);
}
if (is_back) // back
{
mat_set<<<blocks2D, threads2D>>>(data + xyz2i(0, by, 0), 1, (bx + 2) * (by + 2),
bx, bz,
ub);
mat_set<<<blocks2D, threads2D>>>(data_prev + xyz2i(0, by, 0), 1, (bx + 2) * (by + 2),
bx, bz,
ub);
}
if (is_down) // down
{
mat_set<<<blocks2D, threads2D>>>(data + xyz2i(0, 0, -1), 1, bx + 2,
bx, by,
ud);
mat_set<<<blocks2D, threads2D>>>(data_prev + xyz2i(0, 0, -1), 1, bx + 2,
bx, by,
ud);
}
if (is_up) // up
{
mat_set<<<blocks2D, threads2D>>>(data + xyz2i(0, 0, bz), 1, bx + 2,
bx, by,
uu);
mat_set<<<blocks2D, threads2D>>>(data_prev + xyz2i(0, 0, bz), 1, bx + 2,
bx, by,
uu);
}
do // основной цикл
{
/* обмен граничными условиями: отправка данных */
if (!is_left)
{
/* left -> bufer */
mat_copy<<<blocks2D, threads2D>>>(data + xyz2i(0, 0, 0), bx + 2, (bx + 2) * (by + 2),
buf_dev, 1, by,
by, bz);
cudaMemcpy(buf_left, buf_dev, by * bz * sizeof(double), cudaMemcpyDeviceToHost);
MPI_Send(buf_left, by * bz, MPI_DOUBLE, ijk2i(i2i(id) - 1, i2j(id), i2k(id)), 1, MPI_COMM_WORLD);
}
if (!is_front)
{
/* front -> bufer */
mat_copy<<<blocks2D, threads2D>>>(data + xyz2i(0, 0, 0), 1, (bx + 2) * (by + 2),
buf_dev, 1, bx,
bx, bz);
cudaMemcpy(buf_front, buf_dev, bx * bz * sizeof(double), cudaMemcpyDeviceToHost);
MPI_Send(buf_front, bx * bz, MPI_DOUBLE, ijk2i(i2i(id), i2j(id) - 1, i2k(id)), 1, MPI_COMM_WORLD);
}
if (!is_down)
{
/* down -> bufer */
mat_copy<<<blocks2D, threads2D>>>(data + xyz2i(0, 0, 0), 1, bx + 2,
buf_dev, 1, bx,
bx, by);
cudaMemcpy(buf_down, buf_dev, bx * by * sizeof(double), cudaMemcpyDeviceToHost);
MPI_Send(buf_down, bx * by, MPI_DOUBLE, ijk2i(i2i(id), i2j(id), i2k(id) - 1), 1, MPI_COMM_WORLD);
}
//==============================================================
if (!is_right)
{
MPI_Recv(buf_right, by * bz, MPI_DOUBLE, ijk2i(i2i(id) + 1, i2j(id), i2k(id)), 1, MPI_COMM_WORLD, &status);
/* right update */
cudaMemcpy(buf_dev, buf_right, by * bz * sizeof(double), cudaMemcpyHostToDevice);
mat_copy<<<blocks2D, threads2D>>>(buf_dev, 1, by,
data + xyz2i(bx, 0, 0), bx + 2, (bx + 2) * (by + 2),
by, bz);
}
if (!is_back)
{
MPI_Recv(buf_back, bx * bz, MPI_DOUBLE, ijk2i(i2i(id), i2j(id) + 1, i2k(id)), 1, MPI_COMM_WORLD, &status);
/* back update */
cudaMemcpy(buf_dev, buf_back, bx * bz * sizeof(double), cudaMemcpyHostToDevice);
mat_copy<<<blocks2D, threads2D>>>(buf_dev, 1, bx,
data + xyz2i(0, by, 0), 1, (bx + 2) * (by + 2),
bx, bz);
}
if (!is_up)
{
MPI_Recv(buf_up, bx * by, MPI_DOUBLE, ijk2i(i2i(id), i2j(id), i2k(id) + 1), 1, MPI_COMM_WORLD, &status);
/* up update */
cudaMemcpy(buf_dev, buf_up, bx * by * sizeof(double), cudaMemcpyHostToDevice);
mat_copy<<<blocks2D, threads2D>>>(buf_dev, 1, bx,
data + xyz2i(0, 0, bz), 1, bx + 2,
bx, by);
}
//==============================================================
if (!is_right)
{
/* right -> bufer */
mat_copy<<<blocks2D, threads2D>>>(data + xyz2i(bx - 1, 0, 0), bx + 2, (bx + 2) * (by + 2),
buf_dev, 1, by,
by, bz);
cudaMemcpy(buf_right, buf_dev, by * bz * sizeof(double), cudaMemcpyDeviceToHost);
MPI_Send(buf_right, by * bz, MPI_DOUBLE, ijk2i(i2i(id) + 1, i2j(id), i2k(id)), 1, MPI_COMM_WORLD);
}
if (!is_back)
{
/* back -> bufer */
mat_copy<<<blocks2D, threads2D>>>(data + xyz2i(0, by - 1, 0), 1, (bx + 2) * (by + 2),
buf_dev, 1, bx,
bx, bz);
cudaMemcpy(buf_back, buf_dev, bx * bz * sizeof(double), cudaMemcpyDeviceToHost);
MPI_Send(buf_back, bx * bz, MPI_DOUBLE, ijk2i(i2i(id), i2j(id) + 1, i2k(id)), 1, MPI_COMM_WORLD);
}
if (!is_up)
{
/* up -> bufer */
mat_copy<<<blocks2D, threads2D>>>(data + xyz2i(0, 0, bz - 1), 1, bx + 2,
buf_dev, 1, bx,
bx, by);
cudaMemcpy(buf_up, buf_dev, bx * by * sizeof(double), cudaMemcpyDeviceToHost);
MPI_Send(buf_up, bx * by, MPI_DOUBLE, ijk2i(i2i(id), i2j(id), i2k(id) + 1), 1, MPI_COMM_WORLD);
}
//==============================================================
if (!is_left)
{
MPI_Recv(buf_left, by * bz, MPI_DOUBLE, ijk2i(i2i(id) - 1, i2j(id), i2k(id)), 1, MPI_COMM_WORLD, &status);
/* left update */
cudaMemcpy(buf_dev, buf_left, by * bz * sizeof(double), cudaMemcpyHostToDevice);
mat_copy<<<blocks2D, threads2D>>>(buf_dev, 1, by,
data + xyz2i(-1, 0, 0), bx + 2, (bx + 2) * (by + 2),
by, bz);
}
if (!is_front)
{
MPI_Recv(buf_front, bx * bz, MPI_DOUBLE, ijk2i(i2i(id), i2j(id) - 1, i2k(id)), 1, MPI_COMM_WORLD, &status);
/* front update */
cudaMemcpy(buf_dev, buf_front, bx * bz * sizeof(double), cudaMemcpyHostToDevice);
mat_copy<<<blocks2D, threads2D>>>(buf_dev, 1, bx,
data + xyz2i(0, -1, 0), 1, (bx + 2) * (by + 2),
bx, bz);
}
if (!is_down)
{
MPI_Recv(buf_down, bx * by, MPI_DOUBLE, ijk2i(i2i(id), i2j(id), i2k(id) - 1), 1, MPI_COMM_WORLD, &status);
/* down update */
cudaMemcpy(buf_dev, buf_down, bx * by * sizeof(double), cudaMemcpyHostToDevice);
mat_copy<<<blocks2D, threads2D>>>(buf_dev, 1, bx,
data + xyz2i(0, 0, -1), 1, bx + 2,
bx, by);
}
data_tmp = data;
data = data_prev;
data_prev = data_tmp;
kernel<<<blocks3D, threads3D>>>(data_prev, data,
bx, by, bz,
hx, hy, hz);
CSC(cudaGetLastError());
data_diff_and_fabs<<<blocks3D, threads3D>>>(data_prev, data, data_reduce,
bx, by, bz);
CSC(cudaGetLastError());
// высчитываем локальный максимум на GPU
data_ptr = thrust::device_pointer_cast(data_reduce);
max_u = *thrust::max_element(data_ptr, data_ptr + bx * by * bz);
// берём глобальный максимум
MPI_Allreduce(&max_u, &max_global, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
}
while(max_global >= eps);
MPI_Barrier(MPI_COMM_WORLD);
CSC(cudaMemcpy(data_host, data, block_size * sizeof(double), cudaMemcpyDeviceToHost));
/* Подготовка текстового буфера */
memset(buf_char, ' ', bx * by * bz * N_CHARS_IN_DOUBLE * sizeof(char));
for (z = 0; z < bz; z++)
{
for (y = 0; y < by; y++)
{
for (x = 0; x < bx; x++)
{
sprintf(buf_char + (z * bx * by + y * bx + x) * N_CHARS_IN_DOUBLE,
"%7le ", data_host[xyz2i(x, y, z)]);
}
if (is_right)
{
buf_char[(z * bx * by + y * bx + x) * N_CHARS_IN_DOUBLE - 1] = '\n';
}
}
}
for (i = 0; i < bx * by * bz * N_CHARS_IN_DOUBLE; i++)
{
if (buf_char[i] == '\0')
buf_char[i] = ' ';
}
for(i = 0; i < by * bz; i++)
blocklens[i] = bx * N_CHARS_IN_DOUBLE;
for (z = 0; z < bz; z++)
{
for (y = 0; y < by; y++)
{
indicies[z * by + y] = i2k(id) * px * py * bx * by * bz * N_CHARS_IN_DOUBLE + // global offset
z * px * py * bx * by * N_CHARS_IN_DOUBLE +
i2j(id) * px * bx * by * N_CHARS_IN_DOUBLE +
y * px * bx * N_CHARS_IN_DOUBLE +
i2i(id) * bx * N_CHARS_IN_DOUBLE;
}
}
MPI_Type_create_hindexed(by * bz, blocklens, indicies, MPI_CHAR, &filetype);
MPI_Type_commit(&filetype);
MPI_File_delete(name_out, MPI_INFO_NULL);
MPI_File_open(MPI_COMM_WORLD, name_out, MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fp);
MPI_File_set_view(fp, 0, MPI_CHAR, filetype, "native", MPI_INFO_NULL);
MPI_File_write_all(fp, buf_char, bx * by * bz * N_CHARS_IN_DOUBLE, MPI_CHAR, &status); // надеюсь, диск выдержит
MPI_File_close(&fp);
free(MEM_CPU);
cudaFree(MEM_GPU);
MPI_Finalize();
return 0;
}
|
4273dd8d3736086e60cd328b0bbe5b574a5c766e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <errno.h>
#include "config.h"
#include "options.h"
#include <cutil.h>
#define is_suboptimal(a, b, c) abs(a - b) <= c
FILE *energyFile;
#define ENERGYFILE "energies.dat"
// #define DIFF3
// #define SHARED_Z // 2000
// #define SHARED_ENERGY // 8000
// #define SHARED_OFFSET // 8000
// #define SPARSE_CLOSED
// #define MAX_DIFF 200
#define SHMEM 1000
/* Always needed
====================================================================== */
#define min(A, B) ((A) < (B) ? (A) : (B))
#define max(A, B) ((A) > (B) ? (A) : (B))
/* Input handling
====================================================================== */
void convert_input(int start, char *z, int n);
/* Correct incomplete phases in adpc
====================================================================== */
#define decode(X) ((X)-'0')
/* Memory handling
====================================================================== */
/* wrappers for standard C functions
------------------------------------------- */
void memerr_exit(char *f);
void *mcalloc(size_t nobj, size_t size);
void *mmalloc(size_t size);
void *mrealloc(void *q, size_t size);
/* Memory management
------------------------------------------- */
typedef struct {
char **address;
int currentBlock;
int currentPos;
int blockSize;
int numberOfBlocks;
} tmemory;
tmemory *adp_statmem;
tmemory *adp_dynmem;
void set_adplib_debug(int debug);
void *myalloc(tmemory *mem, int size);
tmemory *memory_new();
void memory_clear(tmemory *mem);
void memory_free(tmemory *mem);
/* Preprocessing tools
====================================================================== */
char arr_iupac_base[128][5];
#define iupac_base(A,B) arr_iupac_base[A][B]
char *calc_contains_region(char *z, int n, int *offset, char *pat1);
/* String tools
====================================================================== */
char *mkstr(char *s);
#define dots(i,j) libPP_repeat(i,j,'.')
char *libPP_repeat(int i, int j, char c);
/* File input
====================================================================== */
/* A single sequence
------------------------------------------- */
typedef struct {
char success;
char *descr;
char *seq;
int length;
char *original_seq; /* backup for window mode */
int original_length;
} tsequence;
tsequence *sequence_new();
tsequence *sequence_free(tsequence *ts);
/* A complete file
------------------------------------------- */
#define READSEQ_FILE 1
#define READSEQ_STDIN 2
#define READSEQ_STRING 3
typedef struct {
char *filename;
char *start;
int current;
char first_input_read;
char first_descr_read;
char *temp;
} treadseq;
treadseq *readseq_open(char mode, char *filename);
treadseq *readseq_free(treadseq *rs);
/* reader for different input formats
------------------------------------------- */
tsequence *readseq_next_line(treadseq *rs);
tsequence *readseq_next_fasta(treadseq *rs);
/* Functions for results output
====================================================================== */
void simple_output_optimal (toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end);
void simple_output_subopt_start(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end);
void simple_output_subopt (toptions *opts, tsequence *seq, char *algebra, int score, char *result_prettyprint);
void simple_output_subopt_end (toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end);
void rna_output_optimal (toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end);
void rna_output_subopt_start(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end);
void rna_output_subopt (toptions *opts, tsequence *seq, char *algebra, int score, char *result_prettyprint);
void rna_output_subopt_end (toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end);
/* Tokenizer for interactive command shell
====================================================================== */
typedef struct {
char **token;
int count;
} ttokenizer;
ttokenizer *tokenizer_new();
ttokenizer *tokenizer_free(ttokenizer *t);
void tokenizer_exec(ttokenizer *t, char *name, char *s);
/* wrappers for readline
====================================================================== */
void rl_init();
char *rl_gets ();
/* colored output
====================================================================== */
#define COLOR_DEFAULT "\x1b[0m"
#define COLOR_BOLD "\x1b[1m"
#define COLOR_BLACK "\x1b[0;30m"
#define COLOR_BLUE "\x1b[0;34m"
#define COLOR_GREEN "\x1b[0;32m"
#define COLOR_CYAN "\x1b[0;36m"
#define COLOR_RED "\x1b[0;31m"
#define COLOR_PURPLE "\x1b[0;35m"
#define COLOR_BROWN "\x1b[0;33m"
#define COLOR_GRAY "\x1b[0;37m"
#define COLOR_DARKGRAY "\x1b[1;30m"
#define COLOR_LIGHTBLUE "\x1b[1;34m"
#define COLOR_LIGHTGREEN "\x1b[1;32m"
#define COLOR_LIGHTCYAN "\x1b[1;36m"
#define COLOR_LIGHTRED "\x1b[1;31m"
#define COLOR_LIGHTPURPLE "\x1b[1;35m"
#define COLOR_YELLOW "\x1b[1;33m"
#define COLOR_WHITE "\x1b[1;37m"
#define pcolor(colored,col) if (colored) printf(col)
/* Output format string handling
====================================================================== */
/* calculate the number of leading spaces for sequence output */
void fs_init_leading_space(char energy, char shrepProb, char dbString, char shapeString, char prob, char rank);
/* free the leading spaces temp mem. */
void fs_free_leading_space();
/* initialize a new format string */
void format_string_init(char *s);
/* set predefined output modes */
void setOutputMode(int outputMode);
/* main entry function for sequence output */
void print_sequence(toptions *opts, tsequence *seq, int pos, int size);
/* used in window-mode to shift the input sequence */
void shift_input(toptions *opts, tsequence *seq, char output);
/* main entry function for rna result output */
void output_result
(toptions *opts, tsequence *seq,
int nres, int *energy, double *shrepProb, char *dbString, char *shapeString, double prob, int rank);
typedef struct format_string{
char type;
char *string;
struct format_string *next;
} tformat_string;
/* Initialize all stuff from adplib
====================================================================== */
void adplib_init(toptions *opts, tsequence *seq, char **z, int *n);
void adplib_free(toptions *opts, tsequence *seq);
/* Input handling
====================================================================== */
/* The alphabet */
#define A 0
#define C 1
#define G 2
#define U 3
#define N 4
void convert_input(int start, char *z, int n){
int i;
char c;
for (i=start; i<=n; i++) {
c=z[i];
if (c=='a') z[i]=A;
else if (c=='c') z[i]=C;
else if (c=='g') z[i]=G;
else if (c=='u') z[i]=U;
else if (c=='t') z[i]=U; /* replace DNA with RNA */
else if (c=='A') z[i]=A;
else if (c=='C') z[i]=C;
else if (c=='G') z[i]=G;
else if (c=='U') z[i]=U;
else if (c=='T') z[i]=U;
else z[i]=N; /* all other characters are mapped to N and will not be paired */
}
}
/* Memory handling
====================================================================== */
/* wrappers for standard C functions
------------------------------------------- */
void memerr_exit(char *f){
fprintf(stderr, "\n%s: out of memory\n", f);
fprintf(stderr, "possible reasons:\n");
fprintf(stderr, " input sequence too long\n");
fprintf(stderr, " energy range too large (decrease with -e or -c)\n");
fprintf(stderr, " shape type not abstract enough (increase with -t)\n");
exit(1);
}
void *mcalloc(size_t nobj, size_t size){
void *p;
if ((p = calloc(nobj, size)) != NULL) return p;
else memerr_exit("calloc");
return NULL;
}
void *mmalloc(size_t size){
void *p;
if ((p = malloc(size)) != NULL) return p;
else memerr_exit("malloc");
return NULL;
}
void *mrealloc(void *q, size_t size){
void *p;
if ((p = realloc(q, size)) != NULL) return p;
else memerr_exit("realloc");
return NULL;
}
/* Memory management
------------------------------------------- */
#define ALIGNMENT 8
#define BLOCKSIZE 1000000
static int adplib_debug = 0;
void set_adplib_debug(int debug){
adplib_debug = debug;
printf("adplib_debug set to %d.\n", adplib_debug);
}
void *myalloc(tmemory *mem, int size)
{
// return mcalloc(size,sizeof(char));
//}
if (adplib_debug>1) printf("myalloc(), currentBlock = , currentPos = \n");
if (size % ALIGNMENT) {
size = ((size / ALIGNMENT) + 1) * ALIGNMENT;
if (adplib_debug>1) printf("realigned: %d\n", size);
}
if (mem->currentPos + size >= mem->blockSize) {
mem->currentBlock++;
if (mem->currentBlock > mem->numberOfBlocks) {
mem->address = (char **) mrealloc(mem->address, sizeof (char *) * mem->currentBlock);
mem->numberOfBlocks = mem->currentBlock;
mem->address[mem->currentBlock - 1] = (char*) mmalloc(mem->blockSize);
// if(adplib_debug>1) printf("address of mem->address[mem->currentBlock - 1]: %d\n", mem->address[mem->currentBlock - 1]);
}
mem->currentPos = 0;
// if (adplib_debug>1) printf("mrealloc: myalloc(%d), currentBlock = %d, currentPos = %d\n", size, mem->currentBlock, mem->currentPos);
}
mem->currentPos = mem->currentPos + size;
// if (adplib_debug>1) printf("myalloc: address: %d\n", mem->address[mem->currentBlock - 1] + (mem->currentPos - size));
return(mem->address[mem->currentBlock - 1] + (mem->currentPos - size));
}
tmemory *memory_new()
{
tmemory *mem = (tmemory *) mmalloc(sizeof(tmemory));
mem->address = (char **) mmalloc(sizeof(char *));
mem->address[0] = (char *) mmalloc(BLOCKSIZE);
mem->blockSize = BLOCKSIZE;
mem->currentBlock = 1;
mem->numberOfBlocks = 1;
mem->currentPos = 0;
if (adplib_debug>1) printf("adplib.memory.new(): allocated %d bytes\n", BLOCKSIZE);
return mem;
}
void memory_clear(tmemory *mem)
{
mem->currentBlock = 0;
mem->currentPos = mem->blockSize + 1;
}
void memory_free(tmemory *mem)
{
int i;
if (adplib_debug) printf("freeing %d blocks, blockSize = %d => %d bytes\n",
mem->numberOfBlocks, mem->blockSize, mem->numberOfBlocks * mem->blockSize);
for (i=0; i<=mem->numberOfBlocks-1; i++) free(mem->address[i]);
free(mem->address);
free(mem);
}
/* Preprocessing tools
====================================================================== */
/* ---------------------------------------------------------------------------------------------------- */
/* iupac_base */
/* ---------------------------------------------------------------------------------------------------- */
static void init_iupac_base(){
int i,j;
for (i=0;i<128;i++)
for (j=0;j<5;j++)
arr_iupac_base[i][j]=0;
arr_iupac_base['a'][A]=1; arr_iupac_base['A'][A]=1;
arr_iupac_base['c'][C]=1; arr_iupac_base['C'][C]=1;
arr_iupac_base['g'][G]=1; arr_iupac_base['G'][G]=1;
arr_iupac_base['t'][U]=1; arr_iupac_base['T'][U]=1;
arr_iupac_base['u'][U]=1; arr_iupac_base['U'][U]=1;
arr_iupac_base['r'][A]=1; arr_iupac_base['R'][A]=1;
arr_iupac_base['r'][G]=1; arr_iupac_base['R'][G]=1;
arr_iupac_base['y'][C]=1; arr_iupac_base['Y'][C]=1;
arr_iupac_base['y'][U]=1; arr_iupac_base['Y'][U]=1;
arr_iupac_base['s'][G]=1; arr_iupac_base['S'][G]=1;
arr_iupac_base['s'][C]=1; arr_iupac_base['S'][C]=1;
arr_iupac_base['w'][A]=1; arr_iupac_base['W'][A]=1;
arr_iupac_base['w'][U]=1; arr_iupac_base['W'][U]=1;
arr_iupac_base['k'][G]=1; arr_iupac_base['K'][G]=1;
arr_iupac_base['k'][U]=1; arr_iupac_base['K'][U]=1;
arr_iupac_base['m'][A]=1; arr_iupac_base['M'][A]=1;
arr_iupac_base['m'][C]=1; arr_iupac_base['M'][C]=1;
arr_iupac_base['b'][C]=1; arr_iupac_base['B'][C]=1;
arr_iupac_base['b'][G]=1; arr_iupac_base['B'][G]=1;
arr_iupac_base['b'][U]=1; arr_iupac_base['B'][U]=1;
arr_iupac_base['d'][A]=1; arr_iupac_base['D'][A]=1;
arr_iupac_base['d'][G]=1; arr_iupac_base['D'][G]=1;
arr_iupac_base['d'][U]=1; arr_iupac_base['D'][U]=1;
arr_iupac_base['h'][A]=1; arr_iupac_base['H'][A]=1;
arr_iupac_base['h'][C]=1; arr_iupac_base['H'][C]=1;
arr_iupac_base['h'][U]=1; arr_iupac_base['H'][U]=1;
arr_iupac_base['v'][A]=1; arr_iupac_base['V'][A]=1;
arr_iupac_base['v'][C]=1; arr_iupac_base['V'][C]=1;
arr_iupac_base['v'][G]=1; arr_iupac_base['V'][G]=1;
arr_iupac_base['n'][A]=1; arr_iupac_base['N'][A]=1;
arr_iupac_base['n'][C]=1; arr_iupac_base['N'][C]=1;
arr_iupac_base['n'][G]=1; arr_iupac_base['N'][G]=1;
arr_iupac_base['n'][U]=1; arr_iupac_base['N'][U]=1;
}
/* char arr_iupac_base[ */
/* A Adenine
*/
/* C Cytosine
*/
/* G Guanine
*/
/* T (or U) Thymine (or Uracil)
*/
/* R A or G
*/
/* Y C or T
*/
/* S G or C
*/
/* W A or T
*/
/* K G or T
*/
/* M A or C
*/
/* B C or G or T
*/
/* D A or G or T
*/
/* H A or C or T
*/
/* V A or C or G
*/
/* N any base
*/
/* . or - gap */
char *calc_contains_region(char *z, int n, int *offset, char *pat)
{
int i,j,k,l,ppos,inPattern;
char *arr;
if (adplib_debug>1) printf("entering...\n");
if (adplib_debug>1) for(i=0; i<=n; i++) printf("z[%d] = %d\n", i, z[i]);
l = strlen(pat);
if (adplib_debug>1) for(i=0; i<=l; i++) printf("pat[%d] = %c\n", i, pat[i]);
arr = (char *) malloc((offset[n]+n+1) * sizeof(char));
if (adplib_debug>1) printf("calculating...\n");
for (j=0; j<=n; j++) {
for (i=0; i<=j; i++) {
arr[offset[j]+i]=0; // tab(i,j)
ppos = 0;
inPattern = 0;
if (j-i >= l) {
for (k=i+1;k<=j;k++) {
if (inPattern){
if (iupac_base(pat[ppos],z[k])) {
ppos++;
}
else {
inPattern = 0;
ppos = 0;
break;
}
}
else {
if (iupac_base(pat[ppos],z[k])) {
inPattern = 1;
ppos++;
}
}
if (ppos==l) arr[offset[j]+i]=1;
}
}
}
}
if (adplib_debug>1) {
for (j=0; j<=n; j++) {
for (i=0; i<=j; i++) {
printf("%d ", arr[offset[j]+i]);
}
printf("\n");
}
}
return arr;
}
/* String tools
====================================================================== */
char *mkstr(char *s){
return strcpy((char *) malloc(strlen(s)+1 * sizeof(char)), s);
}
char *mkstr_stat(char *s){
return strcpy((char *) myalloc(adp_statmem, (strlen(s)+1) * sizeof(char)), s);
}
/* ---------------------------------------------------------------------------------------------------- */
/* Dotbracket tools */
/* ---------------------------------------------------------------------------------------------------- */
static char *libPP_hlp;
static char *dots_hlp;
char *libPP_repeat(int i, int j, char c){
int k;
for (k=0; k<(j-i); k++) libPP_hlp[k]=c;
libPP_hlp[k]=0;
return(libPP_hlp);
}
static void libPP_init(tsequence *seq){
libPP_hlp = (char *) myalloc(adp_statmem, (seq->length+4) * sizeof(char));
dots_hlp = mkstr_stat(libPP_repeat(0, seq->length, '.'));
}
/* File input
====================================================================== */
/* A single sequence
------------------------------------------- */
/* typedef struct { */
/* char success; */
/* char *descr; */
/* char *seq; */
/* int length; */
/* char *original_seq; /\* backup for window mode *\/ */
/* int original_length; */
/* } tsequence; */
tsequence *sequence_new()
{
tsequence *ts;
ts = (tsequence *) mmalloc(sizeof(tsequence));
ts->success = 0;
ts->descr = NULL;
ts->seq = NULL;
ts->length = 0;
ts->original_seq = NULL;
ts->original_length = 0;
return ts;
}
tsequence *sequence_free(tsequence *ts)
{
if (ts->descr) free(ts->descr);
if (ts->seq) free(ts->seq);
if (ts->original_seq) free(ts->original_seq);
free(ts);
return NULL;
}
/* A complete file
------------------------------------------- */
#define MAXINPUT 1000000
/* typedef struct { */
/* char *filename; */
/* char *start; */
/* int current; */
/* char first_input_read; */
/* char first_descr_read; */
/* char *temp; */
/* } treadseq; */
static char *readseq_readfile(FILE *inputStream)
{
int inpc, inpn;
int tinput_alloc;
char *tinput;
inpn = 0;
tinput = (char *) malloc(MAXINPUT * sizeof(char));
tinput_alloc = MAXINPUT;
while ((inpc = getc(inputStream)) != EOF) {
if (inpn == tinput_alloc-2) {
tinput = (char *) realloc(tinput, sizeof (char) * (tinput_alloc * 2));
tinput_alloc *= 2;
}
tinput[inpn++] = inpc;
}
tinput[inpn] = 0;
return tinput;
}
treadseq *readseq_open(char mode, char *filename){
treadseq *rs;
FILE *inputStream;
rs = (treadseq *) malloc(sizeof(treadseq));
rs->current = 0;
rs->first_input_read = 0;
rs->first_descr_read = 0;
rs->temp = (char *) malloc(MAXINPUT * sizeof(char));
if (mode == READSEQ_STRING) {
rs->start = mkstr(filename);
rs->filename = mkstr("command line");
return rs;
}
if (mode == READSEQ_STDIN) {
inputStream = stdin;
rs->filename = mkstr("stdin");
}
if (mode == READSEQ_FILE) {
inputStream = fopen(filename, "r");
rs->filename = mkstr(filename);
}
if (!inputStream) {
fprintf(stderr, "error opening file %s: %s\n", filename, strerror(errno));
exit(errno);
}
rs->start = readseq_readfile(inputStream);
if (fclose(inputStream)) {
fprintf(stderr, "error closing file %s: %s\n", filename, strerror(errno));
exit(errno);
}
return rs;
}
treadseq *readseq_free(treadseq *rs){
if (rs) {
free(rs->filename);
free(rs->start);
free(rs->temp);
free(rs);
}
return NULL;
}
/* Read next line from file
------------------------------------------- */
/* Read the next line from rs into a fresh tsequence.
   ts->success is 1 if a line (possibly empty after CR stripping) was
   read, 0 at end of input.
   Fix: the DOS carriage-return strip is now applied BEFORE the line is
   copied with mkstr.  In the original the strip ran (twice, duplicated)
   after the copies, so ts->seq/ts->length still contained the '\r'. */
tsequence *readseq_next_line(treadseq *rs){
  tsequence *ts;
  int pos;
  ts = sequence_new();
  ts->success = 0;
  if (adplib_debug>1) printf("rs: success = 0\n");
  if (adplib_debug>1) printf("rs: %d\n", rs->start[rs->current]);
  rs->temp[0] = 0; pos = 0;
  if (rs->start[rs->current] != 0) {
    /* copy up to (but not including) the newline */
    while ((rs->start[rs->current] != '\n') && (rs->start[rs->current] != 0))
      rs->temp[pos++] = rs->start[rs->current++];
    if (rs->start[rs->current]=='\n') rs->current++;
    rs->temp[pos] = 0;
    /* remove additional #13 for DOS input files before copying */
    if ((pos >= 1) && (rs->temp[pos-1] == 13)) rs->temp[pos-1] = 0;
    ts->seq = mkstr(rs->temp);
    ts->original_seq = mkstr(rs->temp);
    ts->length = ts->original_length = strlen(ts->seq);
    ts->success = 1;
    if (adplib_debug>1) printf("rs: success = 1\n");
  }
  return ts;
}
/* Read next fasta sequence from file
------------------------------------------- */
/* Parse the next FASTA record from rs: an optional '>' header line
   followed by sequence lines up to the next '>' or end of input.
   Only letters (A-Z, a-z) are kept in the sequence body; every other
   character is skipped.  ts->success is 1 iff the sequence is
   non-empty. */
tsequence *readseq_next_fasta(treadseq *rs){
  tsequence *ts;
  int pos;
  char inpc,fil;
  ts = sequence_new();
  rs->temp[0] = 0; pos = 0;
  /* header: copy the text after '>' up to end of line into temp */
  if ((rs->start[rs->current] == '>') && (rs->start[rs->current] != 0)) {
    if (!rs->first_descr_read && rs->first_input_read) {
      fprintf(stderr, "error in input file: missing description for first sequence\n");
      exit(1);
    }
    rs->first_descr_read = 1;
    rs->current++;
    while ((rs->start[rs->current] != '\n') && (rs->start[rs->current] != 0)) rs->temp[pos++] = rs->start[rs->current++];
    if (rs->start[rs->current]) rs->current++;
  }
  rs->temp[pos] = 0;
  ts->descr = mkstr(rs->temp);
  // remove additional #13 for DOS input files:
  // NOTE(review): this strips rs->temp AFTER ts->descr was copied, so
  // the stored description still carries the CR -- confirm intent.
  if ((pos >= 1) && (rs->temp[pos-1] == 13)) rs->temp[pos-1] = 0;
  rs->temp[0] = 0; pos = 0;
  /* body: collect letters line by line until the next '>' header.
     fil marks the first line, which is consumed even without a header. */
  fil = 1;
  while ((fil || (rs->start[rs->current] != '>')) && (rs->start[rs->current] != 0)) {
    while (((inpc = rs->start[rs->current]) != '\n') && (rs->start[rs->current] != 0))
      if (((inpc >= 65) && (inpc <= 90)) ||
          ((inpc >= 97) && (inpc <= 122))) rs->temp[pos++] = rs->start[rs->current++];  /* keep letters only */
      else rs->current++;
    fil = 0;
    if (rs->start[rs->current]) rs->current++;   /* step over the newline */
    rs->first_input_read = 1;
  }
  rs->temp[pos] = 0;
  ts->seq = mkstr(rs->temp);
  ts->original_seq = mkstr(rs->temp);
  ts->length = ts->original_length = strlen(ts->seq);
  /* success only when at least one sequence character was read */
  if (ts->seq[0]) ts->success = 1;
  else ts->success = 0;
  return ts;
}
/* Functions for results output
====================================================================== */
/* Simple standard output
------------------------------------------- */
/* Plain-text report of the optimal result: echoes the input sequence,
   the algebra name and the optimal score (each bold when colored
   output is enabled).  range_begin/range_end are unused here. */
void simple_output_optimal(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end)
{
  fputs("\nInput: ", stdout);
  pcolor(opts->colored_output,COLOR_BOLD);
  fputs(seq->seq, stdout);
  pcolor(opts->colored_output,COLOR_DEFAULT);
  fputs("\nAlgebra: ", stdout);
  pcolor(opts->colored_output,COLOR_BOLD);
  fputs(algebra, stdout);
  pcolor(opts->colored_output,COLOR_DEFAULT);
  fputs(", score: ", stdout);
  pcolor(opts->colored_output,COLOR_BOLD);
  printf("%d\n", result_score);
  pcolor(opts->colored_output,COLOR_DEFAULT);
}
/* Print the header for a list of suboptimal candidates.
   The "count" algebra produces no candidate list, so it gets no header. */
void simple_output_subopt_start(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end)
{
  if (strcmp(algebra, "count") == 0) return;
  printf("Suboptimal range: [%d - %d]\n", range_begin, range_end);
  printf("\n");
  printf(" Score | Candidate\n");
  printf("-----------------------------------------------------------------\n");
}
/* Print one suboptimal candidate as "score | prettyprint";
   suppressed for the "count" algebra. */
void simple_output_subopt(toptions *opts, tsequence *seq, char *algebra, int score, char *result_prettyprint)
{
  if (strcmp(algebra, "count") != 0)
    printf("%6d | %s\n", score, result_prettyprint);
}
/* Print the footer after a list of suboptimal candidates. */
void simple_output_subopt_end(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end)
{
  fputs("\n", stdout);
  fputs("=================================================================\n", stdout);
}
/* RNA output
------------------------------------------- */
/* RNA report of the optimal score in kcal/mol (score is stored as
   centi-kcal/mol, hence the /100).  Suppressed in window mode. */
void rna_output_optimal(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end)
{
  if (opts->window_mode) return;
  printf("\n");
  pcolor(opts->colored_output,COLOR_BOLD);
  printf("%s: ", algebra);
  printf("%.2f kcal/mol", ((float) result_score) / 100);
  pcolor(opts->colored_output,COLOR_DEFAULT);
  printf("\n");
}
/* RNA header before a list of suboptimal candidates: energy range,
   FASTA description and (unless split mode) the original sequence.
   Everything is suppressed for the "count" algebra. */
void rna_output_subopt_start(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end)
{
  int is_count = (strcmp(algebra, "count") == 0);
  if (!opts->window_mode && !is_count) {
    printf("Suboptimal range: [%.2f kcal/mol - %.2f kcal/mol]\n", ((float) range_begin)/100, ((float) range_end)/100);
    printf("\n");
    if (seq->descr && seq->descr[0]) {
      pcolor(opts->colored_output,COLOR_BOLD);
      printf(">%s", seq->descr);
      pcolor(opts->colored_output,COLOR_DEFAULT);
      printf("\n");
    }
    if (!opts->split_output_mode) printf("%s\n", seq->original_seq);
  }
  /* window mode: the first iteration prints its subsequence here;
     later iterations do it in shift_input */
  if (opts->window_mode && (opts->window_pos == 0) && !is_count)
    print_sequence(opts, seq, opts->window_pos, opts->window_size);
}
/* Forward one suboptimal candidate to the format-string driven
   output_result; suppressed for the "count" algebra. */
void rna_output_subopt(toptions *opts, tsequence *seq, char *algebra, int score, char *result_prettyprint)
{
  if (strcmp(algebra, "count") == 0) return;
  output_result(opts, seq, 1, // TODO: number of results
                &score, NULL, result_prettyprint, NULL, -1, -1);
}
/* End-of-suboptimal-list hook for the RNA output driver.
   Intentionally a no-op; the old separator output is kept below for
   reference. */
void rna_output_subopt_end(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end)
{
  /* printf("\n"); */
  /* pcolor(opts->colored_output,COLOR_BOLD); */
  /* printf("=================================================================\n"); */
  /* pcolor(opts->colored_output,COLOR_DEFAULT); */
}
/* Print the FASTA description line ">descr" in bold, if one exists. */
void rna_output_descr(toptions *opts, tsequence *seq)
{
  if (seq->descr == NULL) return;
  if (seq->descr[0] == 0) return;
  pcolor(opts->colored_output,COLOR_BOLD);
  printf(">%s", seq->descr);
  pcolor(opts->colored_output,COLOR_DEFAULT);
  printf("\n");
}
/* Tokenizer for interactive command shell
====================================================================== */
#define MAXTOKEN 500
/* typedef struct { */
/* char **token; */
/* int count; */
/* } ttokenizer; */
/* Allocate an empty tokenizer with room for MAXTOKEN token pointers. */
ttokenizer *tokenizer_new(){
  ttokenizer *tok = (ttokenizer *) malloc(sizeof(ttokenizer));
  tok->token = (char **) malloc(MAXTOKEN * sizeof(char *));
  tok->count = 0;
  return tok;
}
/* Free a tokenizer together with all stored token strings; NULL-safe.
   Returns NULL for the `t = tokenizer_free(t);` idiom. */
ttokenizer *tokenizer_free(ttokenizer *t){
  int i;
  if (!t) return NULL;
  for (i = 0; i < t->count; i++)
    free(t->token[i]);
  free(t->token);
  free(t);
  return NULL;
}
/* Split the command line s into tokens, destructively.
   Unquoted spaces (and single quotes, which toggle quoting) are
   replaced by the separator byte 1, then strtok splits on it.
   token[0] is always a copy of `name`; the remaining tokens follow.
   Fixes: strlen(s) is hoisted out of the replacement loop (the original
   re-scanned s every iteration, O(n^2)), and token collection stops at
   MAXTOKEN to avoid overrunning the token array. */
void tokenizer_exec(ttokenizer *t, char *name, char *s)
{
  int i, len;
  char in_quotes;
  char *ttoken;
  char septoken[] = {1,0};
  // free old token strings:
  for (i=0; i < t->count; i++) free(t->token[i]);
  // build new token array:
  t->count = 1;
  // replace whitespaces (quotes protect embedded spaces):
  in_quotes = 0;
  len = strlen(s);
  for (i=0; i<len; i++) {
    if (!in_quotes && (s[i] == ' ')) s[i] = 1;
    if (s[i] == 39) {           /* single quote */
      in_quotes = 1-in_quotes;
      s[i] = 1;
    }
  }
  // build token list:
  t->token[0] = mkstr(name);
  while((ttoken = strtok(s,septoken))) {
    if (t->count >= MAXTOKEN) break;   /* don't overrun the token array */
    t->token[t->count++] = mkstr(ttoken);
    s = NULL;
  }
}
/* wrappers for readline
====================================================================== */
/* wrappers for readline
====================================================================== */
#ifdef HAVE_LIBEDITLINE
extern int rl_insert(int count, int c);
extern int rl_bind_key(int c, int func(int, int));
extern char *readline(const char *prompt);
extern int add_history(const char *line);
#endif
static char *line_read = (char *)NULL;
/* Prepare line reading: with libeditline the library manages its own
   buffers; otherwise allocate the static MAXINPUT-byte input buffer
   used by rl_gets. */
void rl_init() {
#ifdef HAVE_LIBEDITLINE
  // printf("readline activated\n");
  // rl_bind_key('\t', rl_insert);
#else
  line_read = (char *) calloc(MAXINPUT, sizeof(char));
  line_read[0] = 0;
#endif
}
/* Read one line of user input.
   With libeditline: uses readline() and records non-empty lines in the
   history.  Without: fgets() into the static buffer from rl_init.
   Returns the buffer (caller must not free it in the fgets path).
   Fixes (fgets path): the fgets return value is now checked, so EOF
   yields an empty line instead of stale buffer contents; and only a
   trailing '\n' is stripped (the original always chopped the last
   character, corrupting over-long lines and underflowing on an empty
   buffer). */
char *rl_gets (){
#ifdef HAVE_LIBEDITLINE
  if (line_read)
    {
      free (line_read);
      line_read = (char *)NULL;
    }
  line_read = readline (NULL);
  if (line_read && *line_read)
    add_history (line_read);
#else
  if (!fgets(line_read, MAXINPUT-2, stdin)) line_read[0] = 0;
  // remove last newline, if present
  {
    size_t l = strlen(line_read);
    if (l > 0 && line_read[l-1] == '\n') line_read[l-1] = 0;
  }
#endif
  return (line_read);
}
/* Output format string handling
====================================================================== */
/* show shrep probability */
static char shrep_prob_show;
/* PS output */
static char graphics_alloc = 0;
static char *graphics_sequence;
static char *graphics_shapeString;
static char *graphics_preString;
static char *graphics_fileName_temp;
static char *graphics_fileName;
int number_of_graphics;
char colored_db2shape;
/* typedef struct { */
/* char type; */
/* char *string; */
/* struct format_string *next; */
/* } tformat_string; */
static tformat_string *format_string_struct = NULL; // current format string
static char *leading_space; // help strings
static char *leading_space_db;
#define FORMAT_LENGTH 1024
#define FORMAT_ENERGY 1
#define FORMAT_SHREPPROB 2
#define FORMAT_DBSTRING 3
#define FORMAT_SHAPESTRING 4
#define FORMAT_PROB 5
#define FORMAT_RANK 6
#define FORMAT_VERB 7
/* create a new format string entry */
/* Allocate a single format-string list node of the given type that
   owns a copy of `string`; next starts out NULL. */
static tformat_string *format_string_newentry(char type, char *string){
  tformat_string *entry = (tformat_string *) mcalloc(1, sizeof(tformat_string));
  entry->type = type;
  entry->string = mkstr(string);
  entry->next = NULL;
  return entry;
}
/* free a complete format string structure */
/* Release a whole format-string list, including each node's string.
   NULL-safe. */
static void format_string_free(tformat_string *l){
  while (l) {
    tformat_string *next = l->next;
    free(l->string);
    free(l);
    l = next;
  }
}
/* error handling for format strings; print error and use mode -o 2 instead */
/* Report a format-string problem and fall back to the default output
   mode (-o 2).  s is a printf format consuming one char argument c;
   all callers pass compile-time literal formats, so the non-literal
   printf here is safe by convention. */
static void format_string_error(char *s, char c){
  printf(s, c);
  printf("Using default output mode instead (-o 2).\n");
  setOutputMode(2);
}
/* build a format string structure for the given string */
/* Parse a format description like "E{%.2f }D{%s}V{\n}" into a linked
   list of tformat_string nodes.
   Grammar: (TYPECHAR '{' text '}')* with TYPECHAR in {E,R,D,S,P,C,V};
   text may contain the escapes \n, \t and \e.
   On a syntax error the problem is reported, mode -o 2 is installed
   (via format_string_error) and the then-current global structure is
   returned.  Fix: the temp buffer `t` and the partially built list are
   now released on every error path (the original leaked both). */
static tformat_string *format_string_build(char *s){
  char *t;
  char type;
  int pos, tpos;
  tformat_string *fs, *tfs, *ffs;
  if (strlen(s)>FORMAT_LENGTH) {
    format_string_error("Format string too long\n", 0);
    return(format_string_struct);
  }
  t = (char *) mcalloc(FORMAT_LENGTH+1, sizeof(char));
  tfs = NULL; ffs = NULL;   /* tfs = tail of list, ffs = head */
  pos = 0; tpos = 0;
  while (s[pos]) {
    /* a single type character selects the field */
    switch (s[pos++]) {
    case 'E':
      type = FORMAT_ENERGY;
      break;
    case 'R':
      type = FORMAT_SHREPPROB;
      break;
    case 'D':
      type = FORMAT_DBSTRING;
      break;
    case 'S':
      type = FORMAT_SHAPESTRING;
      break;
    case 'P':
      type = FORMAT_PROB;
      break;
    case 'C':
      type = FORMAT_RANK;
      break;
    case 'V':
      type = FORMAT_VERB;
      break;
    default:
      format_string_error("Syntax error in format string: unexpected character '%c'.\n", s[pos-1] == 1 ? ' ' : s[pos-1]);
      free(t);                  /* don't leak the temp buffer */
      format_string_free(ffs);  /* don't leak the partial list */
      return(format_string_struct);
    }
    if (s[pos++] != '{') {
      format_string_error("Syntax error in format string: '{' expected.\n", 0);
      free(t);
      format_string_free(ffs);
      return(format_string_struct);
    }
    else {
      /* copy the brace-enclosed text into t, resolving escapes */
      tpos = 0;
      while (s[pos] && (s[pos] != '}')) {
        if (s[pos] == '\\') {
          pos++;
          switch (s[pos++]) {
          case 'n':
            t[tpos++] = '\n';
            break;
          case 't':
            t[tpos++] = '\t';
            break;
          case 'e':
            t[tpos++] = '\x1b';
            break;
          default:
            format_string_error("Syntax error in format string: unexpected character '\\%c'.\n", s[pos-1]);
            free(t);
            format_string_free(ffs);
            return(format_string_struct);
          }
        }
        else t[tpos++] = s[pos++];
      }
      t[tpos] = 0;
      /* append the new node to the list */
      fs = format_string_newentry(type, t);
      if (!ffs) ffs=fs;
      else tfs->next = fs;
      tfs = fs;
    }
    pos++;   /* step over the closing '}' */
  }
  free(t);
  return ffs;
}
/* remove all ansi color command from the given string;
used to calculate the correct number of leading spaces */
/* Strip ANSI color escape sequences (ESC ... 'm') from s, in place.
   Used so the leading-space width calculations count only visible
   characters. */
static void fs_remove_color_commands(char *s){
  int read_pos = 0, write_pos = 0;
  int len = strlen(s);
  while (read_pos < len) {
    if (s[read_pos] == '\x1b') {
      /* skip everything up to and including the terminating 'm' */
      while ((read_pos < len) && (s[read_pos] != 'm')) read_pos++;
    }
    else {
      s[write_pos++] = s[read_pos];
    }
    read_pos++;
  }
  s[write_pos] = 0;
}
/* calculate the number of leading spaces for sequence output; example:
<leading spaces>gucugcaugacugacugacugacuguagcugcaugcaugcaugcacugaugca
(-20.4) ....(((.....))).........((((.....))))................
*/
/* calculate the number of leading spaces for sequence output; example:
   <leading spaces>gucugcaugacugacugacugacuguagcugcaugcaugcaugcacugaugca
   (-20.4)         ....(((.....))).........((((.....))))................
   Renders the current format string once with dummy values, strips
   color codes, and turns everything before the dot-bracket field into
   blanks.  Sets the globals leading_space (everything before the D
   field) and leading_space_db (the D field's own prefix).  The flag
   arguments enable the corresponding fields, mirroring output_result. */
void fs_init_leading_space(char energy, char shrepProb, char dbString, char shapeString, char prob, char rank){
  tformat_string *itr;
  int pos;
  char dbinside;   /* set once the first D field has been seen */
  char *t, *s;
  /* t = start of scratch buffer, s = current write position */
  s = t = (char *) myalloc(adp_statmem, (FORMAT_LENGTH+100) * sizeof(char));
  leading_space_db = (char *) myalloc(adp_statmem, 100 * sizeof(char));
  dbinside = 0;
  itr = format_string_struct;
  while (itr) {
    switch(itr->type) {
    case FORMAT_ENERGY:
      /* render each enabled field with a representative dummy value */
      if (energy) sprintf(s, itr->string, -10.0);
      break;
    case FORMAT_SHREPPROB:
      if (shrep_prob_show && shrepProb) sprintf(s, itr->string, 0.5);
      break;
    case FORMAT_DBSTRING:
      /* '{' marks where the dot-bracket string starts in the output */
      if (dbString) sprintf(s, "{");
      if (!dbinside) {
        /* compute leading_space_db: blanks covering the D field's own
           prefix, up to the '{' marker */
        sprintf(leading_space_db, itr->string, "{");
        fs_remove_color_commands(leading_space_db);
        pos = 0;
        while(leading_space_db[pos]) {
          if (leading_space_db[pos]=='{') {
            leading_space_db[pos]=0;
            break;
          }
          if ((leading_space_db[pos]=='\t') || (leading_space_db[pos]=='\n')) pos++;
          else leading_space_db[pos++]=' ';
        }
      }
      dbinside = 1;
      break;
    case FORMAT_SHAPESTRING:
      if (shapeString) sprintf(s, itr->string, "[][]");
      break;
    case FORMAT_PROB:
      if (prob) sprintf(s, itr->string, 0.5);
      break;
    case FORMAT_RANK:
      if (rank) sprintf(s, itr->string, 1);
      break;
    case FORMAT_VERB:
      sprintf(s, "%s", itr->string);
      break;
    }
    s = s + strlen(s);   /* advance write position past the new text */
    itr = itr->next;
  }
  fs_remove_color_commands(t);
  s[0] = 0;
  /* blank out everything before the '{' marker; tabs/newlines are kept */
  pos=0;
  while(t[pos]) {
    if (t[pos]=='{') {
      t[pos]=0;
      break;
    }
    if ((t[pos]=='\t') || (t[pos]=='\n')) pos++;
    else t[pos++]=' ';
  }
  /* no dot-bracket field at all -> no leading space needed */
  if (!dbString || !dbinside) t[0]=0;
  leading_space = t;
}
/* free the leading spaces temp mem. */
/* Release the leading-space helper strings built by
   fs_init_leading_space.
   NOTE(review): both buffers were obtained via myalloc(adp_statmem,...)
   but are released with plain free() -- confirm the pool allocator
   hands out malloc-compatible pointers. */
void fs_free_leading_space(){
  free(leading_space);
  free(leading_space_db);
}
/* initialize a new format string */
void format_string_init(char *s){
if (format_string_struct) format_string_free(format_string_struct);
format_string_struct = format_string_build(s);
}
/* set predefined output modes */
/* Select one of the predefined output format strings.
   Modes 1..3 are plain layouts; any other value selects the colored
   default layout. */
void setOutputMode(int outputMode){
  switch (outputMode) {
  case 1:
    format_string_init("D{%s }E{(%.2f) }R{(%.7f) }P{%.7f }S{%s}C{ R = %d}V{\n}");
    break;
  case 2:
    format_string_init("E{%-8.2f}R{(%.7f) }D{%s }P{%.7f }S{%s}C{ R = %d}V{\n}");
    break;
  case 3:
    format_string_init("E{%.2f }R{%.7f }D{%s }P{%.7f }S{%s}C{ %d}V{\n}");
    break;
  default:
    format_string_init("E{%-8.2f}R{(%.7f) }D{\x1b[1;31m%s\x1b[0m }P{\x1b[1;30m%.7f\x1b[0m }S{%s}C{ R = %d}V{\n}");
    break;
  }
}
/* Output handling for sequences
====================================================================== */
/* print position numbers for sequence;
used in window- and split-output modes */
/* Print a position ruler "<first> ... <last>" over a subsequence
   window starting at 0-based pos with the given width; used in
   window- and split-output modes.  The inner loop pads with exactly
   enough blanks that the ruler spans `size` columns: the decimal
   widths of both printed numbers (computed via log10) are subtracted. */
static void print_subseq_numbers(toptions *opts, int pos, int size){
  int i;
  /* split mode: clip the ruler to the current split chunk */
  if (opts->split_output_mode) size = min(opts->window_pos + opts->window_size, pos + opts->split_output_size) - pos;
  pcolor(opts->colored_output, COLOR_BLUE);
  printf("%d", pos +1);   /* positions are shown 1-based */
  pcolor(opts->colored_output,COLOR_DEFAULT);
  for (i=1; i<= size - (((int)log10(pos + 1))+1 + ((int)log10(pos + size))+1); i++) printf(" ");
  pcolor(opts->colored_output,COLOR_BLUE);
  printf("%d", pos + size);
  pcolor(opts->colored_output,COLOR_DEFAULT);
  printf("\n");
}
/* print the given subsequence of a sequence */
/* Print characters offset+pos+1 .. offset+pos+size of s (pos is
   0-based within the window), padding with blanks past the end of s,
   through the printf format fs.
   Fixes: strlen(s) is hoisted out of the loop condition (the original
   re-scanned s every iteration, O(n^2)), and the scratch buffer is
   sized to hold size+1 bytes even when s is shorter than the window. */
static void print_subseq(char *fs, char *s, int offset, int pos, int size){
  int i, tpos;
  int len;
  char *tmp;
  len = strlen(s);
  tmp = (char*) mcalloc(2*len + size + 1, sizeof(char));
  tpos = 0;
  for (i=pos+1; i <= pos + size; i++)
    if (i <= len) tmp[tpos++] = s[offset+i-1];
    else tmp[tpos++] = ' ';   /* past the end: pad with blanks */
  tmp[tpos] = 0;
  printf(fs, tmp);
  free(tmp);
}
/* same as print_subseq; used in color mode */
/* Like print_subseq, but s contains ANSI color escapes: prints `size`
   visible characters starting after visible position `pos`, carrying
   the color state active at the cut point into the output and
   resetting colors at the end.  Escape bytes are not counted as
   sequence positions. */
static void print_subseq_color(char *fs, char *s, int pos, int size){
  int i,c,tpos,ls;
  char e;                        /* inside an escape sequence? */
  char *lastcol, *tmp, *tmp2;
  int plastcol;                  /* write index into lastcol, -1 = none */
  ls = strlen(s);
  lastcol = (char*) mcalloc(20, sizeof(char));
  tmp = (char*) mcalloc(20*ls, sizeof(char));
  tmp2 = (char*) mcalloc(20*ls, sizeof(char));
  plastcol = -1;
  // go to character pos+1, remembering the last color command seen:
  c=0; i=0; e=0;
  while (c<pos+1) {
    if (s[i] == '\x1b') { e = 1; plastcol = 0; }
    else if ((s[i] == 'm') && e) e = 0;
    else if (e) lastcol[plastcol++] = s[i];   /* record escape payload */
    else c++;                                 /* visible character */
    i++;
  }
  if (plastcol != -1) lastcol[plastcol] = 0;
  /* copy `size` visible characters (blanks past the end) into tmp2 */
  tpos = 0;
  while (c<=pos+size) {
    if (c <= ls) tmp2[tpos++] = s[i-1];
    else tmp2[tpos++] = ' ';
    if (s[i] == '\x1b') e = 1;
    else if ((s[i] == 'm') && e) e = 0;
    else if (e) ;                             /* skip escape bytes */
    else c++;
    i++;
  }
  tmp2[tpos] = 0;
  strcat(tmp2, "\x1b[0m");   /* reset colors after the fragment */
  tmp[0]=0;
  /* re-emit the color that was active at the cut point */
  if (plastcol != -1) sprintf(tmp, "\x1b%sm", lastcol);
  strcat(tmp, tmp2);
  printf(fs, tmp);
  free(lastcol);
  free(tmp);
  free(tmp2);
}
/* main entry function for sequence output */
/* Print the current (sub)sequence preceded by the leading-space
   padding; in window mode a position-number ruler is printed first.
   In split-output mode printing is handled elsewhere (output_result),
   so this is a no-op there. */
void print_sequence(toptions *opts, tsequence *seq, int pos, int size){
  if (opts->split_output_mode) return;
  if (opts->window_mode) {
    printf("%s%s", leading_space, leading_space_db);
    print_subseq_numbers(opts, pos, size);
  }
  printf("%s%s", leading_space, leading_space_db);
  print_subseq("%s", seq->original_seq, pos, 0, size);
  printf("\n");
}
/* used in window-mode to shift the input sequence */
/* Window mode: copy the window [window_pos .. window_pos+window_size]
   from the pristine original_seq back into seq->seq (and, when
   graphics output is active, into graphics_sequence), re-encode it via
   convert_input, and optionally print the shifted subsequence. */
void shift_input(toptions *opts, tsequence *seq, char output){
  int i;
  /* printf("window_pos: %d\n", opts->window_pos); */
  /* printf("window_size: %d\n", opts->window_size); */
  /* printf("seq->seq: %s\n", seq->seq); */
  /* printf("seq->original_seq: %s\n", seq->original_seq); */
  for (i=opts->window_pos; i<=opts->window_pos + opts->window_size; i++)
    seq->seq[i-opts->window_pos] = seq->original_seq[i];
  if (number_of_graphics) {
    /* keep the plot buffer in sync with the window */
    for (i=opts->window_pos; i<=opts->window_pos + opts->window_size; i++)
      graphics_sequence[i-opts->window_pos] = seq->original_seq[i];
    graphics_sequence[strlen(seq->seq)] = 0;
  }
  /* re-encode characters into the numeric alphabet for the window */
  convert_input(0, seq->seq, opts->window_size);
  if (output) print_sequence(opts, seq, opts->window_pos, opts->window_size);
}
/* from db2shape-cl */
/* Result pair from the (currently disabled) db2shape colorizer:
   a dot-bracket string and its abstract shape string. */
struct dbcol_result{
  char *dbstr;
  char *shapestr;
};
// TODO struct dbcol_result *calc_db2shape_cl(char *input, int st, int _maxloop);
/* main entry function for rna result output */
/* main entry function for rna result output.
   Walks the global format-string list and prints each enabled field:
   energy (centi-kcal/mol, printed /100), shrep probability,
   dot-bracket string (with split-output handling), shape string,
   probability, rank and verbatim text.  NULL pointers / negative
   prob/rank suppress the corresponding field. */
void output_result
(toptions *opts, tsequence *seq,
 int nres, int *energy, double *shrepProb, char *dbString, char *shapeString, double prob, int rank){
  int pos, size;
  tformat_string *itr;
  char colors;
  itr = format_string_struct;
  colors = colored_db2shape && dbString;
  if (colors) {
    // TODO dbcol = calc_db2shape_cl(dbString, global_shapetype, maxloop);
    // TODO dbString = dbcol->dbstr;
    // TODO shapeString = dbcol->shapestr;
  }
  while (itr) {
    switch(itr->type) {
    case FORMAT_ENERGY:
      if (energy) printf(itr->string, (float) *energy / 100);
      break;
    case FORMAT_SHREPPROB:
      if (shrep_prob_show && shrepProb) printf(itr->string, *shrepProb);
      break;
    case FORMAT_DBSTRING:
      if (dbString) {
        if (!opts->split_output_mode) printf(itr->string, dbString);
        else {
          /* split mode: emit ruler, sequence chunk and structure chunk
             for every split_output_size wide slice of the window */
          for (pos = 0; pos < opts->window_size; pos += opts->split_output_size) {
            size = opts->split_output_size;
            if (pos) printf("%s%s", leading_space, leading_space_db);
            else printf("%s", leading_space_db);
            print_subseq_numbers(opts, pos + opts->window_pos, size);
            printf("%s%s", leading_space, leading_space_db);
            print_subseq("%s", seq->original_seq, opts->window_pos, pos, size);
            printf("\n");
            printf("%s", leading_space);
            if (colors) print_subseq_color(itr->string, dbString, pos, size);
            else print_subseq (itr->string, dbString, 0, pos, size);
            if (pos + opts->split_output_size < opts->window_size) printf("\n");
          }
        }
      }
      break;
    case FORMAT_SHAPESTRING:
      /* note: the else binds to the inner if -- empty shapes print "_" */
      if (shapeString) if (shapeString[0]==0) printf(itr->string, "_");
      else printf(itr->string, shapeString);
      break;
    case FORMAT_PROB:
      if (prob >= 0) printf(itr->string, prob);
      break;
    case FORMAT_RANK:
      if (rank >= 0) printf(itr->string, rank);
      break;
    case FORMAT_VERB:
      printf("%s",itr->string);
      break;
    }
    itr = itr->next;
    // free colored strings:
    // NOTE(review): this cleanup block sits INSIDE the while loop; once
    // the TODOs below are enabled it would free once per format entry --
    // confirm it should move after the loop.
    if (colors) {
      // TODO free(dbcol->dbstr);
      // TODO free(dbcol->shapestr);
      // TODO free(dbcol);
    }
    // TODO if (dbString) rna_plot(nres, dbString_org, energy, prob, shapeString_org);
  }
}
/* Initialize all stuff from adplib
====================================================================== */
/* Per-sequence setup of the adplib runtime: exposes the sequence as a
   1-based char pointer *z with effective length *n (clipped to the
   window size in window mode), creates the dynamic/static memory
   pools, and installs the default output format, IUPAC tables and
   leading-space strings. */
void adplib_init(toptions *opts, tsequence *seq, char **z, int *n){
  /* callers index z starting at 1, hence the -1 */
  *z = (char *) seq->seq - 1;
  if (opts->window_mode) {
    *n = min(opts->window_size, seq->length);
  }
  else {
    /* no windowing: one window spanning the whole sequence */
    *n = seq->length;
    opts->window_size = seq->length;
  }
  opts->window_size = min(opts->window_size, seq->length);
  adp_dynmem = memory_new();
  adp_statmem = memory_new();
  libPP_init(seq);
  setOutputMode(1);
  init_iupac_base();
  fs_init_leading_space(1,1,1,1,1,1);
}
/* Tear down the adplib memory pools created in adplib_init.
   opts and seq are currently unused. */
void adplib_free(toptions *opts, tsequence *seq){
  memory_free(adp_dynmem);
  memory_free(adp_statmem);
}
/* ---------------------------------------------------------------------------
rnalib.h
RNA energy library, based on Haskell implementation by Jens Reeder
Author: Peter Steffen
$Date: 2006/04/18 08:40:55 $
--------------------------------------------------------------------------- */
// alphabet size (A,C,G,U,N)
#define ASIZE 5
#define inp(I) z[I]
#define d_inp(I) d_z[I]
/* basepair and stackpair predicates */
extern __shared__ char memory[] ;
// --------
#ifdef SHARED_OFFSET
#define d_offset ((int *) (memory))
#define dd_offset(I) d_offset[I]
#define memory_o (memory + 8010)
#else
#define dd_offset(I) (((I)*((I)+1))/2)
#define memory_o memory
#endif
//__device__ char *g_z;
//__device__ int *d_columns ;
// #ifdef SHARED_Z
// #define d_z (memory)
// #define memory_e (memory_o + 2010)
// #else
// #define d_z g_z
// #define memory_e memory_o
// #endif
// ------------
#ifdef SHARED_ENERGY
#define d_canPair (memory_e + 150)
#define d_stack_dg ((int *) (memory_e + 200))
#define d_tstacki_dg ((int *) (memory_e + 2800))
#define d_termaupenalty_ar ((int *) (memory_e + 5400))
#define d_il_ent_ar ((int *) (memory_e + 5600))
#define d_bl_ent_ar ((int *) (memory_e + 5800))
#define d_hl_ent_ar ((int *) (memory_e + 6000))
#define memory_s (memory_e + 6040)
#define d_basepairing(I,J) ((I+1 < J) && d_canPair[index2(d_inp((I)+1),d_inp(J))])
#define d_stackpairing(I,J) ((I+3 < J) && d_canPair[index2(d_inp((I)+1),d_inp(J))] && d_canPair[index2(d_inp((I)+2),d_inp((J)-1))])
#define d_stack_dg_ac(I,J,K,L) d_stack_dg [index4(d_inp(I),d_inp(J),d_inp(K),d_inp(L))]
#define d_sr_energy(I,J) d_stack_dg [index4(d_inp((I)),d_inp((I)+1),d_inp((J)-1),d_inp((J)))]
#define d_il_stack(I,J,K,L) (d_tstacki_dg[index4(d_inp((I)),d_inp((I)+1),d_inp((L)),d_inp((L)+1))] + \
d_tstacki_dg[index4(d_inp((J)+1),d_inp((J)),d_inp((K)+1),d_inp((K)))])
#define d_termaupenalty(I,J) d_termaupenalty_ar[index2(d_inp(I),d_inp(J))]
#else
#define memory_s memory_e
#define d_canPair g_canPair
#define d_stack_dg g_stack_dg
#define d_tstacki_dg g_tstacki_dg
#define d_termaupenalty_ar g_termaupenalty_ar
#define d_il_ent_ar g_il_ent_ar
#define d_bl_ent_ar g_bl_ent_ar
#define d_hl_ent_ar g_hl_ent_ar
#define d_basepairing(I,J) ((I+1 < J) && d_canPair[d_inp((I)+1)][d_inp(J)])
#define d_stackpairing(I,J) ((I+3 < J) && d_canPair[d_inp((I)+1)][d_inp(J)] && d_canPair[d_inp((I)+2)][d_inp((J)-1)])
#define d_stack_dg_ac(I,J,K,L) d_stack_dg[d_inp(I)][d_inp(J)][d_inp(K)][d_inp(L)]
#define d_sr_energy(I,J) d_stack_dg[d_inp((I))][d_inp((I)+1)][d_inp((J)-1)][d_inp((J))]
#define d_il_stack(I,J,K,L) (d_tstacki_dg[d_inp((I))][d_inp((I)+1)][d_inp((L))][d_inp((L)+1)] + \
d_tstacki_dg[d_inp((J)+1)][d_inp((J))][d_inp((K)+1)][d_inp((K))])
#define d_termaupenalty(I,J) d_termaupenalty_ar[d_inp(I)][d_inp(J)]
#endif
/////
#define basepairing(I,J) ((I+1 < J) && canPair[inp((I)+1)][inp(J)])
char canStackPair[ASIZE][ASIZE][ASIZE][ASIZE];
#define stackpairing(I,J) ((I+3 < J) && canPair[inp((I)+1)][inp(J)] && canPair[inp((I)+2)][inp((J)-1)])
/* alternative definition of basepair, working on characters */
char basepair(int i, int j);
__device__ char d_basepair(int i, int j);
/* Constants */
/* ------------- */
#define const_e (2.718281828459)
#define mean_scale (1.34855)
/* Energy tables */
/* ---------------------- */
/* The Jacobson-Stockmayer term for loop interpolation. */
#define jacobson_stockmayer(size) (107.856*log((size)/30.0))
#define UNDEF 1000000
char canPair[ASIZE][ASIZE];
__device__ __constant__ char g_canPair[ASIZE][ASIZE];
int stack_dg [ASIZE][ASIZE][ASIZE][ASIZE];
__device__ __constant__ int g_stack_dg [ASIZE][ASIZE][ASIZE][ASIZE];
int hl_ent_ar [31];
__device__ __constant__ int g_hl_ent_ar[31];
int tstackh_dg [ASIZE][ASIZE][ASIZE][ASIZE];
__device__ __constant__ int d_tstackh_dg [ASIZE][ASIZE][ASIZE][ASIZE];
int hl_tetra [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
__device__ int d_hl_tetra [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
int bl_ent_ar [31];
__device__ __constant__ int g_bl_ent_ar[31];
int il_ent_ar [31];
__device__ __constant__ int g_il_ent_ar[31];
int tstacki_dg [ASIZE][ASIZE][ASIZE][ASIZE];
__device__ __constant__ int g_tstacki_dg [ASIZE][ASIZE][ASIZE][ASIZE];
int dr_dangle_dg [ASIZE][ASIZE][ASIZE];
__device__ __constant__ int d_dr_dangle_dg [ASIZE][ASIZE][ASIZE];
int dl_dangle_dg [ASIZE][ASIZE][ASIZE];
__device__ __constant__ int d_dl_dangle_dg [ASIZE][ASIZE][ASIZE];
int termaupenalty_ar [ASIZE][ASIZE];
__device__ __constant__ int g_termaupenalty_ar [ASIZE][ASIZE];
int intloop11 [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
__device__ int d_intloop11 [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
int intloop21 [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
__device__ int d_intloop21 [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
int intloop22 [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
__device__ int d_intloop22 [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
double *scale_ar;
__device__ double *d_scale_ar;
//#include "d_energy.cu"
/* Energy Functions */
/* ----------------------------------------- */
#define index2(I,J) (I*ASIZE + J)
#define index3(I,J,K) (index2(index2(I, J), K))
#define index4(I,J,K,L) (index3(index2(I, J), K, L))
#define index5(I,J,K,L,M) (index4(index2(I, J), K, L, M))
#define index6(I,J,K,L,M,N) (index5(index2(I, J), K, L, M, N))
#define index7(I,J,K,L,M,N,O) (index6(index2(I, J), K, L, M, N, O))
#define index8(I,J,K,L,M,N,O,P) (index7(index2(I, J), K, L, M, N, O, P))
#define stack_dg_ac(I,J,K,L) stack_dg[inp(I)][inp(J)][inp(K)][inp(L)]
#define sr_energy(I,J) stack_dg[inp((I))][inp((I)+1)][inp((J)-1)][inp((J))]
#define hl_ent(size) ((size) <= 30 ? hl_ent_ar[size] : 769 + jacobson_stockmayer(i))
#define d_hl_ent(size) ((size) <= 30 ? d_hl_ent_ar[size] : 769 + jacobson_stockmayer(i))
#define hl_stack(I,J) tstackh_dg[inp((I))][inp((I)+1)][inp((J)-1)][inp((J))]
#define d_hl_stack(I,J) d_tstackh_dg[d_inp((I))][d_inp((I)+1)][d_inp((J)-1)][d_inp((J))]
int hl_energy(int i, int j);
//__device__ int d_hl_energy(int i, int j);
#define bl_ent(size) ((size) <= 30 ? bl_ent_ar[size] : 609 + jacobson_stockmayer(i))
#define d_bl_ent(size) ((size) <= 30 ? d_bl_ent_ar[size] : 609 + jacobson_stockmayer(i))
int bl_energy(int bl, int i, int j, int br);
//__device__ int d_bl_energy(int bl, int i, int j, int br);
int br_energy(int bl, int i, int j, int br);
//__device__ int d_br_energy(int bl, int i, int j, int br);
#define il_ent(size) ((size) <= 30 ? il_ent_ar[size] : 369 + jacobson_stockmayer(i))
#define d_il_ent(size) ((size) <= 30 ? d_il_ent_ar[size] : 369 + jacobson_stockmayer(i))
#define il_stack(I,J,K,L) (tstacki_dg[inp((I))][inp((I)+1)][inp((L))][inp((L)+1)] + \
tstacki_dg[inp((J)+1)][inp((J))][inp((K)+1)][inp((K))])
int il_energy(int i, int j, int k, int l);
//__device__ int d_il_energy(int i, int j, int k, int l);
#define dr_energy(I,J) dr_dangle_dg[inp((I))][inp((J))][inp((J)+1)]
#define d_dr_energy(I,J) d_dr_dangle_dg[d_inp((I))][d_inp((J))][d_inp((J)+1)]
#define dli_energy(I,J) dr_dangle_dg[inp((J))][inp((I))][inp((I)+1)]
#define d_dli_energy(I,J) d_dr_dangle_dg[d_inp((J))][d_inp((I))][d_inp((I)+1)]
#define dl_energy(I,J) dl_dangle_dg[inp((I)-1)][inp((I))][inp((J))]
#define d_dl_energy(I,J) d_dl_dangle_dg[d_inp((I)-1)][d_inp((I))][d_inp((J))]
#define dri_energy(I,J) dl_dangle_dg[inp((J)-1)][inp((J))][inp((I))]
#define d_dri_energy(I,J) d_dl_dangle_dg[d_inp((J)-1)][d_inp((J))][d_inp((I))]
#define ss_energy(I,J) 0
#define d_ss_energy(I,J) 0
#define dangles(i,j,i2,j2,k,l,k2,l2) ((dli_energy(j,k+1) + dri_energy(j2,k2+1)))
#define d_dangles(i,j,i2,j2,k,l,k2,l2) ((d_dli_energy(j,k+1) + d_dri_energy(j2,k2+1)))
#define sspenalty(a) (npp * (a))
#define d_sspenalty(a) (d_npp * (a))
#define termaupenalty(I,J) termaupenalty_ar[inp(I)][inp(J)]
#define mk_pf(X) (exp ((X)/ (-61.6321)))
#define scale(size) scale_ar[size]
#define d_scale(size) d_scale_ar[size]
/* initializations */
void rnalib_init(toptions *opts, tsequence *seq);
void rnalib_free();
/* ---------------------------------------------------------------------------
rnalib.c
RNA energy library, based on Haskell implementation by Jens Reeder
Author: Peter Steffen
$Date: 2006/04/18 08:40:51 $
--------------------------------------------------------------------------- */
/* ---------------------------------------------------------------------------------------------------- */
/* input handling */
/* ---------------------------------------------------------------------------------------------------- */
/* The alphabet */
#define A 0
#define C 1
#define G 2
#define U 3
#define N 4
static int n;
static int d_n;
static char *z;
static char *d_z;
#ifdef DIFF3
#define ROUND_THREADS
#endif
/* initialize basepair predicate */
/* Load the canonical-basepair predicate table from energyFile and
   mirror it into device constant memory (g_canPair).
   Fix: the symbol is passed via HIP_SYMBOL(var) instead of a string
   literal -- looking up symbols by name string was removed from the
   runtime API and fails at runtime under HIP. */
static void init_canPair(){
  if(fread(canPair, sizeof(char), ASIZE*ASIZE, energyFile) != ASIZE*ASIZE)
    printf("File read error.");
  CUDA_SAFE_CALL(hipMemcpyToSymbol(HIP_SYMBOL(g_canPair), (char *) canPair,
                                   ASIZE*ASIZE*sizeof(char), 0,
                                   hipMemcpyHostToDevice));
}
/* initialize stackpair predicate */
/* Load the stack-pair predicate table from energyFile.
   No device upload here: the device side recomputes it from d_canPair,
   see the commented-out copy below. */
static void init_canStackPair(){
  if(fread(canStackPair, sizeof(char), ASIZE*ASIZE*ASIZE*ASIZE, energyFile) != ASIZE*ASIZE*ASIZE*ASIZE)
    printf("File read error.");
  /* // no, it's recomputed from d_canPair
  CUDA_SAFE_CALL(hipMemcpyToSymbol("d_canStackPair", (char *) canStackPair,
				   ASIZE*ASIZE*ASIZE*ASIZE*sizeof(char), 0,
				   hipMemcpyHostToDevice));
  */
}
/* alternative definition of basepair, working on characters */
/* Character-level basepair predicate on the global input string z:
   true for the canonical pairs au/ua/cg/gc and the wobble pairs gu/ug
   (lowercase nucleotide characters). */
char basepair(int i, int j){
  char a = z[i], b = z[j];
  if (a == 'a') return b == 'u';
  if (a == 'u') return (b == 'a') || (b == 'g');
  if (a == 'c') return b == 'g';
  if (a == 'g') return (b == 'c') || (b == 'u');
  return 0;
}
//__device__ char d_basepair(int i, int j){
// return(((d_z[i] == 'a') && (d_z[j] == 'u')) ||
// ((d_z[i] == 'u') && (d_z[j] == 'a')) ||
// ((d_z[i] == 'c') && (d_z[j] == 'g')) ||
// ((d_z[i] == 'g') && (d_z[j] == 'c')) ||
// ((d_z[i] == 'g') && (d_z[j] == 'u')) ||
// ((d_z[i] == 'u') && (d_z[j] == 'g')));
//}
/* ---------------------------------------------------------------------------------------------------- */
/* Energy stuff */
/* ---------------------------------------------------------------------------------------------------- */
/* subword length */
#define size_of(I,J) ((J)-(I))
/* Some constants and utilities */
/* ---------------------------- */
/* ---------------------------------------------------------------------------------------------------- */
/* Stacking Region Energies */
/* ---------------------------------------------------------------------------------------------------- */
/*
Stabilizing energies for canonical basepairs: AU, CG, GU
Basepairing: Parameters are in 5' 3' order.
stack_dg a b c d
^ ^ ^ ^
| |_| |
|_____|
*/
/* Load the stacking free-energy table from energyFile and mirror it
   into device constant memory (g_stack_dg).
   Fix: symbol passed via HIP_SYMBOL(var) -- string-name symbol lookup
   was removed from the runtime API and fails under HIP. */
static void init_stack_dg()
{
  if(fread(stack_dg, sizeof(int), ASIZE*ASIZE*ASIZE*ASIZE, energyFile) != ASIZE*ASIZE*ASIZE*ASIZE)
    printf("File read error.");
  CUDA_SAFE_CALL(hipMemcpyToSymbol(HIP_SYMBOL(g_stack_dg), (int *) stack_dg,
                                   ASIZE*ASIZE*ASIZE*ASIZE*sizeof(int), 0,
                                   hipMemcpyHostToDevice));
}
/* ---------------------------------------------------------------------------------------------------- */
/* Hairpin Loop Energies */
/* ---------------------------------------------------------------------------------------------------- */
/* Load the hairpin-loop entropy table (loop sizes 0..30) from
   energyFile and mirror it into device constant memory (g_hl_ent_ar).
   Fix: symbol passed via HIP_SYMBOL(var) instead of a name string. */
static void init_hl_ent_ar()
{
  if(fread(hl_ent_ar, sizeof(int), 31, energyFile) != 31)
    printf("File read error.");
  CUDA_SAFE_CALL(hipMemcpyToSymbol(HIP_SYMBOL(g_hl_ent_ar), (int *) hl_ent_ar,
                                   31*sizeof(int), 0,
                                   hipMemcpyHostToDevice));
}
/* Stacking Interaction */
/* ------------------------------ */
/* Load the hairpin terminal-mismatch stacking table from energyFile
   and mirror it into device constant memory (d_tstackh_dg).
   Fix: symbol passed via HIP_SYMBOL(var) instead of a name string. */
static void init_tstackh_dg()
{
  if(fread(tstackh_dg, sizeof(int), ASIZE*ASIZE*ASIZE*ASIZE, energyFile) != ASIZE*ASIZE*ASIZE*ASIZE)
    printf("File read error.");
  CUDA_SAFE_CALL(hipMemcpyToSymbol(HIP_SYMBOL(d_tstackh_dg), (int *) tstackh_dg,
                                   ASIZE*ASIZE*ASIZE*ASIZE*sizeof(int), 0,
                                   hipMemcpyHostToDevice));
}
#define hl_stack(I,J) tstackh_dg[inp((I))][inp((I)+1)][inp((J)-1)][inp((J))]
#define d_hl_stack(I,J) d_tstackh_dg[d_inp((I))][d_inp((I)+1)][d_inp((J)-1)][d_inp((J))]
/* Tetraloop Bonus Energies */
/* ------------------------------ */
/* Ultrastable tetra-loops & energy bonus at 37 C: */
/* Load the 6-base tetraloop bonus table hl_tetra (ASIZE^6 ints; energy
   bonus for ultrastable tetra-loops at 37C) and mirror it to the device
   symbol "d_hl_tetra".
   NOTE(review): a short fread only prints a message and continues. */
static void init_hl_tetra()
{
  if(fread(hl_tetra, sizeof(int), ASIZE*ASIZE*ASIZE*ASIZE*ASIZE*ASIZE, energyFile) != ASIZE*ASIZE*ASIZE*ASIZE*ASIZE*ASIZE)
    printf("File read error.");
  CUDA_SAFE_CALL(hipMemcpyToSymbol("d_hl_tetra", (int *) hl_tetra,
				ASIZE*ASIZE*ASIZE*ASIZE*ASIZE*ASIZE*sizeof(int), 0,
				hipMemcpyHostToDevice));
}
/* Terminal AU penalty is included in hl_stack, */
/* therefore it must be added explicitely only for (size == 3) */
/* Host-side free energy of a hairpin loop closed by the pair (i,j).
   size 3  : entropy + terminal AU penalty (AU penalty is part of the
             mismatch table, so it must be added explicitly here only);
   size 4  : entropy + terminal mismatch + tetraloop bonus;
   size >4 : entropy + terminal mismatch;
   size <3 : illegal -- reports a grammar error and aborts.
   NOTE(review): the tetraloop lookup reads bases i..i+5 regardless of
   loop size -- confirm the tables/sequence padding make that safe. */
int hl_energy(int i, int j){
  int loop_size = j-i-1;
  int ent       = hl_ent(loop_size);
  int mismatch  = hl_stack(i,j);
  int bonus     = hl_tetra[inp(i)][inp(i+1)][inp(i+2)][inp(i+3)][inp(i+4)][inp(i+5)];
  int au_pen    = termaupenalty_ar[inp(i)][inp(j)];
  if (loop_size > 4)  return ent + mismatch;
  if (loop_size == 4) return ent + mismatch + bonus;
  if (loop_size == 3) return ent + au_pen;
  printf("hairpin loop < 3 found. Please use production\n");
  printf(" hl <<< lbase -~~ (region `with` minsize 3) ~~- lbase\n");
  printf("in your grammar.\n");
  exit(1);
}
//__device__ int d_hl_energy(int i, int j){
// int size;
// int entropy;
// int tetra_bonus, stack_mismatch;
// int termaupen;
//
// size = j-i-1;
// entropy = d_hl_ent(size);
// stack_mismatch = d_hl_stack(i,j);
// tetra_bonus = d_hl_tetra[d_inp(i)][d_inp(i+1)][d_inp(i+2)][d_inp(i+3)][d_inp(i+4)][d_inp(i+5)];
// termaupen = d_termaupenalty_ar[d_inp(i)][d_inp(j)];
//
// if (size==3) return(entropy + termaupen);
// if (size==4) return(entropy + stack_mismatch + tetra_bonus);
// if (size>4) return(entropy + stack_mismatch);
// printf("hairpin loop < 3 found. Please use production\n");
// printf(" hl <<< lbase -~~ (region `with` minsize 3) ~~- lbase\n");
// printf("in your grammar.\n");
// exit(1);
//}
//
/* ---------------------------------------------------------------------------------------------------- */
/* Bulge Loop Energies */
/* ---------------------------------------------------------------------------------------------------- */
/* Load the 31-entry bulge-loop length entropy table and mirror it to the
   device symbol "g_bl_ent_ar".
   NOTE(review): a short fread only prints a message and continues. */
static void init_bl_ent_ar()
{
  if(fread(bl_ent_ar, sizeof(int), 31, energyFile) != 31)
    printf("File read error.");
  CUDA_SAFE_CALL(hipMemcpyToSymbol("g_bl_ent_ar", (int *) bl_ent_ar,
				31*sizeof(int), 0,
				hipMemcpyHostToDevice));
}
/* Bulge Loop Left */
/* ------------------------------ */
/*
. .
. .
(bl+3) - (br-2)
If size == 1 the terminal aupenalty for the stem starting after the bulge (that is (bl+2) - (br-1))
bl+1
bl - br
is added possibly. This is unwanted. Since we do not have a chance to check the size of the bulge when parsing the stem
we substract the possible penalty here!
*/
/* Host-side energy of a left bulge: bl pairs br, the bulged region is
   (i,j). For a single-base bulge the stacking of the enclosing pairs is
   kept and the terminal AU penalty of the inner stem (bl+2, br-1) -- which
   could not be suppressed while parsing the stem -- is subtracted (see the
   comment block above). Larger bulges get entropy + outer AU penalty.
   size < 1 is a caller error and aborts. */
int bl_energy(int bl, int i, int j, int br){
  int stack = stack_dg[inp(bl)][inp(j+1)][inp(br-1)][inp(br)];
  int len   = size_of(i,j);
  int ent   = bl_ent(len);
  if (len < 1) {printf("bl_energy size < 1\n"); exit(-1);}
  if (len == 1) return stack + ent - termaupenalty_ar[inp(bl+2)][inp(br-1)];
  return ent + termaupenalty_ar[inp(bl)][inp(br)];
}
//__device__ int d_bl_energy(int bl, int i, int j, int br){
// int stacking, size, entropy;
//
// stacking = d_stack_dg[d_inp(bl)][d_inp(j+1)][d_inp(br-1)][d_inp(br)];
// size = size_of(i,j);
// entropy = d_bl_ent(size);
//
// if (size==1) return(stacking + entropy - d_termaupenalty_ar[d_inp(bl+2)][d_inp(br-1)]);
// else if (size>1) return(entropy + d_termaupenalty_ar[d_inp(bl)][d_inp(br)]);
// else {printf("bl_energy size < 1\n"); exit(-1);}
//}
/* Bulge Loop Right */
/* ------------------------------ */
/* Host-side energy of a right bulge: bl pairs br, bulged region (i,j).
   Mirrors bl_energy(); for a single-base bulge the inner-stem AU penalty
   at (bl+1, br-2) is subtracted, otherwise entropy + outer AU penalty. */
int br_energy(int bl, int i, int j, int br){
  int stack = stack_dg[inp(bl)][inp(bl+1)][inp(i)][inp(br)];
  int len   = size_of(i,j);
  int ent   = bl_ent(len);
  return (len == 1)
    ? stack + ent - termaupenalty_ar[inp(bl+1)][inp(br-2)]
    : ent + termaupenalty_ar[inp(bl)][inp(br)];
}
//__device__ int d_br_energy(int bl, int i, int j, int br){
// int stacking, size, entropy;
//
// stacking = d_stack_dg[d_inp(bl)][d_inp(bl+1)][d_inp(i)][d_inp(br)];
// size = size_of(i,j);
// entropy = d_bl_ent(size);
//
// if (size==1) return(stacking + entropy - d_termaupenalty_ar[d_inp(bl+1)][d_inp(br-2)]);
// if (size>1) return(entropy + d_termaupenalty_ar[d_inp(bl)][d_inp(br)]);
//}
/* ---------------------------------------------------------------------------------------------------- */
/* Interior Loop Energies */
/* ---------------------------------------------------------------------------------------------------- */
/* Entropic Term */
/* ------------------------------ */
/*
DESTABILIZING ENERGIES BY SIZE OF LOOP
il_ent 1 and 2 undefined in the tables of Mathews et al. since
special energy values exist
*/
/* Load the 31-entry interior-loop length entropy table and mirror it to
   the device symbol "g_il_ent_ar". Entries for sizes 1 and 2 are unused
   (special tables exist, see note above).
   NOTE(review): a short fread only prints a message and continues. */
static void init_il_ent_ar()
{
  if(fread(il_ent_ar, sizeof(int), 31, energyFile) != 31)
    printf("File read error.");
  CUDA_SAFE_CALL(hipMemcpyToSymbol("g_il_ent_ar", (int *) il_ent_ar,
				31*sizeof(int), 0,
				hipMemcpyHostToDevice));
}
/* Stacking Interaction */
/* ------------------------------ */
/*
STACKING ENERGIES : TERMINAL MISMATCHES AND BASE-PAIRS.
Stabilizing energies for canonical basepairs: AU, CG, GU
Basepairing: Paramers are in 5' 3' order.
tstacki_dg a b c d
^ ^ ^ ^
| |_| |
|_____|
*/
/* Load the interior-loop terminal-mismatch stacking table tstacki_dg
   (ASIZE^4 ints, 5'->3' order, see diagram above) and mirror it to the
   device symbol "g_tstacki_dg".
   NOTE(review): a short fread only prints a message and continues. */
static void init_tstacki_dg()
{
  if(fread(tstacki_dg, sizeof(int), ASIZE*ASIZE*ASIZE*ASIZE, energyFile) != ASIZE*ASIZE*ASIZE*ASIZE)
    printf("File read error.");
  CUDA_SAFE_CALL(hipMemcpyToSymbol("g_tstacki_dg", (int *) tstacki_dg,
				ASIZE*ASIZE*ASIZE*ASIZE*sizeof(int), 0,
				hipMemcpyHostToDevice));
}
/*
the time intensive n^4 version of internal loops
(used in reduced form O(n^2*c^2) where c is the maximal internal loop size)
(i,j) = left region, (k,l) = right region
i --- l+1
5' / \ 3'
| i+1 l / \
| | | |
\ / | | |
3' | | 5'
j k+1
\ /
j+1 --- k
*/
/* Ninio's equation */
#define il_asym(SL,SR) min(300,((abs((SL)-(SR)))*50))
#define d_il_asym(SL,SR) min(300,((abs((SL)-(SR)))*50))
/* include internal loop energies */
//#include "intloop11.c"
//#include "intloop21.c"
//#include "intloop22.c"
#define il11_energy(lb,rb) intloop11[inp((lb))][inp((lb)+1)][inp((lb)+2)][inp((rb)-2)][inp((rb)-1)][inp((rb))]
#define d_il11_energy(lb,rb) d_intloop11[d_inp((lb))][d_inp((lb)+1)][d_inp((lb)+2)][d_inp((rb)-2)][d_inp((rb)-1)][d_inp((rb))]
#define il12_energy(lb,rb) intloop21[inp(lb)][inp((lb)+1)][inp((lb)+2)][inp((rb)-3)][inp((rb)-2)][inp((rb)-1)][inp(rb)]
#define d_il12_energy(lb,rb) d_intloop21[d_inp(lb)][d_inp((lb)+1)][d_inp((lb)+2)][d_inp((rb)-3)][d_inp((rb)-2)][d_inp((rb)-1)][d_inp(rb)]
#define il21_energy(lb,rb) intloop21[inp((rb)-2)][inp((rb)-1)][inp(rb)][inp(lb)][inp((lb)+1)][inp((lb)+2)][inp((lb)+3)]
#define d_il21_energy(lb,rb) d_intloop21[d_inp((rb)-2)][d_inp((rb)-1)][d_inp(rb)][d_inp(lb)][d_inp((lb)+1)][d_inp((lb)+2)][d_inp((lb)+3)]
#define int22_energy(a,b,c,d,e,f,g,h) intloop22[inp(a)][inp(b)][inp(c)][inp(d)][inp(e)][inp(f)][inp(g)][inp(h)]
#define d_int22_energy(a,b,c,d,e,f,g,h) d_intloop22[d_inp(a)][d_inp(b)][d_inp(c)][d_inp(d)][d_inp(e)][d_inp(f)][d_inp(g)][d_inp(h)]
#define il22_energy(lb,rb) int22_energy(lb,((lb)+1),((lb)+2),((lb)+3),((rb)-3),((rb)-2),((rb)-1),rb)
#define d_il22_energy(lb,rb) d_int22_energy(lb,((lb)+1),((lb)+2),((lb)+3),((rb)-3),((rb)-2),((rb)-1),rb)
/* Host-side interior loop energy; (i,j) is the left unpaired region and
   (k,l) the right one (see the geometry sketch above). Loops with either
   side longer than 2 use the generic entropy + mismatch-stacking +
   Ninio-asymmetry model; the 1x1, 1x2, 2x1 and 2x2 cases use their
   dedicated lookup tables. Any other combination returns the sentinel
   65000 (effectively forbidden). */
int il_energy(int i, int j, int k, int l)
{
  int left  = size_of(i,j);
  int right = size_of(k,l);
  if (left > 2 || right > 2)
    return il_ent(left + right) + il_stack(i,j,k,l) + il_asym(left,right);
  if (left == 1 && right == 1) return il11_energy(i,l+1);
  if (left == 1 && right == 2) return il12_energy(i,l+1);
  if (left == 2 && right == 1) return il21_energy(i,l+1);
  if (left == 2 && right == 2) return il22_energy(i,l+1);
  return 65000;
}
//__device__ int d_il_energy(int i, int j, int k, int l)
//{
// int sl, sr;
// sl = size_of(i,j);
// sr = size_of(k,l);
// if ((sl > 2) || (sr > 2))
// return((d_il_ent (sl + sr))
// + (d_il_stack (i,j,k,l))
// + (d_il_asym(sl,sr))); else
// if ((sl == 1) && (sr == 1)) return(d_il11_energy(i,l+1)); else
// if ((sl == 1) && (sr == 2)) return(d_il12_energy(i,l+1)); else
// if ((sl == 2) && (sr == 1)) return(d_il21_energy(i,l+1)); else
// if ((sl == 2) && (sr == 2)) return(d_il22_energy(i,l+1)); else
// return 65000;
//}
/* ---------------------------------------------------------------------------------------------------- */
/* Dangling ends */
/* ---------------------------------------------------------------------------------------------------- */
/* dangle right */
/* ------------------------------ */
/* Load the right-dangle energy table dr_dangle_dg (ASIZE^3 ints) and
   mirror it to the device symbol "d_dr_dangle_dg".
   NOTE(review): a short fread only prints a message and continues. */
static void init_dr_dangle_dg()
{
  if(fread(dr_dangle_dg, sizeof(int), ASIZE*ASIZE*(ASIZE), energyFile) != ASIZE*ASIZE*(ASIZE))
    printf("File read error.");
  CUDA_SAFE_CALL(hipMemcpyToSymbol("d_dr_dangle_dg", (int *) dr_dangle_dg,
				ASIZE*ASIZE*(ASIZE)*sizeof(int), 0,
				hipMemcpyHostToDevice));
}
/* dangle left */
/* ------------------------------ */
/* Load the left-dangle energy table dl_dangle_dg (ASIZE^3 ints) and
   mirror it to the device symbol "d_dl_dangle_dg".
   NOTE(review): a short fread only prints a message and continues. */
static void init_dl_dangle_dg()
{
  if(fread(dl_dangle_dg, sizeof(int), (ASIZE)*ASIZE*ASIZE, energyFile) != (ASIZE)*ASIZE*ASIZE)
    printf("File read error.");
  CUDA_SAFE_CALL(hipMemcpyToSymbol("d_dl_dangle_dg", (int *) dl_dangle_dg,
				(ASIZE)*ASIZE*ASIZE*sizeof(int), 0,
				hipMemcpyHostToDevice));
}
#define ss_energy(I,J) 0
#define d_ss_energy(I,J) 0
/* ---------------------------------------------------------------------------------------------------- */
/* special pseudoknot energies */
/* ---------------------------------------------------------------------------------------------------- */
/* This are the dangling energies for the bases bridging the stacks */
#define dangles(i,j,i2,j2,k,l,k2,l2) ((dli_energy(j,k+1) + dri_energy(j2,k2+1)))
#define d_dangles(i,j,i2,j2,k,l,k2,l2) ((d_dli_energy(j,k+1) + d_dri_energy(j2,k2+1)))
#define sspenalty(a) (npp * (a))
#define d_sspenalty(a) (d_npp * (a))
/* ---------------------------------------------------------------------------------------------------- */
/* Terminal AU penalty */
/* ---------------------------------------------------------------------------------------------------- */
/* Load the terminal AU penalty table termaupenalty_ar (ASIZE^2 ints,
   indexed by the two closing bases) and mirror it to the device symbol
   "g_termaupenalty_ar".
   NOTE(review): a short fread only prints a message and continues. */
static void init_termaupenalty_ar()
{
  if(fread(termaupenalty_ar, sizeof(int), ASIZE*ASIZE, energyFile) != ASIZE*ASIZE)
    printf("File read error.");
  CUDA_SAFE_CALL(hipMemcpyToSymbol("g_termaupenalty_ar", (int *) termaupenalty_ar,
				ASIZE*ASIZE*sizeof(int), 0,
				hipMemcpyHostToDevice));
}
/* internal loop energies */
/* Load the 1x1 interior-loop table intloop11 (ASIZE^6 ints) and mirror it
   to the device symbol "d_intloop11".
   Fix: the failure message said "File write error." although this is a
   read; now consistent with the other init_* loaders.
   NOTE(review): a short fread only prints a message and continues. */
static void init_intloop11(){
  if(fread(intloop11, sizeof(int), (ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE), energyFile) != (ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE))
    printf("File read error.");
  CUDA_SAFE_CALL(hipMemcpyToSymbol("d_intloop11", (int *) intloop11,
				(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*sizeof(int), 0,
				hipMemcpyHostToDevice));
}
/* Load the 2x1 interior-loop table intloop21 (ASIZE^7 ints) and mirror it
   to the device symbol "d_intloop21".
   Fix: the failure message said "File write error." although this is a
   read; now consistent with the other init_* loaders. */
static void init_intloop21(){
  if(fread(intloop21, sizeof(int), (ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE), energyFile) !=
     (ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE))
    printf("File read error.");
  CUDA_SAFE_CALL(hipMemcpyToSymbol("d_intloop21", (int *) intloop21,
				(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*sizeof(int), 0,
				hipMemcpyHostToDevice));
}
/* Load the 2x2 interior-loop table intloop22 (ASIZE^8 ints) and mirror it
   to the device symbol "d_intloop22".
   Fix: the failure message said "File write error." although this is a
   read; now consistent with the other init_* loaders. */
static void init_intloop22(){
  if(fread(intloop22, sizeof(int), (ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE), energyFile) !=
     (ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE))
    printf("File read error.");
  CUDA_SAFE_CALL(hipMemcpyToSymbol("d_intloop22", (int *) intloop22,
				(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*sizeof(int), 0,
				hipMemcpyHostToDevice));
}
/* ---------------------------------------------------------------------------------------------------- */
/* Scale */
/* ---------------------------------------------------------------------------------------------------- */
/* Build the geometric scaling table: scale_ar[k] = mean_scale^(-k) for
   k = 0..n. The buffer is sized n+2 (mcalloc zero-fills the spare slot). */
static void init_scale_ar()
{
  int k;
  scale_ar = (double *) mcalloc(n+2, sizeof(double));
  scale_ar[0] = 1.0;
  for (k = 1; k <= n; k++)
    scale_ar[k] = scale_ar[k-1] / mean_scale;
}
/* ---------------------------------------------------------------------------------------------------- */
/* Initialize rna input and energy tables */
/* ---------------------------------------------------------------------------------------------------- */
/* Initialize the RNA library for one input sequence: encode the sequence,
   upload it to the device (d_z), open the energy parameter file and load
   every energy table in the fixed order of energies.dat, then build the
   scaling array. Call before any folding; rnalib_free() releases memory. */
void rnalib_init(toptions *opts, tsequence *seq)
{
  /* initialize input and pairing tables; z is shifted so indexing is
     1-based.  NOTE(review): the copy below also transfers z[0], i.e. the
     byte before seq->seq -- confirm that byte is owned by the buffer. */
  z = seq->seq - 1;
  n = seq->length;
  convert_input(1, z, n);
  CUDA_SAFE_CALL(hipMalloc((void **) &d_z, (n+2)*sizeof(char)));
  /* fix: the copy's status was previously ignored while the allocation
     above was checked -- check both consistently */
  CUDA_SAFE_CALL(hipMemcpy(d_z, z, (n+2)*sizeof(char), hipMemcpyHostToDevice));
  d_n=n;
  if((energyFile=fopen(ENERGYFILE, "rb"))==NULL) {
    printf("Cannot open file %s.\n",ENERGYFILE);
    exit(1);
  }
  init_canPair();
  init_canStackPair();
  /* initialize energies -- order must match the layout of energies.dat */
  init_stack_dg();
  init_hl_ent_ar();
  init_tstackh_dg();
  init_hl_tetra();
  init_bl_ent_ar();
  init_il_ent_ar();
  init_tstacki_dg();
  init_dr_dangle_dg();
  init_dl_dangle_dg();
  init_termaupenalty_ar();
  init_intloop11();
  init_intloop21();
  init_intloop22();
  init_scale_ar();
  fclose(energyFile);
}
/* Release resources acquired by rnalib_init().
   Fix: the device copy of the input sequence (d_z, allocated with
   hipMalloc in rnalib_init) was never released -- free it here too. */
void rnalib_free()
{
  CUDA_SAFE_CALL(hipFree(d_z));
  free(scale_ar);
}
/* data structures */
/* -------------------------------------------------------------------------------- */
#define size_of(I,J) ((J)-(I))
#define d_il11_energy(lb,rb) d_intloop11[d_inp((lb))][d_inp((lb)+1)][d_inp((lb)+2)][d_inp((rb)-2)][d_inp((rb)-1)][d_inp((rb))]
#define d_il12_energy(lb,rb) d_intloop21[d_inp(lb)][d_inp((lb)+1)][d_inp((lb)+2)][d_inp((rb)-3)][d_inp((rb)-2)][d_inp((rb)-1)][d_inp(rb)]
#define d_il21_energy(lb,rb) d_intloop21[d_inp((rb)-2)][d_inp((rb)-1)][d_inp(rb)][d_inp(lb)][d_inp((lb)+1)][d_inp((lb)+2)][d_inp((lb)+3)]
#define d_int22_energy(a,b,c,d,e,f,g,h) d_intloop22[d_inp(a)][d_inp(b)][d_inp(c)][d_inp(d)][d_inp(e)][d_inp(f)][d_inp(g)][d_inp(h)]
#define d_il22_energy(lb,rb) d_int22_energy(lb,((lb)+1),((lb)+2),((lb)+3),((rb)-3),((rb)-2),((rb)-1),rb)
#define d_il_asym(SL,SR) min(300,((abs((SL)-(SR)))*50))
/* Device-side hairpin loop energy for closing pair (i,j); mirrors the
   host hl_energy() above.
   NOTE(review): unlike the host version, loops of size < 3 fall through
   to the generic entropy+mismatch case because the error path below is
   commented out -- confirm callers never produce size < 3. */
__device__ int d_hl_energy(int i, int j, char *d_z){
  int size;               /* number of unpaired bases in the loop */
  int entropy;            /* length-dependent entropy term */
  int tetra_bonus, stack_mismatch;
  int termaupen;          /* terminal AU penalty */
  size = j-i-1;
  entropy = d_hl_ent(size);
  stack_mismatch = d_hl_stack(i,j);
  /* ultrastable tetraloop bonus; only applied when size == 4 */
  tetra_bonus = d_hl_tetra[d_inp(i)][d_inp(i+1)][d_inp(i+2)][d_inp(i+3)][d_inp(i+4)][d_inp(i+5)];
  termaupen = d_termaupenalty(i,j);
  if (size==3) return(entropy + termaupen);
  if (size==4) return(entropy + stack_mismatch + tetra_bonus);
  return(entropy + stack_mismatch);
  //printf("hairpin loop < 3 found. Please use production\n");
  //printf(" hl <<< lbase -~~ (region `with` minsize 3) ~~- lbase\n");
  //printf("in your grammar.\n");
  //exit(1);
}
/* Device-side left-bulge energy; see host bl_energy() for the rationale
   of the size==1 AU-penalty correction.
   NOTE(review): there is no size < 1 error path here (the host version
   aborts) -- size < 1 silently takes the size > 1 branch. */
__device__ int d_bl_energy(int bl, int i, int j, int br, char *d_z){
  int stacking, size, entropy;
  //	stacking = d_stack_dg[d_inp(bl)][d_inp(j+1)][d_inp(br-1)][d_inp(br)];
  /* accessor-macro form of the stacking table lookup above */
  stacking = d_stack_dg_ac(bl,j+1,br-1,br);
  size = size_of(i,j);
  entropy = d_bl_ent(size);
  if (size==1) return(stacking + entropy - d_termaupenalty(bl+2,br-1));
  return(entropy + d_termaupenalty(bl,br));
}
/* Device-side right-bulge energy; see host br_energy().
   NOTE(review): as with d_bl_energy, no size < 1 error path exists. */
__device__ int d_br_energy(int bl, int i, int j, int br, char *d_z){
  int stacking, size, entropy;
  //	stacking = d_stack_dg[d_inp(bl)][d_inp(bl+1)][d_inp(i)][d_inp(br)];
  /* accessor-macro form of the stacking table lookup above */
  stacking = d_stack_dg_ac(bl, bl+1, i, br);
  size = size_of(i,j);
  entropy = d_bl_ent(size);
  if (size==1) return(stacking + entropy - d_termaupenalty(bl+1,br-2));
  return(entropy + d_termaupenalty(bl,br));
}
/* Device-side interior loop energy; mirrors host il_energy(). (i,j) is the
   left unpaired region, (k,l) the right. Sides longer than 2 use the
   generic entropy + mismatch + asymmetry model; 1x1/1x2/2x1/2x2 use their
   dedicated tables; anything else yields the forbidden sentinel 65000. */
__device__ int d_il_energy(int i, int j, int k, int l, char *d_z)
{
  int left  = size_of(i,j);
  int right = size_of(k,l);
  if (left > 2 || right > 2)
    return d_il_ent(left + right) + d_il_stack(i,j,k,l) + d_il_asym(left,right);
  if (left == 1 && right == 1) return d_il11_energy(i,l+1);
  if (left == 1 && right == 2) return d_il12_energy(i,l+1);
  if (left == 2 && right == 1) return d_il21_energy(i,l+1);
  if (left == 2 && right == 2) return d_il22_energy(i,l+1);
  return 65000;
}
| 4273dd8d3736086e60cd328b0bbe5b574a5c766e.cu |
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <errno.h>
#include "config.h"
#include "options.h"
#include <cutil.h>
#define is_suboptimal(a, b, c) abs(a - b) <= c
FILE *energyFile;
#define ENERGYFILE "energies.dat"
// #define DIFF3
// #define SHARED_Z // 2000
// #define SHARED_ENERGY // 8000
// #define SHARED_OFFSET // 8000
// #define SPARSE_CLOSED
// #define MAX_DIFF 200
#define SHMEM 1000
/* Always needed
====================================================================== */
#define min(A, B) ((A) < (B) ? (A) : (B))
#define max(A, B) ((A) > (B) ? (A) : (B))
/* Input handling
====================================================================== */
void convert_input(int start, char *z, int n);
/* Correct incomplete phases in adpc
====================================================================== */
#define decode(X) ((X)-'0')
/* Memory handling
====================================================================== */
/* wrappers for standard C functions
------------------------------------------- */
void memerr_exit(char *f);
void *mcalloc(size_t nobj, size_t size);
void *mmalloc(size_t size);
void *mrealloc(void *q, size_t size);
/* Memory management
------------------------------------------- */
typedef struct {
char **address;
int currentBlock;
int currentPos;
int blockSize;
int numberOfBlocks;
} tmemory;
tmemory *adp_statmem;
tmemory *adp_dynmem;
void set_adplib_debug(int debug);
void *myalloc(tmemory *mem, int size);
tmemory *memory_new();
void memory_clear(tmemory *mem);
void memory_free(tmemory *mem);
/* Preprocessing tools
====================================================================== */
char arr_iupac_base[128][5];
#define iupac_base(A,B) arr_iupac_base[A][B]
char *calc_contains_region(char *z, int n, int *offset, char *pat1);
/* String tools
====================================================================== */
char *mkstr(char *s);
#define dots(i,j) libPP_repeat(i,j,'.')
char *libPP_repeat(int i, int j, char c);
/* File input
====================================================================== */
/* A single sequence
------------------------------------------- */
typedef struct {
char success;
char *descr;
char *seq;
int length;
char *original_seq; /* backup for window mode */
int original_length;
} tsequence;
tsequence *sequence_new();
tsequence *sequence_free(tsequence *ts);
/* A complete file
------------------------------------------- */
#define READSEQ_FILE 1
#define READSEQ_STDIN 2
#define READSEQ_STRING 3
typedef struct {
char *filename;
char *start;
int current;
char first_input_read;
char first_descr_read;
char *temp;
} treadseq;
treadseq *readseq_open(char mode, char *filename);
treadseq *readseq_free(treadseq *rs);
/* reader for different input formats
------------------------------------------- */
tsequence *readseq_next_line(treadseq *rs);
tsequence *readseq_next_fasta(treadseq *rs);
/* Functions for results output
====================================================================== */
void simple_output_optimal (toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end);
void simple_output_subopt_start(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end);
void simple_output_subopt (toptions *opts, tsequence *seq, char *algebra, int score, char *result_prettyprint);
void simple_output_subopt_end (toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end);
void rna_output_optimal (toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end);
void rna_output_subopt_start(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end);
void rna_output_subopt (toptions *opts, tsequence *seq, char *algebra, int score, char *result_prettyprint);
void rna_output_subopt_end (toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end);
/* Tokenizer for interactive command shell
====================================================================== */
typedef struct {
char **token;
int count;
} ttokenizer;
ttokenizer *tokenizer_new();
ttokenizer *tokenizer_free(ttokenizer *t);
void tokenizer_exec(ttokenizer *t, char *name, char *s);
/* wrappers for readline
====================================================================== */
void rl_init();
char *rl_gets ();
/* colored output
====================================================================== */
#define COLOR_DEFAULT "\x1b[0m"
#define COLOR_BOLD "\x1b[1m"
#define COLOR_BLACK "\x1b[0;30m"
#define COLOR_BLUE "\x1b[0;34m"
#define COLOR_GREEN "\x1b[0;32m"
#define COLOR_CYAN "\x1b[0;36m"
#define COLOR_RED "\x1b[0;31m"
#define COLOR_PURPLE "\x1b[0;35m"
#define COLOR_BROWN "\x1b[0;33m"
#define COLOR_GRAY "\x1b[0;37m"
#define COLOR_DARKGRAY "\x1b[1;30m"
#define COLOR_LIGHTBLUE "\x1b[1;34m"
#define COLOR_LIGHTGREEN "\x1b[1;32m"
#define COLOR_LIGHTCYAN "\x1b[1;36m"
#define COLOR_LIGHTRED "\x1b[1;31m"
#define COLOR_LIGHTPURPLE "\x1b[1;35m"
#define COLOR_YELLOW "\x1b[1;33m"
#define COLOR_WHITE "\x1b[1;37m"
#define pcolor(colored,col) if (colored) printf(col)
/* Output format string handling
====================================================================== */
/* calculate the number of leading spaces for sequence output */
void fs_init_leading_space(char energy, char shrepProb, char dbString, char shapeString, char prob, char rank);
/* free the leading spaces temp mem. */
void fs_free_leading_space();
/* initialize a new format string */
void format_string_init(char *s);
/* set predefined output modes */
void setOutputMode(int outputMode);
/* main entry function for sequence output */
void print_sequence(toptions *opts, tsequence *seq, int pos, int size);
/* used in window-mode to shift the input sequence */
void shift_input(toptions *opts, tsequence *seq, char output);
/* main entry function for rna result output */
void output_result
(toptions *opts, tsequence *seq,
int nres, int *energy, double *shrepProb, char *dbString, char *shapeString, double prob, int rank);
typedef struct format_string{
char type;
char *string;
struct format_string *next;
} tformat_string;
/* Initialize all stuff from adplib
====================================================================== */
void adplib_init(toptions *opts, tsequence *seq, char **z, int *n);
void adplib_free(toptions *opts, tsequence *seq);
/* Input handling
====================================================================== */
/* The alphabet */
#define A 0
#define C 1
#define G 2
#define U 3
#define N 4
/* Encode z[start..n] in place from ASCII nucleotides to the numeric
   alphabet A/C/G/U defined above; DNA 't'/'T' is treated as RNA U and
   every unrecognized character maps to N (never paired). */
void convert_input(int start, char *z, int n){
  int pos;
  for (pos = start; pos <= n; pos++) {
    switch (z[pos]) {
      case 'a': case 'A': z[pos] = A; break;
      case 'c': case 'C': z[pos] = C; break;
      case 'g': case 'G': z[pos] = G; break;
      case 'u': case 'U':
      case 't': case 'T': z[pos] = U; break;  /* DNA T -> RNA U */
      default:            z[pos] = N; break;
    }
  }
}
/* Memory handling
====================================================================== */
/* wrappers for standard C functions
------------------------------------------- */
/* Print an out-of-memory diagnostic (f names the failing allocator) and
   some likely causes, then terminate the process. Never returns. */
void memerr_exit(char *f){
  fprintf(stderr, "\n%s: out of memory\n", f);
  fprintf(stderr, "possible reasons:\n");
  fprintf(stderr, " input sequence too long\n");
  fprintf(stderr, " energy range too large (decrease with -e or -c)\n");
  fprintf(stderr, " shape type not abstract enough (increase with -t)\n");
  exit(1);
}
/* calloc wrapper that aborts through memerr_exit() instead of returning
   NULL; the trailing return is unreachable. */
void *mcalloc(size_t nobj, size_t size){
  void *ptr = calloc(nobj, size);
  if (ptr == NULL) memerr_exit("calloc");
  return ptr;
}
/* malloc wrapper that aborts through memerr_exit() instead of returning
   NULL; the trailing return is unreachable. */
void *mmalloc(size_t size){
  void *ptr = malloc(size);
  if (ptr == NULL) memerr_exit("malloc");
  return ptr;
}
/* realloc wrapper that aborts through memerr_exit() instead of returning
   NULL; the trailing return is unreachable. */
void *mrealloc(void *q, size_t size){
  void *ptr = realloc(q, size);
  if (ptr == NULL) memerr_exit("realloc");
  return ptr;
}
/* Memory management
------------------------------------------- */
#define ALIGNMENT 8
#define BLOCKSIZE 1000000
/* Library-wide debug verbosity: 0 = silent, >1 enables per-allocation
   tracing in myalloc() and verbose dumps elsewhere. */
static int adplib_debug = 0;
/* Set the debug level and echo the new value. */
void set_adplib_debug(int debug){
  adplib_debug = debug;
  printf("adplib_debug set to %d.\n", adplib_debug);
}
/* Bump allocator: hand out an ALIGNMENT-aligned slice of the current
   block, advancing to the next block (reusing cleared ones, allocating
   new ones) when the current block cannot fit the request.
   Fix: a request of size >= blockSize previously passed the block-switch
   check, then set currentPos past the end of the fresh block and returned
   a pointer running beyond it (silent heap corruption). Such requests now
   abort with a diagnostic instead. */
void *myalloc(tmemory *mem, int size)
{
  if (adplib_debug>1) printf("myalloc(), currentBlock = , currentPos = \n");
  /* round the request up to the alignment granularity */
  if (size % ALIGNMENT) {
    size = ((size / ALIGNMENT) + 1) * ALIGNMENT;
    if (adplib_debug>1) printf("realigned: %d\n", size);
  }
  if (size >= mem->blockSize) {
    fprintf(stderr, "myalloc: request of %d bytes exceeds block size %d\n",
            size, mem->blockSize);
    exit(1);
  }
  if (mem->currentPos + size >= mem->blockSize) {
    /* current block exhausted: move on, growing the block list if no
       previously-allocated (cleared) block is available for reuse */
    mem->currentBlock++;
    if (mem->currentBlock > mem->numberOfBlocks) {
      mem->address = (char **) mrealloc(mem->address, sizeof (char *) * mem->currentBlock);
      mem->numberOfBlocks = mem->currentBlock;
      mem->address[mem->currentBlock - 1] = (char*) mmalloc(mem->blockSize);
    }
    mem->currentPos = 0;
  }
  mem->currentPos = mem->currentPos + size;
  return(mem->address[mem->currentBlock - 1] + (mem->currentPos - size));
}
/* Create a bump-allocator arena with one BLOCKSIZE-byte block already
   allocated. Note currentBlock starts at 1 (blocks are 1-indexed in the
   allocator logic; address[] itself is 0-indexed). */
tmemory *memory_new()
{
  tmemory *mem = (tmemory *) mmalloc(sizeof(tmemory));
  mem->address = (char **) mmalloc(sizeof(char *));
  mem->address[0] = (char *) mmalloc(BLOCKSIZE);
  mem->blockSize = BLOCKSIZE;
  mem->currentBlock = 1;
  mem->numberOfBlocks = 1;
  mem->currentPos = 0;
  if (adplib_debug>1) printf("adplib.memory.new(): allocated %d bytes\n", BLOCKSIZE);
  return mem;
}
/* Reset the arena without freeing its blocks: currentBlock 0 plus a
   currentPos past blockSize deliberately force the next myalloc() into
   its block-switch path, which reuses block 1 from position 0. */
void memory_clear(tmemory *mem)
{
  mem->currentBlock = 0;
  mem->currentPos = mem->blockSize + 1;
}
/* Release every block owned by the arena, then its bookkeeping. */
void memory_free(tmemory *mem)
{
  int b;
  if (adplib_debug) printf("freeing %d blocks, blockSize = %d => %d bytes\n",
			   mem->numberOfBlocks, mem->blockSize, mem->numberOfBlocks * mem->blockSize);
  for (b = 0; b < mem->numberOfBlocks; b++)
    free(mem->address[b]);
  free(mem->address);
  free(mem);
}
/* Preprocessing tools
====================================================================== */
/* ---------------------------------------------------------------------------------------------------- */
/* iupac_base */
/* ---------------------------------------------------------------------------------------------------- */
/* Build the IUPAC ambiguity-code membership table:
   arr_iupac_base[ch][base] == 1 iff ASCII character ch (upper or lower
   case) can stand for the numeric base A/C/G/U. The legend below the
   function documents each code. Unlisted characters match nothing. */
static void init_iupac_base(){
  int i,j;
  /* clear the whole table first */
  for (i=0;i<128;i++)
    for (j=0;j<5;j++)
      arr_iupac_base[i][j]=0;
  arr_iupac_base['a'][A]=1; arr_iupac_base['A'][A]=1;
  arr_iupac_base['c'][C]=1; arr_iupac_base['C'][C]=1;
  arr_iupac_base['g'][G]=1; arr_iupac_base['G'][G]=1;
  arr_iupac_base['t'][U]=1; arr_iupac_base['T'][U]=1;
  arr_iupac_base['u'][U]=1; arr_iupac_base['U'][U]=1;
  arr_iupac_base['r'][A]=1; arr_iupac_base['R'][A]=1;
  arr_iupac_base['r'][G]=1; arr_iupac_base['R'][G]=1;
  arr_iupac_base['y'][C]=1; arr_iupac_base['Y'][C]=1;
  arr_iupac_base['y'][U]=1; arr_iupac_base['Y'][U]=1;
  arr_iupac_base['s'][G]=1; arr_iupac_base['S'][G]=1;
  arr_iupac_base['s'][C]=1; arr_iupac_base['S'][C]=1;
  arr_iupac_base['w'][A]=1; arr_iupac_base['W'][A]=1;
  arr_iupac_base['w'][U]=1; arr_iupac_base['W'][U]=1;
  arr_iupac_base['k'][G]=1; arr_iupac_base['K'][G]=1;
  arr_iupac_base['k'][U]=1; arr_iupac_base['K'][U]=1;
  arr_iupac_base['m'][A]=1; arr_iupac_base['M'][A]=1;
  arr_iupac_base['m'][C]=1; arr_iupac_base['M'][C]=1;
  arr_iupac_base['b'][C]=1; arr_iupac_base['B'][C]=1;
  arr_iupac_base['b'][G]=1; arr_iupac_base['B'][G]=1;
  arr_iupac_base['b'][U]=1; arr_iupac_base['B'][U]=1;
  arr_iupac_base['d'][A]=1; arr_iupac_base['D'][A]=1;
  arr_iupac_base['d'][G]=1; arr_iupac_base['D'][G]=1;
  arr_iupac_base['d'][U]=1; arr_iupac_base['D'][U]=1;
  arr_iupac_base['h'][A]=1; arr_iupac_base['H'][A]=1;
  arr_iupac_base['h'][C]=1; arr_iupac_base['H'][C]=1;
  arr_iupac_base['h'][U]=1; arr_iupac_base['H'][U]=1;
  arr_iupac_base['v'][A]=1; arr_iupac_base['V'][A]=1;
  arr_iupac_base['v'][C]=1; arr_iupac_base['V'][C]=1;
  arr_iupac_base['v'][G]=1; arr_iupac_base['V'][G]=1;
  arr_iupac_base['n'][A]=1; arr_iupac_base['N'][A]=1;
  arr_iupac_base['n'][C]=1; arr_iupac_base['N'][C]=1;
  arr_iupac_base['n'][G]=1; arr_iupac_base['N'][G]=1;
  arr_iupac_base['n'][U]=1; arr_iupac_base['N'][U]=1;
}
/* char arr_iupac_base[ */
/* A Adenine
*/
/* C Cytosine
*/
/* G Guanine
*/
/* T (or U) Thymine (or Uracil)
*/
/* R A or G
*/
/* Y C or T
*/
/* S G or C
*/
/* W A or T
*/
/* K G or T
*/
/* M A or C
*/
/* B C or G or T
*/
/* D A or G or T
*/
/* H A or C or T
*/
/* V A or C or G
*/
/* N any base
*/
/* . or - gap */
/* For every subword (i,j] of z (triangular offset[] indexing) decide
   whether it fully contains a match of the IUPAC pattern pat against the
   numeric bases z[i+1..j]. Returns a freshly malloc'ed triangular array:
   arr[offset[j]+i] == 1 iff pat occurs completely inside the subword.
   Caller owns the result.
   NOTE(review): after a partial match fails mid-pattern the inner k-loop
   breaks out of the whole subword instead of rescanning from the next
   position, so later occurrences following a failed prefix appear to be
   missed -- confirm whether that is intended. */
char *calc_contains_region(char *z, int n, int *offset, char *pat)
{
  int i,j,k,l,ppos,inPattern;
  char *arr;
  if (adplib_debug>1) printf("entering...\n");
  if (adplib_debug>1) for(i=0; i<=n; i++) printf("z[%d] = %d\n", i, z[i]);
  l = strlen(pat);
  if (adplib_debug>1) for(i=0; i<=l; i++) printf("pat[%d] = %c\n", i, pat[i]);
  arr = (char *) malloc((offset[n]+n+1) * sizeof(char));
  if (adplib_debug>1) printf("calculating...\n");
  for (j=0; j<=n; j++) {
    for (i=0; i<=j; i++) {
      arr[offset[j]+i]=0;	// tab(i,j)
      ppos = 0;			/* position inside pat */
      inPattern = 0;		/* currently extending a candidate match? */
      if (j-i >= l) {		/* subword long enough to hold pat at all */
	for (k=i+1;k<=j;k++) {
	  if (inPattern){
	    if (iupac_base(pat[ppos],z[k])) {
	      ppos++;
	    }
	    else {
	      inPattern = 0;
	      ppos = 0;
	      break;
	    }
	  }
	  else {
	    if (iupac_base(pat[ppos],z[k])) {
	      inPattern = 1;
	      ppos++;
	    }
	  }
	  if (ppos==l) arr[offset[j]+i]=1;	/* whole pattern matched */
	}
      }
    }
  }
  if (adplib_debug>1) {
    for (j=0; j<=n; j++) {
      for (i=0; i<=j; i++) {
	printf("%d ", arr[offset[j]+i]);
      }
      printf("\n");
    }
  }
  return arr;
}
/* String tools
====================================================================== */
/* Return a heap-allocated copy of s; caller frees.
   Fix: parenthesized the size expression -- "strlen(s)+1 * sizeof(char)"
   bound as strlen(s) + (1*sizeof(char)) and was only correct by accident
   because sizeof(char) == 1. */
char *mkstr(char *s){
  return strcpy((char *) malloc((strlen(s)+1) * sizeof(char)), s);
}
/* Copy s into the static arena (adp_statmem); lives until that arena is
   cleared/freed -- do NOT pass the result to free(). */
char *mkstr_stat(char *s){
  return strcpy((char *) myalloc(adp_statmem, (strlen(s)+1) * sizeof(char)), s);
}
/* ---------------------------------------------------------------------------------------------------- */
/* Dotbracket tools */
/* ---------------------------------------------------------------------------------------------------- */
static char *libPP_hlp;
static char *dots_hlp;
/* Fill the shared scratch buffer libPP_hlp with (j-i) copies of c and
   NUL-terminate it. Returns the shared buffer -- the next call overwrites
   it, so the caller must copy if the string is kept. */
char *libPP_repeat(int i, int j, char c){
  int pos = 0;
  while (pos < (j - i))
    libPP_hlp[pos++] = c;
  libPP_hlp[pos] = 0;
  return libPP_hlp;
}
/* Allocate the shared pretty-print scratch buffer (sequence length + 4
   bytes, from the static arena) and pre-build an all-dots string used by
   the dots() macro above. */
static void libPP_init(tsequence *seq){
  libPP_hlp = (char *) myalloc(adp_statmem, (seq->length+4) * sizeof(char));
  dots_hlp = mkstr_stat(libPP_repeat(0, seq->length, '.'));
}
/* File input
====================================================================== */
/* A single sequence
------------------------------------------- */
/* typedef struct { */
/* char success; */
/* char *descr; */
/* char *seq; */
/* int length; */
/* char *original_seq; /\* backup for window mode *\/ */
/* int original_length; */
/* } tsequence; */
/* Allocate a tsequence record with every field cleared: flags and lengths
   zero, all string pointers NULL. Caller releases it via sequence_free(). */
tsequence *sequence_new()
{
  tsequence *seq = (tsequence *) mmalloc(sizeof(tsequence));
  seq->success = 0;
  seq->length = 0;
  seq->original_length = 0;
  seq->descr = NULL;
  seq->seq = NULL;
  seq->original_seq = NULL;
  return seq;
}
/* Release a tsequence record together with the strings it owns; returns
   NULL so callers can write `ts = sequence_free(ts);`. free(NULL) is a
   no-op, so unset fields need no guard. */
tsequence *sequence_free(tsequence *ts)
{
  free(ts->descr);
  free(ts->seq);
  free(ts->original_seq);
  free(ts);
  return NULL;
}
/* A complete file
------------------------------------------- */
#define MAXINPUT 1000000
/* typedef struct { */
/*   char *filename; */
/*   char *start; */
/*   int current; */
/*   char first_input_read; */
/*   char first_descr_read; */
/*   char *temp; */
/* } treadseq; */
/* Slurp an entire stream into a growing heap buffer and return it as a
   NUL-terminated string; caller frees. The buffer starts at MAXINPUT
   bytes and doubles as needed.
   Fix: malloc/realloc results were previously used unchecked. */
static char *readseq_readfile(FILE *inputStream)
{
  int inpc, inpn;
  int tinput_alloc;
  char *tinput;
  inpn = 0;
  tinput = (char *) malloc(MAXINPUT * sizeof(char));
  if (tinput == NULL) { fprintf(stderr, "readseq_readfile: out of memory\n"); exit(1); }
  tinput_alloc = MAXINPUT;
  while ((inpc = getc(inputStream)) != EOF) {
    if (inpn == tinput_alloc-2) {
      tinput = (char *) realloc(tinput, sizeof (char) * (tinput_alloc * 2));
      if (tinput == NULL) { fprintf(stderr, "readseq_readfile: out of memory\n"); exit(1); }
      tinput_alloc *= 2;
    }
    tinput[inpn++] = inpc;
  }
  tinput[inpn] = 0;
  return tinput;
}
/* Open a sequence source. mode selects the origin: READSEQ_STRING treats
   `filename` as literal input, READSEQ_STDIN reads standard input, and
   READSEQ_FILE reads the named file. Returns a reader positioned at the
   start; release with readseq_free().
   Fix: inputStream was previously left uninitialized, so an unrecognized
   mode dereferenced garbage -- it is now NULL-initialized and caught by
   the existing !inputStream check. */
treadseq *readseq_open(char mode, char *filename){
  treadseq *rs;
  FILE *inputStream = NULL;   /* fix: was uninitialized */
  rs = (treadseq *) malloc(sizeof(treadseq));
  rs->current = 0;
  rs->first_input_read = 0;
  rs->first_descr_read = 0;
  rs->temp = (char *) malloc(MAXINPUT * sizeof(char));
  if (mode == READSEQ_STRING) {
    rs->start = mkstr(filename);
    rs->filename = mkstr("command line");
    return rs;
  }
  if (mode == READSEQ_STDIN) {
    inputStream = stdin;
    rs->filename = mkstr("stdin");
  }
  if (mode == READSEQ_FILE) {
    inputStream = fopen(filename, "r");
    rs->filename = mkstr(filename);
  }
  if (!inputStream) {
    fprintf(stderr, "error opening file %s: %s\n", filename, strerror(errno));
    exit(errno);
  }
  rs->start = readseq_readfile(inputStream);
  if (fclose(inputStream)) {
    fprintf(stderr, "error closing file %s: %s\n", filename, strerror(errno));
    exit(errno);
  }
  return rs;
}
/* Release a reader and everything it owns; NULL-safe. Returns NULL so
   callers can write `rs = readseq_free(rs);`. */
treadseq *readseq_free(treadseq *rs){
  if (rs == NULL) return NULL;
  free(rs->filename);
  free(rs->start);
  free(rs->temp);
  free(rs);
  return NULL;
}
/* Read next line from file
------------------------------------------- */
/* Read the next line from the reader into a fresh tsequence. On success
   ts->success is 1 and ts->seq / ts->original_seq hold copies of the
   line; at end of input ts->success stays 0.
   Fix: a trailing CR (#13, DOS line endings) was stripped only AFTER the
   line had already been duplicated with mkstr() and measured, so the
   strip had no effect on the returned sequence; it is now removed before
   copying. The duplicated strip statement was also dropped. */
tsequence *readseq_next_line(treadseq *rs){
  tsequence *ts;
  int pos;
  ts = sequence_new();
  ts->success = 0;
  if (adplib_debug>1) printf("rs: success = 0\n");
  if (adplib_debug>1) printf("rs: %d\n", rs->start[rs->current]);
  rs->temp[0] = 0; pos = 0;
  if (rs->start[rs->current] != 0) {
    while ((rs->start[rs->current] != '\n') && (rs->start[rs->current] != 0))
      rs->temp[pos++] = rs->start[rs->current++];
    if (rs->start[rs->current]=='\n') rs->current++;
    rs->temp[pos] = 0;
    // remove additional #13 for DOS input files (before copying!):
    if ((pos >= 1) && (rs->temp[pos-1] == 13)) rs->temp[--pos] = 0;
    ts->seq = mkstr(rs->temp);
    ts->original_seq = mkstr(rs->temp);
    ts->length = ts->original_length = strlen(ts->seq);
    ts->success = 1;
    if (adplib_debug>1) printf("rs: success = 1\n");
  }
  return ts;
}
/* Read next fasta sequence from file
------------------------------------------- */
/* Read the next FASTA record from the in-memory input.
   Phase 1 parses an optional ">description" line; phase 2 collects all
   alphabetic characters up to the next '>' (or end of input) as the
   sequence. ts->success is 1 iff a non-empty sequence was read. */
tsequence *readseq_next_fasta(treadseq *rs){
  tsequence *ts;
  int pos;
  char inpc,fil;
  ts = sequence_new();
  rs->temp[0] = 0; pos = 0;
  /* phase 1: description line (only if the record starts with '>') */
  if ((rs->start[rs->current] == '>') && (rs->start[rs->current] != 0)) {
    /* a description appearing after sequence data was already read
       without one means the first record had no header */
    if (!rs->first_descr_read && rs->first_input_read) {
      fprintf(stderr, "error in input file: missing description for first sequence\n");
      exit(1);
    }
    rs->first_descr_read = 1;
    rs->current++;
    while ((rs->start[rs->current] != '\n') && (rs->start[rs->current] != 0)) rs->temp[pos++] = rs->start[rs->current++];
    if (rs->start[rs->current]) rs->current++;
  }
  rs->temp[pos] = 0;
  ts->descr = mkstr(rs->temp);
  // remove additional #13 for DOS input files:
  /* NOTE(review): this strips the CR from rs->temp only AFTER mkstr has
     copied it, so ts->descr may still carry a trailing '\r' — confirm */
  if ((pos >= 1) && (rs->temp[pos-1] == 13)) rs->temp[pos-1] = 0;
  /* phase 2: sequence body — keep only A-Z and a-z, stop at next '>' */
  rs->temp[0] = 0; pos = 0;
  fil = 1;  /* "first line" flag: a leading '>' only ends the record later */
  while ((fil || (rs->start[rs->current] != '>')) && (rs->start[rs->current] != 0)) {
    while (((inpc = rs->start[rs->current]) != '\n') && (rs->start[rs->current] != 0))
      if (((inpc >= 65) && (inpc <= 90)) ||
          ((inpc >= 97) && (inpc <= 122))) rs->temp[pos++] = rs->start[rs->current++];
      else rs->current++;
    fil = 0;
    if (rs->start[rs->current]) rs->current++;
    rs->first_input_read = 1;
  }
  rs->temp[pos] = 0;
  ts->seq = mkstr(rs->temp);
  ts->original_seq = mkstr(rs->temp);
  ts->length = ts->original_length = strlen(ts->seq);
  if (ts->seq[0]) ts->success = 1;
  else ts->success = 0;
  return ts;
}
/* Functions for results output
====================================================================== */
/* Simple standard output
------------------------------------------- */
/* Print the optimal result in the plain layout: the input sequence,
   then the algebra name and its score (bold where color is enabled). */
void simple_output_optimal(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end)
{
  fputs("\nInput: ", stdout);
  pcolor(opts->colored_output, COLOR_BOLD);
  fputs(seq->seq, stdout);
  pcolor(opts->colored_output, COLOR_DEFAULT);
  fputs("\nAlgebra: ", stdout);
  pcolor(opts->colored_output, COLOR_BOLD);
  fputs(algebra, stdout);
  pcolor(opts->colored_output, COLOR_DEFAULT);
  fputs(", score: ", stdout);
  pcolor(opts->colored_output, COLOR_BOLD);
  printf("%d\n", result_score);
  pcolor(opts->colored_output, COLOR_DEFAULT);
}
/* Print the header of the suboptimal candidate table; the counting
   algebra produces no candidates, so it gets no header. */
void simple_output_subopt_start(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end)
{
  if (!strcmp(algebra, "count")) return;
  printf("Suboptimal range: [%d - %d]\n", range_begin, range_end);
  printf("\n");
  printf(" Score | Candidate\n");
  // pcolor(opts->colored_output,COLOR_BOLD);
  printf("-----------------------------------------------------------------\n");
  // pcolor(opts->colored_output,COLOR_DEFAULT);
}
/* Print one suboptimal candidate row (skipped for the counting algebra). */
void simple_output_subopt(toptions *opts, tsequence *seq, char *algebra, int score, char *result_prettyprint)
{
  if (!strcmp(algebra, "count")) return;
  printf("%6d | %s\n", score, result_prettyprint);
}
/* Print the closing separator after the candidate table. */
void simple_output_subopt_end(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end)
{
  fputs("\n", stdout);
  // pcolor(opts->colored_output,COLOR_BOLD);
  fputs("=================================================================\n", stdout);
  // pcolor(opts->colored_output,COLOR_DEFAULT);
}
/* RNA output
------------------------------------------- */
/* Print the optimal folding energy (scores are stored in hundredths of
   kcal/mol). In window mode the per-window output is handled elsewhere. */
void rna_output_optimal(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end)
{
  if (opts->window_mode) return;
  printf("\n");
  pcolor(opts->colored_output, COLOR_BOLD);
  printf("%s: %.2f kcal/mol", algebra, ((float) result_score) / 100);
  pcolor(opts->colored_output, COLOR_DEFAULT);
  printf("\n");
}
/* Print the header for a suboptimal RNA enumeration: energy range,
   optional FASTA description and (unless split output) the sequence. */
void rna_output_subopt_start(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end)
{
  const int counting = (strcmp(algebra, "count") == 0);
  if (!opts->window_mode && !counting) {
    printf("Suboptimal range: [%.2f kcal/mol - %.2f kcal/mol]\n", ((float) range_begin)/100, ((float) range_end)/100);
    printf("\n");
    if (seq->descr && seq->descr[0]) {
      pcolor(opts->colored_output, COLOR_BOLD);
      printf(">%s", seq->descr);
      pcolor(opts->colored_output, COLOR_DEFAULT);
      printf("\n");
    }
    if (!opts->split_output_mode) printf("%s\n", seq->original_seq);
  }
  /* print the subsequence for the first window iteration only; for later
     iterations this is done in shift_input */
  if (opts->window_mode && (opts->window_pos == 0) && !counting)
    print_sequence(opts, seq, opts->window_pos, opts->window_size);
}
/* Emit one suboptimal RNA candidate via the format-string machinery;
   the counting algebra yields nothing printable. */
void rna_output_subopt(toptions *opts, tsequence *seq, char *algebra, int score, char *result_prettyprint)
{
  if (!strcmp(algebra, "count")) return;
  output_result(opts, seq, 1, // TODO: number of results
                &score, NULL, result_prettyprint, NULL, -1, -1);
}
/* Hook called after the last suboptimal candidate; intentionally a no-op
   (the closing separator below was deliberately disabled). */
void rna_output_subopt_end(toptions *opts, tsequence *seq, char *algebra, int result_score, int range_begin, int range_end)
{
  /* printf("\n"); */
  /* pcolor(opts->colored_output,COLOR_BOLD); */
  /* printf("=================================================================\n"); */
  /* pcolor(opts->colored_output,COLOR_DEFAULT); */
}
/* Emit the FASTA description line (bold where color is enabled),
   if one is present. */
void rna_output_descr(toptions *opts, tsequence *seq)
{
  if (!(seq->descr && seq->descr[0])) return;
  pcolor(opts->colored_output, COLOR_BOLD);
  printf(">%s", seq->descr);
  pcolor(opts->colored_output, COLOR_DEFAULT);
  printf("\n");
}
/* Tokenizer for interactive command shell
====================================================================== */
#define MAXTOKEN 500
/* typedef struct { */
/* char **token; */
/* int count; */
/* } ttokenizer; */
/* Allocate an empty tokenizer with room for MAXTOKEN token pointers. */
ttokenizer *tokenizer_new(){
  ttokenizer *tok = (ttokenizer *) malloc(sizeof(ttokenizer));
  tok->token = (char **) malloc(MAXTOKEN * sizeof(char *));
  tok->count = 0;
  return tok;
}
/* Release a tokenizer, its token array and all token strings; accepts NULL. */
ttokenizer *tokenizer_free(ttokenizer *t){
  int i;
  if (!t) return NULL;
  for (i = 0; i < t->count; i++) free(t->token[i]);
  free(t->token);
  free(t);
  return NULL;
}
/* Split the command line s into tokens (token[0] is `name` itself).
 * Blanks outside single quotes, and the quotes themselves, are turned
 * into \x01 so strtok can split on that single separator byte; s is
 * modified in place. Fix: strlen(s) was re-evaluated in the loop
 * condition on every iteration (accidental O(n^2)); since the loop only
 * replaces characters with nonzero bytes, the length is invariant and
 * can be hoisted. */
void tokenizer_exec(ttokenizer *t, char *name, char *s)
{
  int i;
  int len;
  char in_quotes;
  char *ttoken;
  char septoken[] = {1,0};
  // free old token strings:
  for (i=0; i < t->count; i++) free(t->token[i]);
  // build new token array:
  t->count = 1;
  // replace whitespaces (hoisted strlen: loop never writes a NUL):
  in_quotes = 0;
  len = (int) strlen(s);
  for (i=0; i<len; i++) {
    if (!in_quotes && (s[i] == ' ')) s[i] = 1;
    if (s[i] == 39) {          /* 39 == '\'' */
      in_quotes = 1-in_quotes;
      s[i] = 1;
    }
  }
  // build token list:
  t->token[0] = mkstr(name);
  while((ttoken = strtok(s,septoken))) {
    t->token[t->count++] = mkstr(ttoken);
    s = NULL;
  }
}
/* wrappers for readline
====================================================================== */
/* wrappers for readline
====================================================================== */
#ifdef HAVE_LIBEDITLINE
extern int rl_insert(int count, int c);
extern int rl_bind_key(int c, int func(int, int));
extern char *readline(const char *prompt);
extern int add_history(const char *line);
#endif
static char *line_read = (char *)NULL;
/* One-time setup for line input: with libeditline nothing is needed,
   otherwise allocate the fixed-size buffer that rl_gets/fgets reuses. */
void rl_init() {
#ifdef HAVE_LIBEDITLINE
  /* libeditline manages its own buffers */
  // printf("readline activated\n");
  // rl_bind_key('\t', rl_insert);
#else
  line_read = (char *) calloc(MAXINPUT, sizeof(char));
  line_read[0] = 0;
#endif
}
/* Read one line of user input and return it (without trailing newline).
 * Fix (non-editline path): the fgets return value was ignored, and the
 * last character was chopped unconditionally — on EOF the stale previous
 * line was returned, and an empty buffer wrote to index -1. */
char *rl_gets (){
#ifdef HAVE_LIBEDITLINE
  if (line_read)
    {
      free (line_read);
      line_read = (char *)NULL;
    }
  line_read = readline (NULL);
  if (line_read && *line_read)
    add_history (line_read);
#else
  if (!fgets(line_read, MAXINPUT-2, stdin)) {
    /* EOF or read error: return an empty line, not stale contents */
    line_read[0] = 0;
  }
  else {
    /* remove the trailing newline only if one is actually present */
    size_t l = strlen(line_read);
    if (l && line_read[l-1] == '\n') line_read[l-1] = 0;
  }
#endif
  return (line_read);
}
/* Output format string handling
====================================================================== */
/* show shrep probability */
static char shrep_prob_show;
/* PS output */
static char graphics_alloc = 0;
static char *graphics_sequence;
static char *graphics_shapeString;
static char *graphics_preString;
static char *graphics_fileName_temp;
static char *graphics_fileName;
int number_of_graphics;
char colored_db2shape;
/* typedef struct { */
/* char type; */
/* char *string; */
/* struct format_string *next; */
/* } tformat_string; */
static tformat_string *format_string_struct = NULL; // current format string
static char *leading_space; // help strings
static char *leading_space_db;
#define FORMAT_LENGTH 1024
#define FORMAT_ENERGY 1
#define FORMAT_SHREPPROB 2
#define FORMAT_DBSTRING 3
#define FORMAT_SHAPESTRING 4
#define FORMAT_PROB 5
#define FORMAT_RANK 6
#define FORMAT_VERB 7
/* create a new format string entry */
/* Allocate a single format-string list node holding a copy of `string`. */
static tformat_string *format_string_newentry(char type, char *string){
  tformat_string *entry = (tformat_string *) mcalloc(1, sizeof(tformat_string));
  entry->type = type;
  entry->string = mkstr(string);
  entry->next = NULL;
  return entry;
}
/* free a complete format string structure */
/* Free a complete format-string list, node strings included. */
static void format_string_free(tformat_string *l){
  while (l) {
    tformat_string *next = l->next;
    free(l->string);
    free(l);
    l = next;
  }
}
/* error handling for format strings; print error and use mode -o 2 instead */
/* Report a format-string error and fall back to output mode -o 2.
   `s` is a printf format that may consume the offending character c. */
static void format_string_error(char *s, char c){
  printf(s, c);
  fputs("Using default output mode instead (-o 2).\n", stdout);
  setOutputMode(2);
}
/* build a format string structure for the given string */
/* Parse `s` into a format-string list. Grammar: a sequence of
   <letter>{<printf-format>} items where the letter selects the value
   (E energy, R shrep prob., D dot-bracket, S shape, P prob., C rank,
   V verbatim); \n, \t, \e escapes are recognized inside the braces.
   On any syntax error the default mode is installed (via
   format_string_error) and the currently active structure is returned. */
static tformat_string *format_string_build(char *s){
  char *t;
  char type;
  int pos, tpos;
  tformat_string *fs, *tfs, *ffs;
  if (strlen(s)>FORMAT_LENGTH) {
    format_string_error("Format string too long\n", 0);
    return(format_string_struct);
  }
  /* t: scratch buffer for the text between the braces */
  t = (char *) mcalloc(FORMAT_LENGTH+1, sizeof(char));
  tfs = NULL; ffs = NULL;  /* tfs: list tail, ffs: list head */
  pos = 0; tpos = 0;
  while (s[pos]) {
    /* the selector letter determines the node type */
    switch (s[pos++]) {
    case 'E':
      type = FORMAT_ENERGY;
      break;
    case 'R':
      type = FORMAT_SHREPPROB;
      break;
    case 'D':
      type = FORMAT_DBSTRING;
      break;
    case 'S':
      type = FORMAT_SHAPESTRING;
      break;
    case 'P':
      type = FORMAT_PROB;
      break;
    case 'C':
      type = FORMAT_RANK;
      break;
    case 'V':
      type = FORMAT_VERB;
      break;
    default:
      format_string_error("Syntax error in format string: unexpected character '%c'.\n", s[pos-1] == 1 ? ' ' : s[pos-1]);
      return(format_string_struct);
    }
    if (s[pos++] != '{') {
      format_string_error("Syntax error in format string: '{' expected.\n", 0);
      return(format_string_struct);
    }
    else {
      /* copy the brace contents, translating backslash escapes */
      tpos = 0;
      while (s[pos] && (s[pos] != '}')) {
        if (s[pos] == '\\') {
          pos++;
          switch (s[pos++]) {
          case 'n':
            t[tpos++] = '\n';
            break;
          case 't':
            t[tpos++] = '\t';
            break;
          case 'e':
            t[tpos++] = '\x1b';
            break;
          default:
            format_string_error("Syntax error in format string: unexpected character '\\%c'.\n", s[pos-1]);
            return(format_string_struct);
          }
        }
        else t[tpos++] = s[pos++];
      }
      t[tpos] = 0;
      /* append the finished node to the list */
      fs = format_string_newentry(type, t);
      if (!ffs) ffs=fs;
      else tfs->next = fs;
      tfs = fs;
    }
    pos++;   /* skip the closing '}' */
  }
  free(t);
  return ffs;
}
/* remove all ansi color command from the given string;
used to calculate the correct number of leading spaces */
/* Strip ANSI color commands (ESC ... 'm') from s in place; used to
   compute the visible width when deriving leading spaces. */
static void fs_remove_color_commands(char *s){
  int read_pos = 0, write_pos = 0;
  int len = strlen(s);
  while (read_pos < len) {
    if (s[read_pos] == '\x1b') {
      /* skip forward to (and past, via the increment below) the 'm'
         that terminates the escape sequence */
      while ((read_pos < len) && (s[read_pos] != 'm')) read_pos++;
    }
    else s[write_pos++] = s[read_pos];
    read_pos++;
  }
  s[write_pos] = 0;
}
/* calculate the number of leading spaces for sequence output; example:
<leading spaces>gucugcaugacugacugacugacuguagcugcaugcaugcaugcacugaugca
(-20.4) ....(((.....))).........((((.....))))................
*/
/* Build `leading_space` / `leading_space_db`: the run of blanks that
   aligns the sequence line above the dot-bracket column. It renders the
   current format string with dummy values, strips color commands, and
   turns everything before the '{' placeholder into spaces. */
void fs_init_leading_space(char energy, char shrepProb, char dbString, char shapeString, char prob, char rank){
  tformat_string *itr;
  int pos;
  char dbinside;
  char *t, *s;
  /* s walks forward through the render buffer t */
  s = t = (char *) myalloc(adp_statmem, (FORMAT_LENGTH+100) * sizeof(char));
  leading_space_db = (char *) myalloc(adp_statmem, 100 * sizeof(char));
  dbinside = 0;
  itr = format_string_struct;
  /* render each enabled item with a representative dummy value */
  while (itr) {
    switch(itr->type) {
    case FORMAT_ENERGY:
      if (energy) sprintf(s, itr->string, -10.0);
      break;
    case FORMAT_SHREPPROB:
      if (shrep_prob_show && shrepProb) sprintf(s, itr->string, 0.5);
      break;
    case FORMAT_DBSTRING:
      /* '{' marks where the dot-bracket string starts; everything up to
         it (within this item) becomes leading_space_db */
      if (dbString) sprintf(s, "{");
      if (!dbinside) {
        sprintf(leading_space_db, itr->string, "{");
        fs_remove_color_commands(leading_space_db);
        pos = 0;
        /* blank out the prefix, keeping tabs/newlines as-is */
        while(leading_space_db[pos]) {
          if (leading_space_db[pos]=='{') {
            leading_space_db[pos]=0;
            break;
          }
          if ((leading_space_db[pos]=='\t') || (leading_space_db[pos]=='\n')) pos++;
          else leading_space_db[pos++]=' ';
        }
      }
      dbinside = 1;
      break;
    case FORMAT_SHAPESTRING:
      if (shapeString) sprintf(s, itr->string, "[][]");
      break;
    case FORMAT_PROB:
      if (prob) sprintf(s, itr->string, 0.5);
      break;
    case FORMAT_RANK:
      if (rank) sprintf(s, itr->string, 1);
      break;
    case FORMAT_VERB:
      sprintf(s, "%s", itr->string);
      break;
    }
    s = s + strlen(s);
    itr = itr->next;
  }
  /* now blank out the rendered prefix up to the '{' marker */
  fs_remove_color_commands(t);
  s[0] = 0;
  pos=0;
  while(t[pos]) {
    if (t[pos]=='{') {
      t[pos]=0;
      break;
    }
    if ((t[pos]=='\t') || (t[pos]=='\n')) pos++;
    else t[pos++]=' ';
  }
  /* without a dot-bracket item there is nothing to align against */
  if (!dbString || !dbinside) t[0]=0;
  leading_space = t;
}
/* free the leading spaces temp mem. */
/* Release the leading-space helper strings built by fs_init_leading_space.
   NOTE(review): both were obtained via myalloc(adp_statmem, ...) but are
   released with plain free() — confirm myalloc returns free()-able memory
   that the pool does not also release. */
void fs_free_leading_space(){
  free(leading_space);
  free(leading_space_db);
}
/* initialize a new format string */
void format_string_init(char *s){
if (format_string_struct) format_string_free(format_string_struct);
format_string_struct = format_string_build(s);
}
/* set predefined output modes */
/* Install one of the predefined output format strings; any value other
   than 1/2/3 selects the colored default layout. */
void setOutputMode(int outputMode){
  switch (outputMode) {
  case 1:
    format_string_init("D{%s }E{(%.2f) }R{(%.7f) }P{%.7f }S{%s}C{ R = %d}V{\n}");
    break;
  case 2:
    format_string_init("E{%-8.2f}R{(%.7f) }D{%s }P{%.7f }S{%s}C{ R = %d}V{\n}");
    break;
  case 3:
    format_string_init("E{%.2f }R{%.7f }D{%s }P{%.7f }S{%s}C{ %d}V{\n}");
    break;
  default:
    format_string_init("E{%-8.2f}R{(%.7f) }D{\x1b[1;31m%s\x1b[0m }P{\x1b[1;30m%.7f\x1b[0m }S{%s}C{ R = %d}V{\n}");
    break;
  }
}
/* Output handling for sequences
====================================================================== */
/* print position numbers for sequence;
used in window- and split-output modes */
/* Print "<first>    <last>" position labels spanning `size` columns,
   starting at (1-based) position pos+1. */
static void print_subseq_numbers(toptions *opts, int pos, int size){
  int i;
  /* in split mode, clip the label width to the current split chunk */
  if (opts->split_output_mode) size = min(opts->window_pos + opts->window_size, pos + opts->split_output_size) - pos;
  pcolor(opts->colored_output, COLOR_BLUE);
  printf("%d", pos +1);
  pcolor(opts->colored_output,COLOR_DEFAULT);
  /* pad so the right label ends at column `size`; each (int)log10(x)+1
     term is the digit count of one of the two printed numbers */
  for (i=1; i<= size - (((int)log10(pos + 1))+1 + ((int)log10(pos + size))+1); i++) printf(" ");
  pcolor(opts->colored_output,COLOR_BLUE);
  printf("%d", pos + size);
  pcolor(opts->colored_output,COLOR_DEFAULT);
  printf("\n");
}
/* print the given subsequence of a sequence */
/* Print characters pos+1 .. pos+size (1-based, shifted by `offset`) of s
 * through format fs, padding positions past the end of s with spaces.
 * Fix: strlen(s) was re-evaluated inside the loop (accidental O(n^2))
 * and compared signed-vs-unsigned; the length is now hoisted into an int.
 * NOTE(review): tmp is sized 2*strlen(s) — assumes size <= 2*strlen(s);
 * confirm against callers. */
static void print_subseq(char *fs, char *s, int offset, int pos, int size){
  int i, tpos, len;
  char *tmp;
  tmp = (char*) mcalloc(2*strlen(s), sizeof(char));
  tpos = 0;
  len = (int) strlen(s);
  for (i=pos+1; i <= pos + size; i++)
    if (i <= len) tmp[tpos++] = s[offset+i-1];
    else tmp[tpos++] = ' ';
  tmp[tpos] = 0;
  printf(fs, tmp);
  free(tmp);
}
/* same as print_subseq; used in color mode */
/* Same as print_subseq, but for a string containing ANSI color commands:
   counts only visible characters when locating the window, remembers the
   color active at the window start, and re-emits it before the slice. */
static void print_subseq_color(char *fs, char *s, int pos, int size){
  int i,c,tpos,ls;
  char e;                     /* inside-escape-sequence flag */
  char *lastcol, *tmp, *tmp2; /* last color code / final output / raw slice */
  int plastcol;               /* write index into lastcol; -1 = none seen */
  ls = strlen(s);
  lastcol = (char*) mcalloc(20, sizeof(char));
  tmp = (char*) mcalloc(20*ls, sizeof(char));
  tmp2 = (char*) mcalloc(20*ls, sizeof(char));
  plastcol = -1;
  // go to character pos+1:
  /* c counts visible characters; escape bytes are consumed into lastcol */
  c=0; i=0; e=0;
  while (c<pos+1) {
    if (s[i] == '\x1b') { e = 1; plastcol = 0; }
    else if ((s[i] == 'm') && e) e = 0;
    else if (e) lastcol[plastcol++] = s[i];
    else c++;
    i++;
  }
  if (plastcol != -1) lastcol[plastcol] = 0;
  /* copy `size` visible characters (escapes included), pad past the end */
  tpos = 0;
  while (c<=pos+size) {
    if (c <= ls) tmp2[tpos++] = s[i-1];
    else tmp2[tpos++] = ' ';
    if (s[i] == '\x1b') e = 1;
    else if ((s[i] == 'm') && e) e = 0;
    else if (e) ;
    else c++;
    i++;
  }
  tmp2[tpos] = 0;
  /* reset attributes after the slice, restore the active color before it */
  strcat(tmp2, "\x1b[0m");
  tmp[0]=0;
  if (plastcol != -1) sprintf(tmp, "\x1b%sm", lastcol);
  strcat(tmp, tmp2);
  printf(fs, tmp);
  free(lastcol);
  free(tmp);
  free(tmp2);
}
/* main entry function for sequence output */
/* Print the current window of the input sequence, preceded by the
   alignment spaces; in split-output mode this is handled elsewhere. */
void print_sequence(toptions *opts, tsequence *seq, int pos, int size){
  if (opts->split_output_mode) return;
  if (opts->window_mode) {
    printf("%s%s", leading_space, leading_space_db);
    print_subseq_numbers(opts, pos, size);
  }
  printf("%s%s", leading_space, leading_space_db);
  print_subseq("%s", seq->original_seq, pos, 0, size);
  printf("\n");
}
/* used in window-mode to shift the input sequence */
/* Shift the working window: copy the slice
   [window_pos .. window_pos+window_size] of the pristine original
   sequence into seq->seq, mirror it into the graphics buffer, and
   re-run convert_input on it. */
void shift_input(toptions *opts, tsequence *seq, char output){
  int i;
  /* printf("window_pos: %d\n", opts->window_pos); */
  /* printf("window_size: %d\n", opts->window_size); */
  /* printf("seq->seq: %s\n", seq->seq); */
  /* printf("seq->original_seq: %s\n", seq->original_seq); */
  for (i=opts->window_pos; i<=opts->window_pos + opts->window_size; i++)
    seq->seq[i-opts->window_pos] = seq->original_seq[i];
  /* keep the plain-text copy for graphics output in sync */
  if (number_of_graphics) {
    for (i=opts->window_pos; i<=opts->window_pos + opts->window_size; i++)
      graphics_sequence[i-opts->window_pos] = seq->original_seq[i];
    graphics_sequence[strlen(seq->seq)] = 0;
  }
  /* presumably re-encodes the window into the internal alphabet
     (convert_input is defined elsewhere) — confirm */
  convert_input(0, seq->seq, opts->window_size);
  if (output) print_sequence(opts, seq, opts->window_pos, opts->window_size);
}
/* from db2shape-cl */
/* Result pair of the (currently disabled) colored db2shape conversion. */
struct dbcol_result{
  char *dbstr;     /* dot-bracket string */
  char *shapestr;  /* shape string */
};
// TODO struct dbcol_result *calc_db2shape_cl(char *input, int st, int _maxloop);
/* main entry function for rna result output */
/* Render one result through the active format-string list. Any of the
   value pointers may be NULL / negative to suppress the matching item.
   In split-output mode the dot-bracket item is printed chunk by chunk,
   interleaved with position labels and the sequence slice. */
void output_result
(toptions *opts, tsequence *seq,
 int nres, int *energy, double *shrepProb, char *dbString, char *shapeString, double prob, int rank){
  int pos, size;
  tformat_string *itr;
  char colors;
  itr = format_string_struct;
  colors = colored_db2shape && dbString;
  if (colors) {
    // TODO dbcol = calc_db2shape_cl(dbString, global_shapetype, maxloop);
    // TODO dbString = dbcol->dbstr;
    // TODO shapeString = dbcol->shapestr;
  }
  while (itr) {
    switch(itr->type) {
    case FORMAT_ENERGY:
      /* energies are stored in hundredths (printed as kcal/mol) */
      if (energy) printf(itr->string, (float) *energy / 100);
      break;
    case FORMAT_SHREPPROB:
      if (shrep_prob_show && shrepProb) printf(itr->string, *shrepProb);
      break;
    case FORMAT_DBSTRING:
      if (dbString) {
        if (!opts->split_output_mode) printf(itr->string, dbString);
        else {
          /* split mode: chunked output with position labels + sequence */
          for (pos = 0; pos < opts->window_size; pos += opts->split_output_size) {
            size = opts->split_output_size;
            if (pos) printf("%s%s", leading_space, leading_space_db);
            else printf("%s", leading_space_db);
            print_subseq_numbers(opts, pos + opts->window_pos, size);
            printf("%s%s", leading_space, leading_space_db);
            print_subseq("%s", seq->original_seq, opts->window_pos, pos, size);
            printf("\n");
            printf("%s", leading_space);
            if (colors) print_subseq_color(itr->string, dbString, pos, size);
            else print_subseq (itr->string, dbString, 0, pos, size);
            if (pos + opts->split_output_size < opts->window_size) printf("\n");
          }
        }
      }
      break;
    case FORMAT_SHAPESTRING:
      /* dangling else binds to the inner if: empty shape prints "_" */
      if (shapeString) if (shapeString[0]==0) printf(itr->string, "_");
      else printf(itr->string, shapeString);
      break;
    case FORMAT_PROB:
      if (prob >= 0) printf(itr->string, prob);
      break;
    case FORMAT_RANK:
      if (rank >= 0) printf(itr->string, rank);
      break;
    case FORMAT_VERB:
      printf("%s",itr->string);
      break;
    }
    itr = itr->next;
    // free colored strings:
    /* NOTE(review): this block sits INSIDE the while loop; harmless
       while the TODOs are commented out, but it should move below the
       loop when they are enabled */
    if (colors) {
      // TODO free(dbcol->dbstr);
      // TODO free(dbcol->shapestr);
      // TODO free(dbcol);
    }
    // TODO if (dbString) rna_plot(nres, dbString_org, energy, prob, shapeString_org);
  }
}
/* Initialize all stuff from adplib
====================================================================== */
/* Per-sequence ADP library setup: *z becomes a 1-based view of seq->seq,
   *n the effective (window-clipped) input length; memory pools, output
   mode and alignment spaces are (re-)initialized. */
void adplib_init(toptions *opts, tsequence *seq, char **z, int *n){
  /* ADP code indexes the input from 1, hence the -1 offset */
  *z = (char *) seq->seq - 1;
  if (opts->window_mode) {
    *n = min(opts->window_size, seq->length);
  }
  else {
    *n = seq->length;
    opts->window_size = seq->length;
  }
  /* window can never exceed the actual sequence length */
  opts->window_size = min(opts->window_size, seq->length);
  adp_dynmem = memory_new();
  adp_statmem = memory_new();
  libPP_init(seq);
  setOutputMode(1);
  init_iupac_base();
  /* all six format items enabled for the leading-space calculation */
  fs_init_leading_space(1,1,1,1,1,1);
}
/* Release the memory pools created in adplib_init (opts/seq unused). */
void adplib_free(toptions *opts, tsequence *seq){
  memory_free(adp_dynmem);
  memory_free(adp_statmem);
}
/* ---------------------------------------------------------------------------
rnalib.h
RNA energy library, based on Haskell implementation by Jens Reeder
Author: Peter Steffen
$Date: 2006/04/18 08:40:55 $
--------------------------------------------------------------------------- */
// alphabet size (A,C,G,U,N)
#define ASIZE 5
#define inp(I) z[I]
#define d_inp(I) d_z[I]
/* basepair and stackpair predicates */
extern __shared__ char memory[] ;
// --------
#ifdef SHARED_OFFSET
#define d_offset ((int *) (memory))
#define dd_offset(I) d_offset[I]
#define memory_o (memory + 8010)
#else
#define dd_offset(I) (((I)*((I)+1))/2)
#define memory_o memory
#endif
//__device__ char *g_z;
//__device__ int *d_columns ;
// #ifdef SHARED_Z
// #define d_z (memory)
// #define memory_e (memory_o + 2010)
// #else
// #define d_z g_z
// #define memory_e memory_o
// #endif
// ------------
#ifdef SHARED_ENERGY
#define d_canPair (memory_e + 150)
#define d_stack_dg ((int *) (memory_e + 200))
#define d_tstacki_dg ((int *) (memory_e + 2800))
#define d_termaupenalty_ar ((int *) (memory_e + 5400))
#define d_il_ent_ar ((int *) (memory_e + 5600))
#define d_bl_ent_ar ((int *) (memory_e + 5800))
#define d_hl_ent_ar ((int *) (memory_e + 6000))
#define memory_s (memory_e + 6040)
#define d_basepairing(I,J) ((I+1 < J) && d_canPair[index2(d_inp((I)+1),d_inp(J))])
#define d_stackpairing(I,J) ((I+3 < J) && d_canPair[index2(d_inp((I)+1),d_inp(J))] && d_canPair[index2(d_inp((I)+2),d_inp((J)-1))])
#define d_stack_dg_ac(I,J,K,L) d_stack_dg [index4(d_inp(I),d_inp(J),d_inp(K),d_inp(L))]
#define d_sr_energy(I,J) d_stack_dg [index4(d_inp((I)),d_inp((I)+1),d_inp((J)-1),d_inp((J)))]
#define d_il_stack(I,J,K,L) (d_tstacki_dg[index4(d_inp((I)),d_inp((I)+1),d_inp((L)),d_inp((L)+1))] + \
d_tstacki_dg[index4(d_inp((J)+1),d_inp((J)),d_inp((K)+1),d_inp((K)))])
#define d_termaupenalty(I,J) d_termaupenalty_ar[index2(d_inp(I),d_inp(J))]
#else
#define memory_s memory_e
#define d_canPair g_canPair
#define d_stack_dg g_stack_dg
#define d_tstacki_dg g_tstacki_dg
#define d_termaupenalty_ar g_termaupenalty_ar
#define d_il_ent_ar g_il_ent_ar
#define d_bl_ent_ar g_bl_ent_ar
#define d_hl_ent_ar g_hl_ent_ar
#define d_basepairing(I,J) ((I+1 < J) && d_canPair[d_inp((I)+1)][d_inp(J)])
#define d_stackpairing(I,J) ((I+3 < J) && d_canPair[d_inp((I)+1)][d_inp(J)] && d_canPair[d_inp((I)+2)][d_inp((J)-1)])
#define d_stack_dg_ac(I,J,K,L) d_stack_dg[d_inp(I)][d_inp(J)][d_inp(K)][d_inp(L)]
#define d_sr_energy(I,J) d_stack_dg[d_inp((I))][d_inp((I)+1)][d_inp((J)-1)][d_inp((J))]
#define d_il_stack(I,J,K,L) (d_tstacki_dg[d_inp((I))][d_inp((I)+1)][d_inp((L))][d_inp((L)+1)] + \
d_tstacki_dg[d_inp((J)+1)][d_inp((J))][d_inp((K)+1)][d_inp((K))])
#define d_termaupenalty(I,J) d_termaupenalty_ar[d_inp(I)][d_inp(J)]
#endif
/////
#define basepairing(I,J) ((I+1 < J) && canPair[inp((I)+1)][inp(J)])
char canStackPair[ASIZE][ASIZE][ASIZE][ASIZE];
#define stackpairing(I,J) ((I+3 < J) && canPair[inp((I)+1)][inp(J)] && canPair[inp((I)+2)][inp((J)-1)])
/* alternative definition of basepair, working on characters */
char basepair(int i, int j);
__device__ char d_basepair(int i, int j);
/* Constants */
/* ------------- */
#define const_e (2.718281828459)
#define mean_scale (1.34855)
/* Energy tables */
/* ---------------------- */
/* The Jacobson-Stockmayer term for loop interpolation. */
#define jacobson_stockmayer(size) (107.856*log((size)/30.0))
#define UNDEF 1000000
char canPair[ASIZE][ASIZE];
__device__ __constant__ char g_canPair[ASIZE][ASIZE];
int stack_dg [ASIZE][ASIZE][ASIZE][ASIZE];
__device__ __constant__ int g_stack_dg [ASIZE][ASIZE][ASIZE][ASIZE];
int hl_ent_ar [31];
__device__ __constant__ int g_hl_ent_ar[31];
int tstackh_dg [ASIZE][ASIZE][ASIZE][ASIZE];
__device__ __constant__ int d_tstackh_dg [ASIZE][ASIZE][ASIZE][ASIZE];
int hl_tetra [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
__device__ int d_hl_tetra [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
int bl_ent_ar [31];
__device__ __constant__ int g_bl_ent_ar[31];
int il_ent_ar [31];
__device__ __constant__ int g_il_ent_ar[31];
int tstacki_dg [ASIZE][ASIZE][ASIZE][ASIZE];
__device__ __constant__ int g_tstacki_dg [ASIZE][ASIZE][ASIZE][ASIZE];
int dr_dangle_dg [ASIZE][ASIZE][ASIZE];
__device__ __constant__ int d_dr_dangle_dg [ASIZE][ASIZE][ASIZE];
int dl_dangle_dg [ASIZE][ASIZE][ASIZE];
__device__ __constant__ int d_dl_dangle_dg [ASIZE][ASIZE][ASIZE];
int termaupenalty_ar [ASIZE][ASIZE];
__device__ __constant__ int g_termaupenalty_ar [ASIZE][ASIZE];
int intloop11 [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
__device__ int d_intloop11 [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
int intloop21 [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
__device__ int d_intloop21 [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
int intloop22 [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
__device__ int d_intloop22 [ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE][ASIZE];
double *scale_ar;
__device__ double *d_scale_ar;
//#include "d_energy.cu"
/* Energy Functions */
/* ----------------------------------------- */
#define index2(I,J) (I*ASIZE + J)
#define index3(I,J,K) (index2(index2(I, J), K))
#define index4(I,J,K,L) (index3(index2(I, J), K, L))
#define index5(I,J,K,L,M) (index4(index2(I, J), K, L, M))
#define index6(I,J,K,L,M,N) (index5(index2(I, J), K, L, M, N))
#define index7(I,J,K,L,M,N,O) (index6(index2(I, J), K, L, M, N, O))
#define index8(I,J,K,L,M,N,O,P) (index7(index2(I, J), K, L, M, N, O, P))
#define stack_dg_ac(I,J,K,L) stack_dg[inp(I)][inp(J)][inp(K)][inp(L)]
#define sr_energy(I,J) stack_dg[inp((I))][inp((I)+1)][inp((J)-1)][inp((J))]
#define hl_ent(size) ((size) <= 30 ? hl_ent_ar[size] : 769 + jacobson_stockmayer(i))
#define d_hl_ent(size) ((size) <= 30 ? d_hl_ent_ar[size] : 769 + jacobson_stockmayer(i))
#define hl_stack(I,J) tstackh_dg[inp((I))][inp((I)+1)][inp((J)-1)][inp((J))]
#define d_hl_stack(I,J) d_tstackh_dg[d_inp((I))][d_inp((I)+1)][d_inp((J)-1)][d_inp((J))]
int hl_energy(int i, int j);
//__device__ int d_hl_energy(int i, int j);
#define bl_ent(size) ((size) <= 30 ? bl_ent_ar[size] : 609 + jacobson_stockmayer(i))
#define d_bl_ent(size) ((size) <= 30 ? d_bl_ent_ar[size] : 609 + jacobson_stockmayer(i))
int bl_energy(int bl, int i, int j, int br);
//__device__ int d_bl_energy(int bl, int i, int j, int br);
int br_energy(int bl, int i, int j, int br);
//__device__ int d_br_energy(int bl, int i, int j, int br);
#define il_ent(size) ((size) <= 30 ? il_ent_ar[size] : 369 + jacobson_stockmayer(i))
#define d_il_ent(size) ((size) <= 30 ? d_il_ent_ar[size] : 369 + jacobson_stockmayer(i))
#define il_stack(I,J,K,L) (tstacki_dg[inp((I))][inp((I)+1)][inp((L))][inp((L)+1)] + \
tstacki_dg[inp((J)+1)][inp((J))][inp((K)+1)][inp((K))])
int il_energy(int i, int j, int k, int l);
//__device__ int d_il_energy(int i, int j, int k, int l);
#define dr_energy(I,J) dr_dangle_dg[inp((I))][inp((J))][inp((J)+1)]
#define d_dr_energy(I,J) d_dr_dangle_dg[d_inp((I))][d_inp((J))][d_inp((J)+1)]
#define dli_energy(I,J) dr_dangle_dg[inp((J))][inp((I))][inp((I)+1)]
#define d_dli_energy(I,J) d_dr_dangle_dg[d_inp((J))][d_inp((I))][d_inp((I)+1)]
#define dl_energy(I,J) dl_dangle_dg[inp((I)-1)][inp((I))][inp((J))]
#define d_dl_energy(I,J) d_dl_dangle_dg[d_inp((I)-1)][d_inp((I))][d_inp((J))]
#define dri_energy(I,J) dl_dangle_dg[inp((J)-1)][inp((J))][inp((I))]
#define d_dri_energy(I,J) d_dl_dangle_dg[d_inp((J)-1)][d_inp((J))][d_inp((I))]
#define ss_energy(I,J) 0
#define d_ss_energy(I,J) 0
#define dangles(i,j,i2,j2,k,l,k2,l2) ((dli_energy(j,k+1) + dri_energy(j2,k2+1)))
#define d_dangles(i,j,i2,j2,k,l,k2,l2) ((d_dli_energy(j,k+1) + d_dri_energy(j2,k2+1)))
#define sspenalty(a) (npp * (a))
#define d_sspenalty(a) (d_npp * (a))
#define termaupenalty(I,J) termaupenalty_ar[inp(I)][inp(J)]
#define mk_pf(X) (exp ((X)/ (-61.6321)))
#define scale(size) scale_ar[size]
#define d_scale(size) d_scale_ar[size]
/* initializations */
void rnalib_init(toptions *opts, tsequence *seq);
void rnalib_free();
/* ---------------------------------------------------------------------------
rnalib.c
RNA energy library, based on Haskell implementation by Jens Reeder
Author: Peter Steffen
$Date: 2006/04/18 08:40:51 $
--------------------------------------------------------------------------- */
/* ---------------------------------------------------------------------------------------------------- */
/* input handling */
/* ---------------------------------------------------------------------------------------------------- */
/* The alphabet */
#define A 0
#define C 1
#define G 2
#define U 3
#define N 4
static int n;
static int d_n;
static char *z;
static char *d_z;
#ifdef DIFF3
#define ROUND_THREADS
#endif
/* initialize basepair predicate */
/* Load the basepair predicate table from energyFile and upload it to the
 * device constant g_canPair.
 * Fix: cudaMemcpyToSymbol was given the symbol NAME as a string
 * ("g_canPair"); that overload was removed in CUDA 5.0 — pass the symbol
 * itself. */
static void init_canPair(){
  if(fread(canPair, sizeof(char), ASIZE*ASIZE, energyFile) != ASIZE*ASIZE)
    printf("File read error.");
  CUDA_SAFE_CALL(cudaMemcpyToSymbol(g_canPair, (char *) canPair,
                                    ASIZE*ASIZE*sizeof(char), 0,
                                    cudaMemcpyHostToDevice));
}
/* initialize stackpair predicate */
/* Load the stackpair predicate table from energyFile (host side only;
   the commented-out upload was disabled because the device recomputes
   stacking from d_canPair). */
static void init_canStackPair(){
  size_t want = (size_t) ASIZE*ASIZE*ASIZE*ASIZE;
  if (fread(canStackPair, sizeof(char), want, energyFile) != want)
    printf("File read error.");
  /* // no, it's recomputed from d_canPair
  CUDA_SAFE_CALL(cudaMemcpyToSymbol("d_canStackPair", (char *) canStackPair,
                                    ASIZE*ASIZE*ASIZE*ASIZE*sizeof(char), 0,
                                    cudaMemcpyHostToDevice));
  */
}
/* alternative definition of basepair, working on characters */
/* Alternative basepair predicate working directly on lowercase
   characters of the global input z: Watson-Crick pairs (au/ua/cg/gc)
   and wobble pairs (gu/ug). */
char basepair(int i, int j){
  char a = z[i], b = z[j];
  return ((a == 'a') && (b == 'u')) ||
         ((a == 'u') && (b == 'a')) ||
         ((a == 'c') && (b == 'g')) ||
         ((a == 'g') && (b == 'c')) ||
         ((a == 'g') && (b == 'u')) ||
         ((a == 'u') && (b == 'g'));
}
//__device__ char d_basepair(int i, int j){
// return(((d_z[i] == 'a') && (d_z[j] == 'u')) ||
// ((d_z[i] == 'u') && (d_z[j] == 'a')) ||
// ((d_z[i] == 'c') && (d_z[j] == 'g')) ||
// ((d_z[i] == 'g') && (d_z[j] == 'c')) ||
// ((d_z[i] == 'g') && (d_z[j] == 'u')) ||
// ((d_z[i] == 'u') && (d_z[j] == 'g')));
//}
/* ---------------------------------------------------------------------------------------------------- */
/* Energy stuff */
/* ---------------------------------------------------------------------------------------------------- */
/* subword length */
#define size_of(I,J) ((J)-(I))
/* Some constants and utilities */
/* ---------------------------- */
/* ---------------------------------------------------------------------------------------------------- */
/* Stacking Region Energies */
/* ---------------------------------------------------------------------------------------------------- */
/*
Stabilizing energies for canonical basepairs: AU, CG, GU
Basepairing: Parameters are in 5' 3' order.
stack_dg a b c d
^ ^ ^ ^
| |_| |
|_____|
*/
/* Load the stacking-region energy table and upload it to the device
 * constant g_stack_dg.
 * Fix: pass the symbol, not its name as a string — the string overload of
 * cudaMemcpyToSymbol was removed in CUDA 5.0. */
static void init_stack_dg()
{
  if(fread(stack_dg, sizeof(int), ASIZE*ASIZE*ASIZE*ASIZE, energyFile) != ASIZE*ASIZE*ASIZE*ASIZE)
    printf("File read error.");
  CUDA_SAFE_CALL(cudaMemcpyToSymbol(g_stack_dg, (int *) stack_dg,
                                    ASIZE*ASIZE*ASIZE*ASIZE*sizeof(int), 0,
                                    cudaMemcpyHostToDevice));
}
/* ---------------------------------------------------------------------------------------------------- */
/* Hairpin Loop Energies */
/* ---------------------------------------------------------------------------------------------------- */
/* Load the hairpin-loop entropy table (sizes 0..30) and upload it to the
 * device constant g_hl_ent_ar.
 * Fix: pass the symbol instead of its string name (removed in CUDA 5.0). */
static void init_hl_ent_ar()
{
  if(fread(hl_ent_ar, sizeof(int), 31, energyFile) != 31)
    printf("File read error.");
  CUDA_SAFE_CALL(cudaMemcpyToSymbol(g_hl_ent_ar, (int *) hl_ent_ar,
                                    31*sizeof(int), 0,
                                    cudaMemcpyHostToDevice));
}
/* Stacking Interaction */
/* ------------------------------ */
/* Load the hairpin terminal-stacking table and upload it to the device
 * constant d_tstackh_dg.
 * Fix: pass the symbol instead of its string name (removed in CUDA 5.0). */
static void init_tstackh_dg()
{
  if(fread(tstackh_dg, sizeof(int), ASIZE*ASIZE*ASIZE*ASIZE, energyFile) != ASIZE*ASIZE*ASIZE*ASIZE)
    printf("File read error.");
  CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_tstackh_dg, (int *) tstackh_dg,
                                    ASIZE*ASIZE*ASIZE*ASIZE*sizeof(int), 0,
                                    cudaMemcpyHostToDevice));
}
#define hl_stack(I,J) tstackh_dg[inp((I))][inp((I)+1)][inp((J)-1)][inp((J))]
#define d_hl_stack(I,J) d_tstackh_dg[d_inp((I))][d_inp((I)+1)][d_inp((J)-1)][d_inp((J))]
/* Tetraloop Bonus Energies */
/* ------------------------------ */
/* Ultrastable tetra-loops & energy bonus at 37 °C: */
/* Load the ultrastable tetraloop bonus table (6-base lookup) and mirror it
   into the device symbol "d_hl_tetra". */
static void init_hl_tetra()
{
    const size_t count = (size_t)ASIZE * ASIZE * ASIZE * ASIZE * ASIZE * ASIZE;
    if (fread(hl_tetra, sizeof(int), count, energyFile) != count)
        printf("File read error.");
    CUDA_SAFE_CALL(cudaMemcpyToSymbol("d_hl_tetra", (int *) hl_tetra,
                                      count * sizeof(int), 0,
                                      cudaMemcpyHostToDevice));
}
/* Terminal AU penalty is included in hl_stack, */
/* therefore it must be added explicitely only for (size == 3) */
/* Free energy of a hairpin loop closed by base pair (i,j).
   size == 3: entropy + terminal-AU penalty (the penalty is folded into
              hl_stack for longer loops, so it is added explicitly here);
   size == 4: entropy + mismatch stack + tetraloop bonus;
   size  > 4: entropy + mismatch stack;
   size  < 3: grammar error -> abort. */
int hl_energy(int i, int j){
    int loop_size = j - i - 1;
    int ent       = hl_ent(loop_size);
    int mismatch  = hl_stack(i,j);
    int tetra     = hl_tetra[inp(i)][inp(i+1)][inp(i+2)][inp(i+3)][inp(i+4)][inp(i+5)];
    int au_pen    = termaupenalty_ar[inp(i)][inp(j)];
    if (loop_size == 3)
        return ent + au_pen;
    if (loop_size == 4)
        return ent + mismatch + tetra;
    if (loop_size > 4)
        return ent + mismatch;
    printf("hairpin loop < 3 found. Please use production\n");
    printf(" hl <<< lbase -~~ (region `with` minsize 3) ~~- lbase\n");
    printf("in your grammar.\n");
    exit(1);
}
//__device__ int d_hl_energy(int i, int j){
// int size;
// int entropy;
// int tetra_bonus, stack_mismatch;
// int termaupen;
//
// size = j-i-1;
// entropy = d_hl_ent(size);
// stack_mismatch = d_hl_stack(i,j);
// tetra_bonus = d_hl_tetra[d_inp(i)][d_inp(i+1)][d_inp(i+2)][d_inp(i+3)][d_inp(i+4)][d_inp(i+5)];
// termaupen = d_termaupenalty_ar[d_inp(i)][d_inp(j)];
//
// if (size==3) return(entropy + termaupen);
// if (size==4) return(entropy + stack_mismatch + tetra_bonus);
// if (size>4) return(entropy + stack_mismatch);
// printf("hairpin loop < 3 found. Please use production\n");
// printf(" hl <<< lbase -~~ (region `with` minsize 3) ~~- lbase\n");
// printf("in your grammar.\n");
// exit(1);
//}
//
/* ---------------------------------------------------------------------------------------------------- */
/* Bulge Loop Energies */
/* ---------------------------------------------------------------------------------------------------- */
/* Load the bulge-loop entropy table (31 entries) and mirror it into the
   device symbol "g_bl_ent_ar". */
static void init_bl_ent_ar()
{
    const size_t count = 31;
    if (fread(bl_ent_ar, sizeof(int), count, energyFile) != count)
        printf("File read error.");
    CUDA_SAFE_CALL(cudaMemcpyToSymbol("g_bl_ent_ar", (int *) bl_ent_ar,
                                      count * sizeof(int), 0,
                                      cudaMemcpyHostToDevice));
}
/* Bulge Loop Left */
/* ------------------------------ */
/*
. .
. .
(bl+3) - (br-2)
If size == 1 the terminal aupenalty for the stem starting after the bulge (that is (bl+2) - (br-1))
bl+1
bl - br
is added possibly. This is unwanted. Since we do not have a chance to check the size of the bulge when parsing the stem
we substract the possible penalty here!
*/
/* Energy of a left bulge (i,j) inside the pair (bl,br).
   For size 1 the terminal-AU penalty of the inner stem (added elsewhere when
   the stem is parsed) is unwanted, so it is subtracted here; for larger
   bulges the penalty of the outer pair applies. Size < 1 is fatal. */
int bl_energy(int bl, int i, int j, int br){
    int stack_e = stack_dg[inp(bl)][inp(j+1)][inp(br-1)][inp(br)];
    int bulge   = size_of(i,j);
    int ent     = bl_ent(bulge);
    if (bulge < 1) { printf("bl_energy size < 1\n"); exit(-1); }
    if (bulge == 1)
        return stack_e + ent - termaupenalty_ar[inp(bl+2)][inp(br-1)];
    return ent + termaupenalty_ar[inp(bl)][inp(br)];
}
//__device__ int d_bl_energy(int bl, int i, int j, int br){
// int stacking, size, entropy;
//
// stacking = d_stack_dg[d_inp(bl)][d_inp(j+1)][d_inp(br-1)][d_inp(br)];
// size = size_of(i,j);
// entropy = d_bl_ent(size);
//
// if (size==1) return(stacking + entropy - d_termaupenalty_ar[d_inp(bl+2)][d_inp(br-1)]);
// else if (size>1) return(entropy + d_termaupenalty_ar[d_inp(bl)][d_inp(br)]);
// else {printf("bl_energy size < 1\n"); exit(-1);}
//}
/* Bulge Loop Right */
/* ------------------------------ */
/* Energy of a right bulge (i,j) inside the pair (bl,br) — mirror image of
   bl_energy: size 1 subtracts the inner stem's terminal-AU penalty, larger
   sizes add the outer pair's penalty.
   FIX: size < 1 used to fall silently into the size > 1 formula; it is now
   rejected as fatal, consistent with bl_energy. */
int br_energy(int bl, int i, int j, int br){
    int stacking, size, entropy;
    stacking = stack_dg[inp(bl)][inp(bl+1)][inp(i)][inp(br)];
    size = size_of(i,j);
    entropy = bl_ent(size);
    if (size==1) return(stacking + entropy - termaupenalty_ar[inp(bl+1)][inp(br-2)]);
    else if (size>1) return(entropy + termaupenalty_ar[inp(bl)][inp(br)]);
    else {printf("br_energy size < 1\n"); exit(-1);}
}
//__device__ int d_br_energy(int bl, int i, int j, int br){
// int stacking, size, entropy;
//
// stacking = d_stack_dg[d_inp(bl)][d_inp(bl+1)][d_inp(i)][d_inp(br)];
// size = size_of(i,j);
// entropy = d_bl_ent(size);
//
// if (size==1) return(stacking + entropy - d_termaupenalty_ar[d_inp(bl+1)][d_inp(br-2)]);
// if (size>1) return(entropy + d_termaupenalty_ar[d_inp(bl)][d_inp(br)]);
//}
/* ---------------------------------------------------------------------------------------------------- */
/* Interior Loop Energies */
/* ---------------------------------------------------------------------------------------------------- */
/* Entropic Term */
/* ------------------------------ */
/*
DESTABILIZING ENERGIES BY SIZE OF LOOP
il_ent 1 and 2 undefined in the tables of Mathews et al. since
special energy values exist
*/
/* Load the interior-loop entropy table (31 entries; sizes 1 and 2 are
   covered by the dedicated il11/il21/il22 tables) and mirror it into the
   device symbol "g_il_ent_ar". */
static void init_il_ent_ar()
{
    const size_t count = 31;
    if (fread(il_ent_ar, sizeof(int), count, energyFile) != count)
        printf("File read error.");
    CUDA_SAFE_CALL(cudaMemcpyToSymbol("g_il_ent_ar", (int *) il_ent_ar,
                                      count * sizeof(int), 0,
                                      cudaMemcpyHostToDevice));
}
/* Stacking Interaction */
/* ------------------------------ */
/*
STACKING ENERGIES : TERMINAL MISMATCHES AND BASE-PAIRS.
Stabilizing energies for canonical basepairs: AU, CG, GU
Basepairing: Paramers are in 5' 3' order.
tstacki_dg a b c d
^ ^ ^ ^
| |_| |
|_____|
*/
/* Load the interior-loop terminal-mismatch stacking table and mirror it
   into the device symbol "g_tstacki_dg". */
static void init_tstacki_dg()
{
    const size_t count = (size_t)ASIZE * ASIZE * ASIZE * ASIZE;
    if (fread(tstacki_dg, sizeof(int), count, energyFile) != count)
        printf("File read error.");
    CUDA_SAFE_CALL(cudaMemcpyToSymbol("g_tstacki_dg", (int *) tstacki_dg,
                                      count * sizeof(int), 0,
                                      cudaMemcpyHostToDevice));
}
/*
the time intensive n^4 version of internal loops
(used in reduced form O(n^2*c^2) where c is the maximal internal loop size)
(i,j) = left region, (k,l) = right region
i --- l+1
5' / \ 3'
| i+1 l / \
| | | |
\ / | | |
3' | | 5'
j k+1
\ /
j+1 --- k
*/
/* Ninio's equation */
#define il_asym(SL,SR) min(300,((abs((SL)-(SR)))*50))
#define d_il_asym(SL,SR) min(300,((abs((SL)-(SR)))*50))
/* include internal loop energies */
//#include "intloop11.c"
//#include "intloop21.c"
//#include "intloop22.c"
#define il11_energy(lb,rb) intloop11[inp((lb))][inp((lb)+1)][inp((lb)+2)][inp((rb)-2)][inp((rb)-1)][inp((rb))]
#define d_il11_energy(lb,rb) d_intloop11[d_inp((lb))][d_inp((lb)+1)][d_inp((lb)+2)][d_inp((rb)-2)][d_inp((rb)-1)][d_inp((rb))]
#define il12_energy(lb,rb) intloop21[inp(lb)][inp((lb)+1)][inp((lb)+2)][inp((rb)-3)][inp((rb)-2)][inp((rb)-1)][inp(rb)]
#define d_il12_energy(lb,rb) d_intloop21[d_inp(lb)][d_inp((lb)+1)][d_inp((lb)+2)][d_inp((rb)-3)][d_inp((rb)-2)][d_inp((rb)-1)][d_inp(rb)]
#define il21_energy(lb,rb) intloop21[inp((rb)-2)][inp((rb)-1)][inp(rb)][inp(lb)][inp((lb)+1)][inp((lb)+2)][inp((lb)+3)]
#define d_il21_energy(lb,rb) d_intloop21[d_inp((rb)-2)][d_inp((rb)-1)][d_inp(rb)][d_inp(lb)][d_inp((lb)+1)][d_inp((lb)+2)][d_inp((lb)+3)]
#define int22_energy(a,b,c,d,e,f,g,h) intloop22[inp(a)][inp(b)][inp(c)][inp(d)][inp(e)][inp(f)][inp(g)][inp(h)]
#define d_int22_energy(a,b,c,d,e,f,g,h) d_intloop22[d_inp(a)][d_inp(b)][d_inp(c)][d_inp(d)][d_inp(e)][d_inp(f)][d_inp(g)][d_inp(h)]
#define il22_energy(lb,rb) int22_energy(lb,((lb)+1),((lb)+2),((lb)+3),((rb)-3),((rb)-2),((rb)-1),rb)
#define d_il22_energy(lb,rb) d_int22_energy(lb,((lb)+1),((lb)+2),((lb)+3),((rb)-3),((rb)-2),((rb)-1),rb)
/* Energy of an interior loop with left region (i,j) and right region (k,l).
   Loops with a side longer than 2 use the generic entropy + stack + Ninio
   asymmetry formula; 1x1 / 1x2 / 2x1 / 2x2 loops use dedicated tables.
   65000 is an unreachable fallback sentinel. */
int il_energy(int i, int j, int k, int l)
{
    int left  = size_of(i,j);
    int right = size_of(k,l);
    if (left > 2 || right > 2)
        return il_ent(left + right) + il_stack(i,j,k,l) + il_asym(left,right);
    if (left == 1 && right == 1) return il11_energy(i,l+1);
    if (left == 1 && right == 2) return il12_energy(i,l+1);
    if (left == 2 && right == 1) return il21_energy(i,l+1);
    if (left == 2 && right == 2) return il22_energy(i,l+1);
    return 65000;
}
//__device__ int d_il_energy(int i, int j, int k, int l)
//{
// int sl, sr;
// sl = size_of(i,j);
// sr = size_of(k,l);
// if ((sl > 2) || (sr > 2))
// return((d_il_ent (sl + sr))
// + (d_il_stack (i,j,k,l))
// + (d_il_asym(sl,sr))); else
// if ((sl == 1) && (sr == 1)) return(d_il11_energy(i,l+1)); else
// if ((sl == 1) && (sr == 2)) return(d_il12_energy(i,l+1)); else
// if ((sl == 2) && (sr == 1)) return(d_il21_energy(i,l+1)); else
// if ((sl == 2) && (sr == 2)) return(d_il22_energy(i,l+1)); else
// return 65000;
//}
/* ---------------------------------------------------------------------------------------------------- */
/* Dangling ends */
/* ---------------------------------------------------------------------------------------------------- */
/* dangle right */
/* ------------------------------ */
/* Load the right-dangle energy table and mirror it into the device symbol
   "d_dr_dangle_dg". */
static void init_dr_dangle_dg()
{
    const size_t count = (size_t)ASIZE * ASIZE * ASIZE;
    if (fread(dr_dangle_dg, sizeof(int), count, energyFile) != count)
        printf("File read error.");
    CUDA_SAFE_CALL(cudaMemcpyToSymbol("d_dr_dangle_dg", (int *) dr_dangle_dg,
                                      count * sizeof(int), 0,
                                      cudaMemcpyHostToDevice));
}
/* Load the left-dangle energy table and mirror it into the device symbol
   "d_dl_dangle_dg". */
static void init_dl_dangle_dg()
{
    const size_t count = (size_t)ASIZE * ASIZE * ASIZE;
    if (fread(dl_dangle_dg, sizeof(int), count, energyFile) != count)
        printf("File read error.");
    CUDA_SAFE_CALL(cudaMemcpyToSymbol("d_dl_dangle_dg", (int *) dl_dangle_dg,
                                      count * sizeof(int), 0,
                                      cudaMemcpyHostToDevice));
}
#define ss_energy(I,J) 0
#define d_ss_energy(I,J) 0
/* ---------------------------------------------------------------------------------------------------- */
/* special pseudoknot energies */
/* ---------------------------------------------------------------------------------------------------- */
/* This are the dangling energies for the bases bridging the stacks */
#define dangles(i,j,i2,j2,k,l,k2,l2) ((dli_energy(j,k+1) + dri_energy(j2,k2+1)))
#define d_dangles(i,j,i2,j2,k,l,k2,l2) ((d_dli_energy(j,k+1) + d_dri_energy(j2,k2+1)))
#define sspenalty(a) (npp * (a))
#define d_sspenalty(a) (d_npp * (a))
/* ---------------------------------------------------------------------------------------------------- */
/* Terminal AU penalty */
/* ---------------------------------------------------------------------------------------------------- */
/* Load the terminal-AU penalty table (per basepair) and mirror it into the
   device symbol "g_termaupenalty_ar". */
static void init_termaupenalty_ar()
{
    const size_t count = (size_t)ASIZE * ASIZE;
    if (fread(termaupenalty_ar, sizeof(int), count, energyFile) != count)
        printf("File read error.");
    CUDA_SAFE_CALL(cudaMemcpyToSymbol("g_termaupenalty_ar", (int *) termaupenalty_ar,
                                      count * sizeof(int), 0,
                                      cudaMemcpyHostToDevice));
}
/* Internal loop energy tables (1x1, 2x1/1x2, 2x2), read in file order and
   mirrored into the device symbols d_intloop11/21/22.
   FIX: all three loaders reported "File write error." on a failed fread;
   the message now matches the operation and the other loaders. */
static void init_intloop11(){
    if(fread(intloop11, sizeof(int), (ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE), energyFile) != (ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE))
        printf("File read error.");
    CUDA_SAFE_CALL(cudaMemcpyToSymbol("d_intloop11", (int *) intloop11,
                                      (ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*sizeof(int), 0,
                                      cudaMemcpyHostToDevice));
}
static void init_intloop21(){
    if(fread(intloop21, sizeof(int), (ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE), energyFile) !=
       (ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE))
        printf("File read error.");
    CUDA_SAFE_CALL(cudaMemcpyToSymbol("d_intloop21", (int *) intloop21,
                                      (ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*sizeof(int), 0,
                                      cudaMemcpyHostToDevice));
}
static void init_intloop22(){
    if(fread(intloop22, sizeof(int), (ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE), energyFile) !=
       (ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE))
        printf("File read error.");
    CUDA_SAFE_CALL(cudaMemcpyToSymbol("d_intloop22", (int *) intloop22,
                                      (ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*(ASIZE)*sizeof(int), 0,
                                      cudaMemcpyHostToDevice));
}
/* ---------------------------------------------------------------------------------------------------- */
/* Scale */
/* ---------------------------------------------------------------------------------------------------- */
/* Precompute scale_ar[i] = mean_scale^(-i) for i = 0..n.
   Slot n+1 is allocated but never written here (mcalloc presumably zeroes
   it — TODO confirm). */
static void init_scale_ar()
{
    scale_ar = (double *) mcalloc(n+2, sizeof(double));
    scale_ar[0] = 1.0;
    for (int i = 1; i <= n; i++)
        scale_ar[i] = scale_ar[i-1] / mean_scale;
}
/* ---------------------------------------------------------------------------------------------------- */
/* Initialize rna input and energy tables */
/* ---------------------------------------------------------------------------------------------------- */
/* Initialize the RNA library: bind the input sequence (1-based via the z
   pointer), upload it to the device, and load every energy table from
   ENERGYFILE into host globals and device symbols.
   Must be called before any energy evaluation; pair with rnalib_free().
   FIX: the cudaMemcpy of the sequence was unchecked while the matching
   cudaMalloc was wrapped in CUDA_SAFE_CALL — now checked consistently. */
void rnalib_init(toptions *opts, tsequence *seq)
{
    /* initialize input and pairing tables */
    z = seq->seq - 1;
    n = seq->length;
    convert_input(1, z, n);
    CUDA_SAFE_CALL(cudaMalloc((void **) &d_z, (n+2)*sizeof(char)));
    CUDA_SAFE_CALL(cudaMemcpy(d_z, z, (n+2)*sizeof(char), cudaMemcpyHostToDevice));
    d_n=n;
    if((energyFile=fopen(ENERGYFILE, "rb"))==NULL) {
        printf("Cannot open file %s.\n",ENERGYFILE);
        exit(1);
    }
    init_canPair();
    init_canStackPair();
    /* initialize energies — the call order must match the table layout of
       ENERGYFILE, since each init_* reads sequentially from the stream */
    init_stack_dg();
    init_hl_ent_ar();
    init_tstackh_dg();
    init_hl_tetra();
    init_bl_ent_ar();
    init_il_ent_ar();
    init_tstacki_dg();
    init_dr_dangle_dg();
    init_dl_dangle_dg();
    init_termaupenalty_ar();
    init_intloop11();
    init_intloop21();
    init_intloop22();
    init_scale_ar();
    fclose(energyFile);
}
/* Release host-side resources allocated by rnalib_init().
   FIX: clear the global pointer after freeing so repeated free / re-init
   cycles cannot double-free or read a dangling pointer. */
void rnalib_free()
{
    free(scale_ar);
    scale_ar = NULL;
}
/* data structures */
/* -------------------------------------------------------------------------------- */
#define size_of(I,J) ((J)-(I))
#define d_il11_energy(lb,rb) d_intloop11[d_inp((lb))][d_inp((lb)+1)][d_inp((lb)+2)][d_inp((rb)-2)][d_inp((rb)-1)][d_inp((rb))]
#define d_il12_energy(lb,rb) d_intloop21[d_inp(lb)][d_inp((lb)+1)][d_inp((lb)+2)][d_inp((rb)-3)][d_inp((rb)-2)][d_inp((rb)-1)][d_inp(rb)]
#define d_il21_energy(lb,rb) d_intloop21[d_inp((rb)-2)][d_inp((rb)-1)][d_inp(rb)][d_inp(lb)][d_inp((lb)+1)][d_inp((lb)+2)][d_inp((lb)+3)]
#define d_int22_energy(a,b,c,d,e,f,g,h) d_intloop22[d_inp(a)][d_inp(b)][d_inp(c)][d_inp(d)][d_inp(e)][d_inp(f)][d_inp(g)][d_inp(h)]
#define d_il22_energy(lb,rb) d_int22_energy(lb,((lb)+1),((lb)+2),((lb)+3),((rb)-3),((rb)-2),((rb)-1),rb)
#define d_il_asym(SL,SR) min(300,((abs((SL)-(SR)))*50))
/* Device hairpin-loop energy for closing pair (i,j); d_z is the encoded
   sequence read implicitly by the d_* lookup macros.
   Unlike the host hl_energy, loops of size < 3 are not rejected here — the
   size > 4 formula is returned instead (grammar is assumed to enforce
   minsize 3; see the commented-out checks below). */
__device__ int d_hl_energy(int i, int j, char *d_z){
int size;
int entropy;
int tetra_bonus, stack_mismatch;
int termaupen;
size = j-i-1;
entropy = d_hl_ent(size);
stack_mismatch = d_hl_stack(i,j);
// NOTE(review): reads d_z up to index i+5 regardless of loop size — confirm
// the sequence buffer always has this much slack near the right end.
tetra_bonus = d_hl_tetra[d_inp(i)][d_inp(i+1)][d_inp(i+2)][d_inp(i+3)][d_inp(i+4)][d_inp(i+5)];
termaupen = d_termaupenalty(i,j);
if (size==3) return(entropy + termaupen);
if (size==4) return(entropy + stack_mismatch + tetra_bonus);
return(entropy + stack_mismatch);
//printf("hairpin loop < 3 found. Please use production\n");
//printf(" hl <<< lbase -~~ (region `with` minsize 3) ~~- lbase\n");
//printf("in your grammar.\n");
//exit(1);
}
/* Device left-bulge energy: bulge (i,j) inside pair (bl,br); d_z is read
   implicitly by the d_* macros. Size 1 subtracts the inner stem's
   terminal-AU penalty, larger sizes add the outer pair's penalty. */
__device__ int d_bl_energy(int bl, int i, int j, int br, char *d_z){
    int stack_e = d_stack_dg_ac(bl, j+1, br-1, br);
    int bulge   = size_of(i,j);
    int ent     = d_bl_ent(bulge);
    if (bulge == 1)
        return stack_e + ent - d_termaupenalty(bl+2, br-1);
    return ent + d_termaupenalty(bl, br);
}
/* Device right-bulge energy — mirror image of d_bl_energy. */
__device__ int d_br_energy(int bl, int i, int j, int br, char *d_z){
    int stack_e = d_stack_dg_ac(bl, bl+1, i, br);
    int bulge   = size_of(i,j);
    int ent     = d_bl_ent(bulge);
    if (bulge == 1)
        return stack_e + ent - d_termaupenalty(bl+1, br-2);
    return ent + d_termaupenalty(bl, br);
}
/* Device interior-loop energy: left region (i,j), right region (k,l); d_z
   is read implicitly by the d_* macros. Sides longer than 2 use the generic
   entropy + stack + asymmetry formula; small loops use dedicated tables. */
__device__ int d_il_energy(int i, int j, int k, int l, char *d_z)
{
    int left  = size_of(i,j);
    int right = size_of(k,l);
    if (left > 2 || right > 2)
        return d_il_ent(left + right) + d_il_stack(i,j,k,l) + d_il_asym(left,right);
    if (left == 1 && right == 1) return d_il11_energy(i,l+1);
    if (left == 1 && right == 2) return d_il12_energy(i,l+1);
    if (left == 2 && right == 1) return d_il21_energy(i,l+1);
    if (left == 2 && right == 2) return d_il22_energy(i,l+1);
    return 65000; // unreachable sentinel
}
|
1a4354bce7a1129df92b4bfc58e3088dade1bdf0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Decimate.h"
#include "Quadric_GPU.cuh"
#include "QEM_Data.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_for_each.h>
#include <tbb/mutex.h>
#include <map>
#include <thread>
#include <Timer.h>
#include <Random.h>
#include "../Random.cuh"
#include "../QueryDevice.h"
#include <Decimate/QuadricError.h>
using namespace Zephyr;
using namespace Zephyr::Common;
using namespace Zephyr::GPU;
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
/* Abort with file/line context when a CUDA/HIP API call returned an error.
   Compiled out unless CUDA_ERROR_CHECK is defined. */
inline void __cudaSafeCall(hipError_t err, const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
    if (err != hipSuccess)
    {
        fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                file, line, hipGetErrorString(err));
        exit(-1);
    }
#endif
    return;
}
/* Check for a pending kernel-launch error, then synchronize to surface
   asynchronous execution errors (the sync is expensive — debug use only).
   Compiled out unless CUDA_ERROR_CHECK is defined. */
inline void __cudaCheckError(const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
    hipError_t status = hipGetLastError();
    if (status != hipSuccess)
    {
        fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n",
                file, line, hipGetErrorString(status));
        exit(-1);
    }
    status = hipDeviceSynchronize();
    if (status != hipSuccess)
    {
        fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                file, line, hipGetErrorString(status));
        exit(-1);
    }
#endif
    return;
}
__constant__ double MAX_ERRORS[2]; // quadric error, max flip error
__device__
Quadric_GPU computeFaceQE(INDEX_TYPE IdStart, INDEX_TYPE* index, Vector3f* vertices)
{
Vector3f v0 = vertices[index[IdStart]];
Vector3f v1 = vertices[index[IdStart + 1]];
Vector3f v2 = vertices[index[IdStart + 2]];
Vector3f n = (v1 - v0).cross(v2 - v0);
double area = n.norm();
if (area > FLT_MIN)
{
n /= area;
area *= 0.5;
}
double a = n[0];
double b = n[1];
double c = n[2];
double d = -(v0.dot(n));
Quadric_GPU q(a, b, c, d);
q *= area;
return q;
}
/* QEM collapse error for candidate `id`: sum the quadrics of all faces in
   its cached one-ring and evaluate at the vertex kept by the collapse.
   10000.0 is the "reject" sentinel, returned for invalid candidates and for
   errors exceeding the MAX_ERRORS[0] threshold. */
__device__
double computeError(int id, QEM_Data* QEM_Datas)
{
    auto data = QEM_Datas[id];
    if (!data.bValid)
        return 10000.0;
    Quadric_GPU q;
    for (int idx = 0; idx < data.indexCount; idx += 3)
        q += computeFaceQE(idx, data.indices, data.vertices);
    double err = q.evalute(data.vertices[data.vertexToKeepId]);
    return (err < MAX_ERRORS[0]) ? err : 10000.0;
}
/* One thread per sampled half-edge: write its QEM collapse error into
   errors[i].
   NOTE(review): no bounds guard — decimateMC launches exactly
   oneIterationBlockCount * threadPerBlock threads, matching the array
   sizes; any over-provisioned launch would write out of range. A count
   parameter cannot be added without changing the call sites. */
__global__
void computeErrors(QEM_Data* QEM_Datas, double* errors)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
errors[i] = computeError(i, QEM_Datas);
}
/* Block-wide argmin: stage blockDim.x candidate errors in dynamic shared
   memory (blockDim.x * sizeof(double) bytes), then thread 0 scans them and
   writes the half-edge id with the lowest error — or -1 when none beat the
   10000 sentinel — to bestEdge[blockIdx.x]. `random` holds the sampled
   half-edge ids in the same layout as `errors`. */
__global__
void selectBestEdge(double* errors, int* random, int* bestEdge)
{
    extern __shared__ double sErrors[];
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    sErrors[threadIdx.x] = errors[gid];
    __syncthreads(); // all of the block's errors must be staged before the scan
    if (threadIdx.x == 0)
    {
        double lowest = 10000.0f;
        int winner = -1;
        for (int k = 0; k < blockDim.x; ++k)
        {
            if (sErrors[k] < lowest)
            {
                lowest = sErrors[k];
                winner = random[blockIdx.x * blockDim.x + k];
            }
        }
        bestEdge[blockIdx.x] = winner;
    }
}
/* Host-side mirror of the __constant__ double MAX_ERRORS[2] array; copied
   over wholesale in decimateMC, so the layout must remain exactly two
   doubles in this order: [0] = max quadric error, [1] = max normal-flip
   deviation. */
struct ConstError
{
ConstError(double maxQuadricError_, double maxNormalFlipDeviation_)
: maxQuadricError(maxQuadricError_), maxNormalFlipDeviation(maxNormalFlipDeviation_) {}
double maxQuadricError;        // -> MAX_ERRORS[0]
double maxNormalFlipDeviation; // -> MAX_ERRORS[1]
};
/* Entry point for GPU mesh decimation: dispatch to the requested algorithm,
   garbage-collect the mesh, and print timing/statistics.
   Returns the number of performed collapses (-1 if `type` was unhandled). */
int GPU::decimate(Common::OpenMeshMesh & mesh, unsigned int targetFaceCount, unsigned int binSize, Algorithm::DecimationType type)
{
    Common::Timer timer;
    auto& omesh = mesh.getMesh();
    const auto previousFaceCount = omesh.n_faces();
    int collapseCount = -1;
    std::cout << "Using ";
    if (Algorithm::DecimationType::GPU_RANDOM_DECIMATE == type)
    {
        std::cout << "GPU Random Decimation..." << std::endl;
        collapseCount = GPU::decimateMC(mesh, targetFaceCount, binSize);
    }
    else if (Algorithm::DecimationType::GPU_SUPER_VERTEX == type)
    {
        std::cout << "GPU Super Vertex..." << std::endl;
        collapseCount = GPU::decimateSuperVertex(mesh, targetFaceCount, binSize);
    }
    const auto elapseTime = timer.getElapsedTime(); // stop timing before cleanup
    omesh.garbage_collection();
    const auto remainingFaceCount = omesh.n_faces();
    std::cout << "Decimation done in " << elapseTime << " sec" << std::endl;
    std::cout << "Original Face Count: " << previousFaceCount << std::endl;
    std::cout << "Target Face Count: " << targetFaceCount << std::endl;
    std::cout << "Removed Face Count: " << collapseCount << std::endl;
    std::cout << "Decimated Face Count: " << remainingFaceCount << std::endl;
    std::cout << "Percentage decimated: " << ((previousFaceCount - remainingFaceCount) / (float)previousFaceCount) * 100.0f << " %" << std::endl;
    return collapseCount;
}
/* Monte-Carlo randomized decimation: each iteration samples
   oneIterationSelectionSize half-edges, scores them on the GPU (one block
   per candidate bin), collapses each block's best edge on the host, and
   overlaps the next batch's random generation / device->host copy with the
   host-side collapsing. Stops when the target face count is reached or the
   per-iteration collapse rate drops below 5% of the block count.
   Returns the total number of performed collapses. */
int GPU::decimateMC(Common::OpenMeshMesh & mesh, unsigned int targetFaceCount, unsigned int binSize)
{
const float maxQuadricError = 0.1f;
const float maxNormalFlipDeviation = 45.0;
// NOTE(review): retryCount is never incremented below, so maxRetryCount is
// effectively dead — the loop is bounded by the collapse-rate test only.
const int maxRetryCount = 50;
// set the constant memory data
// (layout must match __constant__ double MAX_ERRORS[2]; only MAX_ERRORS[0]
// is read in this file — presumably [1] is for flip checks elsewhere.
// NOTE(review): this hipMemcpyToSymbol's return value is unchecked.)
ConstError constError(maxQuadricError, maxNormalFlipDeviation);
hipMemcpyToSymbol(MAX_ERRORS, (ConstError*)&constError, sizeof(constError));
auto& omesh = mesh.getMesh();
int retryCount = 0;
size_t initialFaceCount = omesh.n_faces();
size_t currentFaceCount = initialFaceCount;
size_t totalHalfEdgeCount = omesh.n_halfedges();
size_t totalCollapseRequired = (initialFaceCount - targetFaceCount) / 2;
// NOTE(review): selectedErrorEdgesPerThread and randomGenerators are
// declared but never used in this function.
int numOfThreads = std::thread::hardware_concurrency();
std::vector<std::vector<std::pair<float, HalfedgeHandle>>> selectedErrorEdgesPerThread(numOfThreads);
std::vector<std::shared_ptr<RandomGenerator>> randomGenerators;
//QueryDevice::printQuery();
// compute the total number of block we require to complete the task
int N = totalCollapseRequired;
int threadPerBlock = binSize;
// check how many block we can run together at once
int oneIterationBlockCount = QueryDevice::computeOptimalBlockCount(N, threadPerBlock, 0);
int oneIterationSelectionSize = oneIterationBlockCount * threadPerBlock;
std::cout << "1 iteration block count: " << oneIterationBlockCount << std::endl;
std::cout << "1 iteration selection size: " << oneIterationSelectionSize << std::endl;
int randomSequence = 0;
// pinned host buffers allow the hipMemcpyAsync below to be truly async
thrust::device_vector<int> d_randomEdgesId(oneIterationSelectionSize);
thrust::host_vector<int, thrust::system::cuda::experimental::pinned_allocator<int>> h_randomEdgesId(oneIterationSelectionSize);
thrust::device_vector<double> d_Errors(oneIterationBlockCount);
thrust::device_vector<int> d_bestEdges(oneIterationBlockCount);
thrust::host_vector <int, thrust::system::cuda::experimental::pinned_allocator<int>> h_BestEdge(oneIterationBlockCount);
CopyPartialMesh partialMesh(oneIterationSelectionSize);
int* d_BestEdges_ptr = thrust::raw_pointer_cast(&d_bestEdges[0]);
int* d_randomEdgesId_ptr = thrust::raw_pointer_cast(&d_randomEdgesId[0]);
double* d_Errors_ptr = thrust::raw_pointer_cast(&d_Errors[0]);
// do the first random generation outside of iteration.
Random::generateRandomInt(d_randomEdgesId, 0, (int)totalHalfEdgeCount - 1, randomSequence);
h_randomEdgesId = d_randomEdgesId;
int totalCollapseCount = 0;
int garbageCollectTrigger = oneIterationBlockCount / 2;
// do the exact collapse
int collapseCount = std::numeric_limits<int>::max();
while (retryCount < maxRetryCount && currentFaceCount > targetFaceCount && collapseCount > (0.05 * oneIterationBlockCount))
{
collapseCount = 0;
// Data marshalling and CUDA kernel call
{
// Synchronize the async copy of random edge id from device to host.
hipDeviceSynchronize();
//Timer copyPartialTimer;
QEM_Data* d_QEM_Datas_ptr = partialMesh.copyPartialMesh(omesh, h_randomEdgesId);
//std::cout << "Copy partial time: " << copyPartialTimer.getElapsedTime() << std::endl;
//Timer ComputeErrorTimer;
// Compute the Error of each random edge selected.
hipLaunchKernelGGL(( computeErrors) , dim3(oneIterationBlockCount), dim3(threadPerBlock) , 0, 0, d_QEM_Datas_ptr, d_Errors_ptr);
//std::cout << "Compute Error time: " << ComputeErrorTimer.getElapsedTime() << std::endl;
//CudaCheckError();
// Compare and find the edge with best score.
//Timer bestEdgeTimer;
hipLaunchKernelGGL(( selectBestEdge) , dim3(oneIterationBlockCount), dim3(threadPerBlock), threadPerBlock * sizeof(double) , 0, d_Errors_ptr, d_randomEdgesId_ptr, d_BestEdges_ptr);
//std::cout << "Best Edge time: " << bestEdgeTimer.getElapsedTime() << std::endl;
//CudaCheckError();
// generate the next set of random number
//Timer randomTimer;
Random::generateRandomInt(d_randomEdgesId, 0, (int)totalHalfEdgeCount - 1, randomSequence);
// Interleave, Async copy from device to host
// (completion is awaited by the hipDeviceSynchronize at the top of the
// next iteration, before h_randomEdgesId is read)
hipMemcpyAsync(thrust::raw_pointer_cast(h_randomEdgesId.data()),
thrust::raw_pointer_cast(d_randomEdgesId.data()),
d_randomEdgesId.size()*sizeof(int),
hipMemcpyDeviceToHost);
//std::cout << "Generate Random time: " << randomTimer.getElapsedTime() << std::endl;
// copy the result of the kernel back to host
h_BestEdge = d_bestEdges;
}
Timer collapseTimer;
int faceCollapsed = 0;
for (auto bestEdgeId : h_BestEdge)
{
if (bestEdgeId == -1)
continue;
HalfedgeHandle halfEdgeHandle(bestEdgeId);
if (!omesh.is_collapse_ok(halfEdgeHandle))
continue;
// if the edge is a boundary edge only 1 face is removed by the collapse, otherwise 2 face is removed
faceCollapsed += omesh.is_boundary(halfEdgeHandle) ? 1 : 2;
omesh.collapse(halfEdgeHandle);
++collapseCount;
}
// Do a garbage collection when invalid cross the trigger threshold
// Doing garbage collection will remove the deleted edge from the data set, hence removing chance of picking an already deleted edge.
if (collapseCount < garbageCollectTrigger)
{
omesh.garbage_collection();
totalHalfEdgeCount = omesh.n_halfedges();
garbageCollectTrigger /= 2;
//std::cout << "Remaining Half Edge: " << totalHalfEdgeCount << std::endl;
}
totalCollapseCount += collapseCount;
//std::cout << "Total Collapsed this iteration: " << collapseCount << " Total Collapsed: " << totalCollapseCount << std::endl;
//std::cout << "Original Collapsed this iteration: " << originalCollapse << std::endl;
//std::cout << "Additional Collapsed this iteration: " << additionalCollapse << std::endl;
//std::cout << "Total Collapsed : " << totalCollapseCount << std::endl;
currentFaceCount -= faceCollapsed;
//std::cout << "Collapse time: " << collapseTimer.getElapsedTime() << std::endl;
}
omesh.garbage_collection();
return totalCollapseCount;
}
// Super Vertex Decimation
/* Per-vertex adjacency header: number of incident faces recorded so far in
   the matching SV_Data entry (incremented with atomicInc during setup). */
struct SV_Header
{
__device__
SV_Header() : size(0) {}
unsigned int size;
};
/* Per-vertex incident-face list: each entry is the offset of a face's first
   corner in the flat index buffer. Fixed capacity MAX_FACE. */
struct SV_Data
{
int indexStart[MAX_FACE];
};
/* One thread per triangle: register the face with each of its three
   vertices, building the per-vertex adjacency (SV_Header.size counts,
   SV_Data.indexStart lists the face offsets).
   NOTE(review): no bounds guard on faceId — the launch in
   decimateSuperVertex rounds the grid up to the block size, so trailing
   threads read `indices` past the end; confirm or add a face count. */
__global__
void setupHeaderAndData(int* indices, SV_Header* headers, SV_Data* datas, unsigned char maxFace)
{
int faceId = blockIdx.x * blockDim.x + threadIdx.x;
int index = faceId * 3;
for (int i = 0; i < 3; ++i)
{
int vertexId = indices[index + i];
// atomicInc wraps to 0 once the counter reaches maxFace, so a vertex with
// more than MAX_FACE incident faces silently overwrites slot 0 onwards.
unsigned size = atomicInc(&headers[vertexId].size, maxFace);
datas[vertexId].indexStart[size] = index;
}
}
/* Quadric of the triangle whose first corner sits at IdStart in the flat
   index buffer: area-weighted plane quadric of the face plane (same scheme
   as computeFaceQE, but indexing the full mesh buffers). */
__device__
Quadric_GPU computeSVFaceQE(int IdStart, int* index, Vector3f* vertices)
{
    Vector3f p0 = vertices[index[IdStart]];
    Vector3f p1 = vertices[index[IdStart + 1]];
    Vector3f p2 = vertices[index[IdStart + 2]];
    Vector3f normal = (p1 - p0).cross(p2 - p0);
    double area = normal.norm();
    if (area > FLT_MIN)
    {
        normal /= area;   // unit normal
        area *= 0.5;      // parallelogram magnitude -> triangle area
    }
    Quadric_GPU q(normal[0], normal[1], normal[2], -(p0.dot(normal)));
    q *= area;
    return q;
}
/* One thread per triangle: store every face's quadric into FaceQuadric.
   NOTE(review): no bounds guard on faceId — the rounded-up launch in
   decimateSuperVertex makes trailing threads index past n_faces; confirm
   the buffers are padded or add a face count parameter. */
__global__
void computeSVQuadricAllFace(int* indices, Vector3f* vertices, Quadric_GPU* FaceQuadric)
{
int faceId = blockIdx.x * blockDim.x + threadIdx.x;
int index = faceId * 3;
FaceQuadric[faceId] = computeSVFaceQE(index, indices, vertices);
}
/* One thread per vertex: sum the quadrics of all faces incident to the
   vertex (per the SV_Header/SV_Data adjacency) into vertexQuadric. */
__global__
void computeSVVertexQuadric(SV_Header* headers, SV_Data* datas, Quadric_GPU* faceQuadric, Quadric_GPU* vertexQuadric, size_t vertexCount)
{
    const int vid = blockIdx.x * blockDim.x + threadIdx.x;
    if (vid >= vertexCount)
        return;
    SV_Header header = headers[vid];
    SV_Data adjacency = datas[vid];
    Quadric_GPU accum;
    for (unsigned int f = 0; f < header.size; ++f)
    {
        // indexStart holds the face's first-corner offset; /3 yields faceId
        accum += faceQuadric[adjacency.indexStart[f] / 3];
    }
    vertexQuadric[vid] = accum;
}
/* One thread per vertex: greedily flag vertices whose entire one-ring is
   still unused, then mark the vertex and its ring as used.
   NOTE(review): vertexUsed is read and written by concurrent threads with
   no atomics or ordering, so two adjacent vertices can both appear
   independent; the result is an approximation, and checkIndependentVertex
   below exists to re-verify the selection. */
__global__
void selectIndependentVertex(SV_Header* headers, SV_Data* datas, int* indices, bool* vertexUsed, bool* selected, size_t vertexCount)
{
int vertexId = blockIdx.x * blockDim.x + threadIdx.x;
if (vertexCount <= vertexId)
return;
SV_Header header = headers[vertexId];
SV_Data data = datas[vertexId];
bool bIndependent = true;
if (false == vertexUsed[vertexId])
{
// scan the one-ring: any already-used neighbour disqualifies this vertex
for (int i = 0; i < header.size && bIndependent; ++i)
{
int indexStart = data.indexStart[i];
for (int j = 0; j < 3 && bIndependent; ++j)
{
int checkVertexId = indices[indexStart + j];
if (true == vertexUsed[checkVertexId])
{
bIndependent = false;
}
}
}
if (bIndependent)
{
selected[vertexId] = true;
vertexUsed[vertexId] = true;
// claim the whole one-ring so neighbours are excluded (racy — see note)
for (int i = 0; i < header.size; ++i)
{
int indexStart = data.indexStart[i];
for (int j = 0; j < 3; ++j)
{
int checkVertexId = indices[indexStart + j];
vertexUsed[checkVertexId] = true;
}
}
}
}
}
/* One thread per selected vertex: confirm that none of the vertex's
   one-ring vertices is itself a selected vertex; confirmed vertices are
   flagged in `checked`.
   FIX: the skip condition was `false == selected[i] && i == vertexId`,
   which never skips anything (the thread only runs when its own vertex is
   selected), so every vertex "conflicted" with itself via its own faces and
   `checked` was never set. Unselected vertices and the vertex itself must
   both be skipped — hence `||`. */
__global__
void checkIndependentVertex(SV_Header* headers, SV_Data* datas, int* indices, bool* selected, bool* checked, size_t vertexCount)
{
    int vertexId = blockIdx.x * blockDim.x + threadIdx.x;
    if (vertexCount <= vertexId || false == selected[vertexId])
        return;
    SV_Header header = headers[vertexId];
    SV_Data data = datas[vertexId];
    bool bValid = true;
    for (size_t i = 0; i < vertexCount && bValid; ++i)
    {
        if (false == selected[i] || i == vertexId)
            continue;
        // check if selected vertex i appears anywhere in our one-ring
        for (int j = 0; j < header.size && bValid; ++j)
        {
            int indexStart = data.indexStart[j];
            for (int k = 0; k < 3 && bValid; ++k)
            {
                int checkVertexId = indices[indexStart + k];
                if (i == checkVertexId) // conflict detected
                {
                    bValid = false;
                }
            }
        }
    }
    if (bValid)
    {
        checked[vertexId] = true;
    }
}
/* Best collapse target found for a vertex: the neighbouring vertex id with
   the lowest quadric error. Defaults match getBestEdge's sentinels:
   error 10000.0 (= "no candidate beat the threshold"), vertexId -1. */
struct BestEdge
{
__device__
BestEdge() : error(10000.0), vertexId(-1) {}
double error;
int vertexId;
};
/* One thread per selected independent vertex: evaluate the vertex's quadric
   at every vertex of its incident faces and record the neighbour with the
   lowest error in BestEdges[vertexId]. Note the candidate set includes the
   vertex itself, since it appears in its own faces. */
__global__
void getBestEdge(SV_Header* headers, SV_Data* datas, int* indices, Vector3f* vertices, bool* selected, Quadric_GPU* vertexQuadric, BestEdge* BestEdges, size_t vertexCount)
{
    const int vid = blockIdx.x * blockDim.x + threadIdx.x;
    if (vid >= vertexCount || false == selected[vid])
        return;
    SV_Header header = headers[vid];
    SV_Data adjacency = datas[vid];
    Quadric_GPU q = vertexQuadric[vid];
    double lowestError = 10000.0;
    int lowestVertex = -1;
    for (int f = 0; f < header.size; ++f)
    {
        const int base = adjacency.indexStart[f];
        for (int corner = 0; corner < 3; ++corner)
        {
            const int candidate = indices[base + corner];
            const double err = q.evalute(vertices[candidate]);
            if (err < lowestError)
            {
                lowestError = err;
                lowestVertex = candidate;
            }
        }
    }
    BestEdges[vid].vertexId = lowestVertex;
    BestEdges[vid].error = lowestError;
}
/* Placeholder kernels for the remaining super-vertex pipeline stages
   (candidate ordering and GPU-side collapse). Not implemented yet — the
   collapse currently happens on the host. */
__global__
void sortBestEdge()
{
}
__global__
void collapse()
{
}
// Super-vertex decimation entry point, HIP variant (work in progress):
// uploads the mesh, builds per-vertex face rings and quadrics on the GPU,
// then runs one round of independent-vertex selection and best-edge scoring.
// The collapse step is unimplemented, so the mesh is left unmodified and 0
// is returned. targetFaceCount/binSize are currently unused.
int ZEPHYR_GPU_API Zephyr::GPU::decimateSuperVertex(Common::OpenMeshMesh & mesh, unsigned int targetFaceCount, unsigned int binSize)
{
    Timer time("Super Vertex");
    auto& omesh = mesh.getMesh();
    // Setup the vertex list
    auto points = omesh.points();
    size_t vertexCount = omesh.n_vertices();
    Vector3f* d_vertices_ptr;
    size_t verticesSize = vertexCount * sizeof(Vector3f);
    // NOTE(review): this buffer (and d_indices_ptr below) is never freed —
    // raw hipMalloc with no matching hipFree leaks on every call.
    hipMalloc((void**)&d_vertices_ptr, verticesSize);
    hipMemcpy(d_vertices_ptr, points, verticesSize, hipMemcpyHostToDevice);
    // Setup the face index (flat buffer: 3 vertex ids per face), filled in
    // parallel on the host with TBB.
    thrust::host_vector<int> indices(omesh.n_faces() * 3);
    tbb::parallel_for((size_t)0, omesh.n_faces(), [&](const size_t faceId)
    {
        FaceHandle fh(faceId);
        int index = faceId * 3;
        for (auto fv : omesh.fv_range(fh))
        {
            indices[index++] = fv.idx();
        }
    });
    int* d_indices_ptr;
    size_t indicesSize = indices.size() * sizeof(int);
    hipMalloc((void**)&d_indices_ptr, indicesSize);
    hipMemcpy(d_indices_ptr, &indices[0], indicesSize, hipMemcpyHostToDevice);
    // Setup the SV_Header and SV_Data (per-vertex incident-face rings)
    thrust::device_vector<SV_Header> d_headers(vertexCount);
    auto d_header_ptr = thrust::raw_pointer_cast(&d_headers[0]);
    thrust::device_vector<SV_Data> d_datas(vertexCount);
    auto d_datas_ptr = thrust::raw_pointer_cast(&d_datas[0]);
    int maxThreadPerBlock = QueryDevice::getMaxThreadPerBlock(0) / 2;
    // NOTE(review): setupHeaderAndData and computeSVQuadricAllFace have no
    // bounds guard, so this rounded-up launch over-runs its arrays whenever
    // n_faces is not a multiple of maxThreadPerBlock — confirm.
    int blockNeeded = (omesh.n_faces() + (maxThreadPerBlock - 1)) / maxThreadPerBlock;
    std::cout << "Block Needed: " << blockNeeded << std::endl;
    // Setup the header and data
    hipLaunchKernelGGL(( setupHeaderAndData), dim3(blockNeeded), dim3(maxThreadPerBlock), 0, 0, d_indices_ptr, d_header_ptr, d_datas_ptr, MAX_FACE);
    //CudaCheckError();
    // compute the face quadrics
    thrust::device_vector<Quadric_GPU> FacesQuadric(omesh.n_faces());
    auto d_FaceQuadric_ptr = thrust::raw_pointer_cast(&FacesQuadric[0]);
    hipLaunchKernelGGL(( computeSVQuadricAllFace), dim3(blockNeeded), dim3(maxThreadPerBlock), 0, 0, d_indices_ptr, d_vertices_ptr, d_FaceQuadric_ptr);
    //CudaCheckError();
    // accumulate face quadrics into per-vertex quadrics
    thrust::device_vector<Quadric_GPU> vertexQuadric(vertexCount);
    auto d_vertexQuadric_ptr = thrust::raw_pointer_cast(&vertexQuadric[0]);
    blockNeeded = (omesh.n_vertices() + (maxThreadPerBlock - 1)) / maxThreadPerBlock;
    hipLaunchKernelGGL(( computeSVVertexQuadric), dim3(blockNeeded), dim3(maxThreadPerBlock), 0, 0, d_header_ptr, d_datas_ptr, d_FaceQuadric_ptr, d_vertexQuadric_ptr, vertexCount);
    //CudaCheckError();
    int currentFaceCount = omesh.n_faces();
    // One pass of greedy independent-set selection, validation, and
    // best-edge scoring (the loop is disabled until collapse() exists).
    //while (targetFaceCount < currentFaceCount)
    {
        thrust::device_vector<bool> d_vertexUsed(vertexCount);
        auto d_vertexUsed_ptr = thrust::raw_pointer_cast(&d_vertexUsed[0]);
        thrust::device_vector<bool> d_independentVertex(vertexCount);
        auto d_independentVertex_ptr = thrust::raw_pointer_cast(&d_independentVertex[0]);
        //blockNeeded = (binSize + (maxThreadPerBlock - 1)) / maxThreadPerBlock;
        hipLaunchKernelGGL(( selectIndependentVertex), dim3(blockNeeded), dim3(maxThreadPerBlock), 0, 0, d_header_ptr, d_datas_ptr, d_indices_ptr, d_vertexUsed_ptr, d_independentVertex_ptr, vertexCount);
        //CudaCheckError();
        thrust::device_vector<bool> d_checkedvertexUsed(vertexCount);
        auto d_checkedvertexUsed_ptr = thrust::raw_pointer_cast(&d_checkedvertexUsed[0]);
        hipLaunchKernelGGL(( checkIndependentVertex), dim3(blockNeeded), dim3(maxThreadPerBlock), 0, 0, d_header_ptr, d_datas_ptr, d_indices_ptr, d_independentVertex_ptr, d_checkedvertexUsed_ptr, vertexCount);
        //CudaCheckError();
        thrust::device_vector<BestEdge> d_bestedges(vertexCount);
        auto d_bestEdges_ptr = thrust::raw_pointer_cast(&d_bestedges[0]);
        hipLaunchKernelGGL(( getBestEdge), dim3(blockNeeded), dim3(maxThreadPerBlock), 0, 0, d_header_ptr, d_datas_ptr, d_indices_ptr, d_vertices_ptr, d_checkedvertexUsed_ptr, d_vertexQuadric_ptr, d_bestEdges_ptr, vertexCount);
        //CudaCheckError();
    }
    time.reportTime();
    return 0;
} | 1a4354bce7a1129df92b4bfc58e3088dade1bdf0.cu | #include "Decimate.h"
#include "Quadric_GPU.cuh"
#include "QEM_Data.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <tbb/parallel_for.h>
#include <tbb/parallel_for_each.h>
#include <tbb/mutex.h>
#include <map>
#include <thread>
#include <Timer.h>
#include <Random.h>
#include "../Random.cuh"
#include "../QueryDevice.h"
#include <Decimate/QuadricError.h>
using namespace Zephyr;
using namespace Zephyr::Common;
using namespace Zephyr::GPU;
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Aborts with a file/line diagnostic when a CUDA API call returned an error.
// Compiles to a no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaSafeCall(cudaError err, const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
    if (err != cudaSuccess)
    {
        fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                file, line, cudaGetErrorString(err));
        exit(-1);
    }
#endif
    return;
}
// Checks for a pending CUDA error (e.g. after a kernel launch) and aborts
// with a file/line diagnostic. The second, synchronizing pass also surfaces
// asynchronous execution failures, at a performance cost.
// Compiles to a no-op unless CUDA_ERROR_CHECK is defined.
inline void __cudaCheckError(const char *file, const int line)
{
#ifdef CUDA_ERROR_CHECK
    cudaError err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "cudaCheckError() failed at %s:%i : %s\n",
                file, line, cudaGetErrorString(err));
        exit(-1);
    }
    // More careful checking. However, this will affect performance.
    // Comment away if needed.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
                file, line, cudaGetErrorString(err));
        exit(-1);
    }
#endif
    return;
}
__constant__ double MAX_ERRORS[2]; // quadric error, max flip error
// Area-weighted quadric of one triangle (plane a*x + b*y + c*z + d = 0
// through v0). IdStart is the offset of the triangle's first entry in
// `index`; `index`/`vertices` are device buffers.
// NOTE(review): when the triangle is degenerate (cross-product norm <=
// FLT_MIN) the normal is left unnormalized and the full norm — not the
// halved area — is used as the weight; confirm that is intended.
__device__
inline Quadric_GPU computeFaceQE(INDEX_TYPE IdStart, INDEX_TYPE* index, Vector3f* vertices)
{
    Vector3f v0 = vertices[index[IdStart]];
    Vector3f v1 = vertices[index[IdStart + 1]];
    Vector3f v2 = vertices[index[IdStart + 2]];
    // Unnormalized face normal; its norm is twice the triangle area.
    Vector3f n = (v1 - v0).cross(v2 - v0);
    double area = n.norm();
    if (area > FLT_MIN)
    {
        n /= area;
        area *= 0.5;
    }
    // Plane coefficients (d chosen so the plane passes through v0).
    double a = n[0];
    double b = n[1];
    double c = n[2];
    double d = -(v0.dot(n));
    Quadric_GPU q(a, b, c, d);
    q *= area;
    return q;
}
// Scores one collapse candidate: sums the quadrics of every face stored in
// the candidate's QEM_Data patch and evaluates the total at the vertex that
// would be kept. Returns the sentinel 10000.0 when the candidate is invalid
// or the error exceeds MAX_ERRORS[0] (constant memory, set from the host).
__device__
double computeError(int id, QEM_Data* QEM_Datas)
{
    auto data = QEM_Datas[id];
    // if invalid just return max error
    if (!data.bValid)
        return 10000.0;
    Quadric_GPU q;
    // Accumulate one quadric per face (indices come in triples).
    for (int i = 0; i < data.indexCount; i += 3)
    {
        q += computeFaceQE(i, data.indices, data.vertices);
    }
    double err = q.evalute(data.vertices[data.vertexToKeepId]);
    //printf("err: %f\n", err);
    return (err < MAX_ERRORS[0]) ? err : 10000.0;
}
// One thread per candidate edge: writes that candidate's quadric error.
// NOTE(review): no bounds guard — both `QEM_Datas` and `errors` must hold
// gridDim.x * blockDim.x entries or the kernel writes out of bounds.
__global__
void computeErrors(QEM_Data* QEM_Datas, double* errors)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    errors[i] = computeError(i, QEM_Datas);
}
// Block-level argmin: each thread stages one candidate's error in shared
// memory, then thread 0 serially scans the block's bin and emits the
// half-edge id with the lowest error, or -1 when nothing beats the 10000.0
// sentinel. Dynamic shared memory required: blockDim.x * sizeof(double).
__global__
void selectBestEdge(double* errors, int* random, int* bestEdge)
{
    extern __shared__ double sErrors[];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // copy the errors to shared memory
    sErrors[threadIdx.x] = errors[i];
    __syncthreads();
    // Only one thread per block can search for the best edge
    if (0 == threadIdx.x)
    {
        double bestError = 10000.0f; // sentinel; matches computeError's "invalid"
        int bestHalfEdge = -1;
        for (int i = 0; i < blockDim.x; ++i) // NOTE: this `i` shadows the outer one
        {
            double err = sErrors[i];
            if (bestError > err)
            {
                bestError = err;
                bestHalfEdge = random[blockIdx.x * blockDim.x + i];
            }
        }
        //printf("id: %d, Score: %f", bestHalfEdge, bestError);
        bestEdge[blockIdx.x] = bestHalfEdge;
    }
}
// POD pair of error thresholds, copied byte-wise into the __constant__
// MAX_ERRORS array via cudaMemcpyToSymbol (layout must stay two doubles).
struct ConstError
{
    ConstError(double maxQuadricError_, double maxNormalFlipDeviation_)
        : maxQuadricError(maxQuadricError_), maxNormalFlipDeviation(maxNormalFlipDeviation_) {}
    double maxQuadricError;        // MAX_ERRORS[0], used by computeError
    double maxNormalFlipDeviation; // MAX_ERRORS[1]
};
// Dispatches to the selected GPU decimation algorithm, runs garbage
// collection on the mesh, and prints before/after statistics.
// Returns the number of collapses performed (-1 if `type` matched neither
// algorithm).
int GPU::decimate(Common::OpenMeshMesh & mesh, unsigned int targetFaceCount, unsigned int binSize, Algorithm::DecimationType type)
{
    Common::Timer timer;
    auto& omesh = mesh.getMesh();
    int collapseCount = -1;
    auto previousFaceCount = omesh.n_faces();
    std::cout << "Using ";
    if (Algorithm::DecimationType::GPU_RANDOM_DECIMATE == type)
    {
        std::cout << "GPU Random Decimation..." << std::endl;
        collapseCount = GPU::decimateMC(mesh, targetFaceCount, binSize);
    }
    else if (Algorithm::DecimationType::GPU_SUPER_VERTEX == type)
    {
        std::cout << "GPU Super Vertex..." << std::endl;
        collapseCount = GPU::decimateSuperVertex(mesh, targetFaceCount, binSize);
    }
    auto elapseTime = timer.getElapsedTime();
    // Same underlying mesh as `omesh`; fetched again only for reporting.
    auto& omeshDecimated = mesh.getMesh();
    // Compact away elements marked deleted by the collapses.
    omesh.garbage_collection();
    std::cout << "Decimation done in " << elapseTime << " sec" << std::endl;
    std::cout << "Original Face Count: " << previousFaceCount << std::endl;
    std::cout << "Target Face Count: " << targetFaceCount << std::endl;
    std::cout << "Removed Face Count: " << collapseCount << std::endl;
    std::cout << "Decimated Face Count: " << omeshDecimated.n_faces() << std::endl;
    std::cout << "Percentage decimated: " << ((previousFaceCount - omeshDecimated.n_faces()) / (float)previousFaceCount) * 100.0f << " %" << std::endl;
    return collapseCount;
}
// GPU Monte-Carlo decimation: each iteration picks a batch of random
// half-edges, scores every candidate with its quadric error on the GPU
// (computeErrors), reduces each block of `binSize` candidates to one winner
// (selectBestEdge), then collapses the winners on the host. Random-id
// generation and the device-to-host copy of the next batch are overlapped
// with the host-side collapsing.
// Returns the total number of successful edge collapses.
int GPU::decimateMC(Common::OpenMeshMesh & mesh, unsigned int targetFaceCount, unsigned int binSize)
{
    const float maxQuadricError = 0.1f;
    const float maxNormalFlipDeviation = 45.0;
    const int maxRetryCount = 50;
    // Publish the error thresholds to constant memory (read by computeError).
    ConstError constError(maxQuadricError, maxNormalFlipDeviation);
    cudaMemcpyToSymbol(MAX_ERRORS, (ConstError*)&constError, sizeof(constError));

    auto& omesh = mesh.getMesh();
    // NOTE(review): retryCount is never incremented, so the maxRetryCount
    // guard below can only trip if that changes.
    int retryCount = 0;
    size_t initialFaceCount = omesh.n_faces();
    size_t currentFaceCount = initialFaceCount;
    size_t totalHalfEdgeCount = omesh.n_halfedges();
    size_t totalCollapseRequired = (initialFaceCount - targetFaceCount) / 2;

    // One CUDA block evaluates `binSize` random candidates and keeps one winner.
    int N = totalCollapseRequired;
    int threadPerBlock = binSize;
    int oneIterationBlockCount = QueryDevice::computeOptimalBlockCount(N, threadPerBlock, 0);
    int oneIterationSelectionSize = oneIterationBlockCount * threadPerBlock;
    std::cout << "1 iteration block count: " << oneIterationBlockCount << std::endl;
    std::cout << "1 iteration selection size: " << oneIterationSelectionSize << std::endl;

    int randomSequence = 0;
    thrust::device_vector<int> d_randomEdgesId(oneIterationSelectionSize);
    // Pinned host memory so the cudaMemcpyAsync below can truly be async.
    thrust::host_vector<int, thrust::system::cuda::experimental::pinned_allocator<int>> h_randomEdgesId(oneIterationSelectionSize);
    // BUGFIX: d_Errors is written once per *thread* (one error per candidate)
    // by computeErrors and read per thread in selectBestEdge, so it must hold
    // oneIterationSelectionSize entries — sizing it oneIterationBlockCount
    // caused out-of-bounds device writes.
    thrust::device_vector<double> d_Errors(oneIterationSelectionSize);
    thrust::device_vector<int> d_bestEdges(oneIterationBlockCount);
    thrust::host_vector<int, thrust::system::cuda::experimental::pinned_allocator<int>> h_BestEdge(oneIterationBlockCount);
    CopyPartialMesh partialMesh(oneIterationSelectionSize);

    int* d_BestEdges_ptr = thrust::raw_pointer_cast(&d_bestEdges[0]);
    int* d_randomEdgesId_ptr = thrust::raw_pointer_cast(&d_randomEdgesId[0]);
    double* d_Errors_ptr = thrust::raw_pointer_cast(&d_Errors[0]);

    // Seed the pipeline with the first batch of random half-edge ids.
    Random::generateRandomInt(d_randomEdgesId, 0, (int)totalHalfEdgeCount - 1, randomSequence);
    h_randomEdgesId = d_randomEdgesId;

    int totalCollapseCount = 0;
    int garbageCollectTrigger = oneIterationBlockCount / 2;
    int collapseCount = std::numeric_limits<int>::max();
    // Stop when the target is reached or an iteration yields too few
    // successful collapses (< 5% of the blocks).
    while (retryCount < maxRetryCount && currentFaceCount > targetFaceCount && collapseCount > (0.05 * oneIterationBlockCount))
    {
        collapseCount = 0;
        // Data marshalling and CUDA kernel calls
        {
            // Wait for the async D2H copy of random ids from the previous pass.
            cudaDeviceSynchronize();
            QEM_Data* d_QEM_Datas_ptr = partialMesh.copyPartialMesh(omesh, h_randomEdgesId);
            // Score every candidate edge...
            computeErrors <<< oneIterationBlockCount, threadPerBlock >>>(d_QEM_Datas_ptr, d_Errors_ptr);
            // ...and reduce to one winner per block.
            selectBestEdge <<< oneIterationBlockCount, threadPerBlock, threadPerBlock * sizeof(double) >>>(d_Errors_ptr, d_randomEdgesId_ptr, d_BestEdges_ptr);
            // Overlap: generate the next batch of random ids and start
            // copying them back while the host collapses this batch.
            Random::generateRandomInt(d_randomEdgesId, 0, (int)totalHalfEdgeCount - 1, randomSequence);
            cudaMemcpyAsync(thrust::raw_pointer_cast(h_randomEdgesId.data()),
                            thrust::raw_pointer_cast(d_randomEdgesId.data()),
                            d_randomEdgesId.size()*sizeof(int),
                            cudaMemcpyDeviceToHost);
            // Synchronous thrust copy of this iteration's winners.
            h_BestEdge = d_bestEdges;
        }
        int faceCollapsed = 0;
        for (auto bestEdgeId : h_BestEdge)
        {
            if (bestEdgeId == -1)
                continue;
            HalfedgeHandle halfEdgeHandle(bestEdgeId);
            if (!omesh.is_collapse_ok(halfEdgeHandle))
                continue;
            // A boundary collapse removes one face, an interior one removes two.
            faceCollapsed += omesh.is_boundary(halfEdgeHandle) ? 1 : 2;
            omesh.collapse(halfEdgeHandle);
            ++collapseCount;
        }
        // When too few collapses succeed the mesh is littered with deleted
        // elements; compact so random picks stop landing on dead edges.
        if (collapseCount < garbageCollectTrigger)
        {
            omesh.garbage_collection();
            totalHalfEdgeCount = omesh.n_halfedges();
            garbageCollectTrigger /= 2;
        }
        totalCollapseCount += collapseCount;
        currentFaceCount -= faceCollapsed;
    }
    omesh.garbage_collection();
    return totalCollapseCount;
}
// Super Vertex Decimation
// Per-vertex counter: number of valid entries in the matching SV_Data
// (i.e. how many incident faces have been registered for the vertex).
struct SV_Header
{
    __device__
    SV_Header() : size(0) {}
    unsigned int size; // incremented with atomicInc in setupHeaderAndData
};
// Per-vertex face ring: flat-index offsets (position of each face's first
// vertex id in the global index buffer) of up to MAX_FACE incident faces.
struct SV_Data
{
    int indexStart[MAX_FACE];
};
// One thread per face: registers the face's flat index offset in the face
// ring of each of its three vertices.
// NOTE(review): atomicInc wraps the counter to 0 once it reaches maxFace, so
// a vertex with more than MAX_FACE incident faces silently overwrites slot 0
// — confirm MAX_FACE is a true upper bound for the input meshes.
// NOTE(review): no bounds guard — the rounded-up launch in
// decimateSuperVertex runs threads past the last face.
__global__
void setupHeaderAndData(int* indices, SV_Header* headers, SV_Data* datas, unsigned char maxFace)
{
    int faceId = blockIdx.x * blockDim.x + threadIdx.x;
    int index = faceId * 3;
    for (int i = 0; i < 3; ++i)
    {
        int vertexId = indices[index + i];
        // atomicInc returns the old count == the slot to fill.
        unsigned size = atomicInc(&headers[vertexId].size, maxFace);
        datas[vertexId].indexStart[size] = index;
    }
}
// Area-weighted quadric of one triangle (int-indexed variant used by the
// super-vertex path; same math as computeFaceQE above).
// NOTE(review): a degenerate triangle (cross-product norm <= FLT_MIN) keeps
// the unnormalized normal and the full norm as weight — confirm intended.
__device__
Quadric_GPU computeSVFaceQE(int IdStart, int* index, Vector3f* vertices)
{
    Vector3f v0 = vertices[index[IdStart]];
    Vector3f v1 = vertices[index[IdStart + 1]];
    Vector3f v2 = vertices[index[IdStart + 2]];
    // Unnormalized face normal; its norm is twice the triangle area.
    Vector3f n = (v1 - v0).cross(v2 - v0);
    double area = n.norm();
    if (area > FLT_MIN)
    {
        n /= area;
        area *= 0.5;
    }
    // Plane a*x + b*y + c*z + d = 0 through v0.
    double a = n[0];
    double b = n[1];
    double c = n[2];
    double d = -(v0.dot(n));
    Quadric_GPU q(a, b, c, d);
    q *= area;
    return q;
}
// One thread per face: computes that face's quadric from the flat index
// buffer. NOTE(review): no bounds guard — the rounded-up launch in
// decimateSuperVertex reads/writes past the arrays when n_faces is not a
// multiple of the block size.
__global__
void computeSVQuadricAllFace(int* indices, Vector3f* vertices, Quadric_GPU* FaceQuadric)
{
    int faceId = blockIdx.x * blockDim.x + threadIdx.x;
    int index = faceId * 3;
    FaceQuadric[faceId] = computeSVFaceQE(index, indices, vertices);
}
// One thread per vertex: sums the quadrics of all faces incident to the
// vertex (face id recovered from the flat offset as indexStart / 3).
// Bounds-guarded, so rounded-up launches are safe here.
__global__
void computeSVVertexQuadric(SV_Header* headers, SV_Data* datas, Quadric_GPU* faceQuadric, Quadric_GPU* vertexQuadric, size_t vertexCount)
{
    int vertexId = blockIdx.x * blockDim.x + threadIdx.x;
    if (vertexCount <= vertexId)
        return;
    SV_Header header = headers[vertexId];
    SV_Data data = datas[vertexId];
    Quadric_GPU q;
    for (int i = 0; i < header.size; ++i)
    {
        int indexStart = data.indexStart[i];
        int faceId = indexStart / 3;
        q += faceQuadric[faceId];
    }
    vertexQuadric[vertexId] = q;
}
// Greedy independent-set pass: a thread claims its vertex if no vertex
// sharing a face with it is marked used yet, then marks the whole 1-ring
// neighborhood (including itself) as used.
// NOTE(review): vertexUsed is read and written by many threads with no
// atomics or ordering, so two adjacent vertices can both observe "unused"
// and claim themselves concurrently; the follow-up checkIndependentVertex
// pass appears to exist to filter such conflicts — confirm.
__global__
void selectIndependentVertex(SV_Header* headers, SV_Data* datas, int* indices, bool* vertexUsed, bool* selected, size_t vertexCount)
{
    int vertexId = blockIdx.x * blockDim.x + threadIdx.x;
    if (vertexCount <= vertexId)
        return;
    SV_Header header = headers[vertexId];
    SV_Data data = datas[vertexId];
    bool bIndependent = true;
    if (false == vertexUsed[vertexId])
    {
        // Scan the 1-ring: any already-used neighbor disqualifies us.
        for (int i = 0; i < header.size && bIndependent; ++i)
        {
            int indexStart = data.indexStart[i];
            for (int j = 0; j < 3 && bIndependent; ++j)
            {
                int checkVertexId = indices[indexStart + j];
                if (true == vertexUsed[checkVertexId])
                {
                    bIndependent = false;
                }
            }
        }
        if (bIndependent)
        {
            // Claim the vertex and block out its whole neighborhood.
            selected[vertexId] = true;
            vertexUsed[vertexId] = true;
            for (int i = 0; i < header.size; ++i)
            {
                int indexStart = data.indexStart[i];
                for (int j = 0; j < 3; ++j)
                {
                    int checkVertexId = indices[indexStart + j];
                    vertexUsed[checkVertexId] = true;
                }
            }
        }
    }
}
// Validates the greedy selection from selectIndependentVertex: a selected
// vertex survives only if no *other* selected vertex appears among the
// vertices of its incident faces. Surviving vertices are flagged in
// `checked`; unselected vertices are left untouched (early return).
// Launch: one thread per vertex, bounds-guarded.
// NOTE: O(vertexCount) scan per thread — correctness-first implementation.
__global__
void checkIndependentVertex(SV_Header* headers, SV_Data* datas, int* indices, bool* selected, bool* checked, size_t vertexCount)
{
    int vertexId = blockIdx.x * blockDim.x + threadIdx.x;
    if (vertexCount <= vertexId || false == selected[vertexId])
        return;
    SV_Header header = headers[vertexId];
    SV_Data data = datas[vertexId];
    bool bValid = true;
    for (size_t i = 0; i < vertexCount && bValid; ++i)
    {
        // BUGFIX: was `&&`, which (a) failed to skip the vertex itself —
        // every vertex occurs in its own face ring, so each selected vertex
        // self-conflicted and nothing ever validated — and (b) needlessly
        // scanned unselected vertices. Skip candidates that are not selected
        // OR are this vertex.
        if (false == selected[i] || i == vertexId)
            continue;
        // Conflict: another selected vertex shares one of our faces.
        for (int j = 0; j < header.size && bValid; ++j)
        {
            int indexStart = data.indexStart[j];
            for (int k = 0; k < 3 && bValid; ++k)
            {
                int checkVertexId = indices[indexStart + k];
                if (i == checkVertexId) // conflict detected
                {
                    bValid = false;
                }
            }
        }
    }
    if (bValid)
    {
        checked[vertexId] = true;
    }
}
// Best collapse candidate found for a vertex: target vertex id plus its
// quadric error. Defaults mean "no candidate" (sentinel error, id -1).
struct BestEdge
{
    __device__
    BestEdge() : error(10000.0), vertexId(-1) {}
    double error;   // quadric error of collapsing toward vertexId
    int vertexId;   // candidate target vertex, -1 if none
};
// For each validated independent vertex, evaluates the vertex's accumulated
// quadric at every vertex of its incident faces and records the one with
// the smallest error as the collapse target. Unselected vertices return
// early, leaving their BestEdges entry untouched.
// NOTE(review): the vertex's own id is among the candidates tested, since
// every incident face contains it — confirm that self-collapse is excluded
// downstream.
__global__
void getBestEdge(SV_Header* headers, SV_Data* datas, int* indices, Vector3f* vertices, bool* selected, Quadric_GPU* vertexQuadric, BestEdge* BestEdges, size_t vertexCount)
{
    int vertexId = blockIdx.x * blockDim.x + threadIdx.x;
    if (vertexCount <= vertexId || false == selected[vertexId])
        return;
    SV_Header header = headers[vertexId];
    SV_Data data = datas[vertexId];
    Quadric_GPU q = vertexQuadric[vertexId];
    double bestError = 10000.0; // sentinel: "no candidate found"
    int bestVertex = -1;
    for (int i = 0; i < header.size; ++i)
    {
        int indexStart = data.indexStart[i];
        for (int j = 0; j < 3; ++j)
        {
            int checkVertexId = indices[indexStart + j];
            double error = q.evalute(vertices[checkVertexId]);
            if (error < bestError)
            {
                bestError = error;
                bestVertex = checkVertexId;
            }
        }
    }
    BestEdges[vertexId].vertexId = bestVertex;
    BestEdges[vertexId].error = bestError;
}
// NOTE(review): unimplemented stub — presumably intended to order the
// per-vertex best collapse edges; body is empty.
__global__
void sortBestEdge()
{
}
// NOTE(review): unimplemented stub — the GPU-side collapse step of the
// super-vertex pipeline has not been written yet; body is empty.
__global__
void collapse()
{
}
// Super-vertex decimation entry point (work in progress): uploads the mesh,
// builds per-vertex face rings and quadrics on the GPU, then runs one round
// of independent-vertex selection, validation, and best-edge scoring.
// The collapse step (sortBestEdge/collapse kernels) is unimplemented, so the
// mesh is left unmodified and 0 is returned; targetFaceCount/binSize are
// currently unused.
int ZEPHYR_GPU_API Zephyr::GPU::decimateSuperVertex(Common::OpenMeshMesh & mesh, unsigned int targetFaceCount, unsigned int binSize)
{
    Timer time("Super Vertex");
    auto& omesh = mesh.getMesh();

    // Upload vertex positions.
    auto points = omesh.points();
    size_t vertexCount = omesh.n_vertices();
    Vector3f* d_vertices_ptr;
    size_t verticesSize = vertexCount * sizeof(Vector3f);
    cudaMalloc((void**)&d_vertices_ptr, verticesSize);
    cudaMemcpy(d_vertices_ptr, points, verticesSize, cudaMemcpyHostToDevice);

    // Flatten the triangle index buffer (3 vertex ids per face) in parallel
    // on the host, then upload it.
    thrust::host_vector<int> indices(omesh.n_faces() * 3);
    tbb::parallel_for((size_t)0, omesh.n_faces(), [&](const size_t faceId)
    {
        FaceHandle fh(faceId);
        int index = faceId * 3;
        for (auto fv : omesh.fv_range(fh))
        {
            indices[index++] = fv.idx();
        }
    });
    int* d_indices_ptr;
    size_t indicesSize = indices.size() * sizeof(int);
    cudaMalloc((void**)&d_indices_ptr, indicesSize);
    cudaMemcpy(d_indices_ptr, &indices[0], indicesSize, cudaMemcpyHostToDevice);

    // Per-vertex incident-face rings (SV_Header counts, SV_Data offsets).
    thrust::device_vector<SV_Header> d_headers(vertexCount);
    auto d_header_ptr = thrust::raw_pointer_cast(&d_headers[0]);
    thrust::device_vector<SV_Data> d_datas(vertexCount);
    auto d_datas_ptr = thrust::raw_pointer_cast(&d_datas[0]);

    int maxThreadPerBlock = QueryDevice::getMaxThreadPerBlock(0) / 2;
    // NOTE(review): setupHeaderAndData and computeSVQuadricAllFace have no
    // bounds guard, so this rounded-up launch over-runs its arrays whenever
    // n_faces is not a multiple of maxThreadPerBlock.
    int blockNeeded = (omesh.n_faces() + (maxThreadPerBlock - 1)) / maxThreadPerBlock;
    std::cout << "Block Needed: " << blockNeeded << std::endl;

    // Build the face rings, then the face quadrics, then accumulate them
    // into per-vertex quadrics.
    setupHeaderAndData<<<blockNeeded, maxThreadPerBlock>>>(d_indices_ptr, d_header_ptr, d_datas_ptr, MAX_FACE);
    thrust::device_vector<Quadric_GPU> FacesQuadric(omesh.n_faces());
    auto d_FaceQuadric_ptr = thrust::raw_pointer_cast(&FacesQuadric[0]);
    computeSVQuadricAllFace<<<blockNeeded, maxThreadPerBlock>>>(d_indices_ptr, d_vertices_ptr, d_FaceQuadric_ptr);
    thrust::device_vector<Quadric_GPU> vertexQuadric(vertexCount);
    auto d_vertexQuadric_ptr = thrust::raw_pointer_cast(&vertexQuadric[0]);
    blockNeeded = (omesh.n_vertices() + (maxThreadPerBlock - 1)) / maxThreadPerBlock;
    computeSVVertexQuadric<<<blockNeeded, maxThreadPerBlock>>>(d_header_ptr, d_datas_ptr, d_FaceQuadric_ptr, d_vertexQuadric_ptr, vertexCount);

    // One pass of greedy independent-set selection, validation, and
    // best-edge scoring (looping is disabled until collapse() exists).
    {
        thrust::device_vector<bool> d_vertexUsed(vertexCount);
        auto d_vertexUsed_ptr = thrust::raw_pointer_cast(&d_vertexUsed[0]);
        thrust::device_vector<bool> d_independentVertex(vertexCount);
        auto d_independentVertex_ptr = thrust::raw_pointer_cast(&d_independentVertex[0]);
        selectIndependentVertex<<<blockNeeded, maxThreadPerBlock>>>(d_header_ptr, d_datas_ptr, d_indices_ptr, d_vertexUsed_ptr, d_independentVertex_ptr, vertexCount);
        thrust::device_vector<bool> d_checkedvertexUsed(vertexCount);
        auto d_checkedvertexUsed_ptr = thrust::raw_pointer_cast(&d_checkedvertexUsed[0]);
        checkIndependentVertex<<<blockNeeded, maxThreadPerBlock>>>(d_header_ptr, d_datas_ptr, d_indices_ptr, d_independentVertex_ptr, d_checkedvertexUsed_ptr, vertexCount);
        thrust::device_vector<BestEdge> d_bestedges(vertexCount);
        auto d_bestEdges_ptr = thrust::raw_pointer_cast(&d_bestedges[0]);
        getBestEdge<<<blockNeeded, maxThreadPerBlock>>>(d_header_ptr, d_datas_ptr, d_indices_ptr, d_vertices_ptr, d_checkedvertexUsed_ptr, d_vertexQuadric_ptr, d_bestEdges_ptr, vertexCount);
    }
    // BUGFIX: the raw cudaMalloc allocations were leaked on every call;
    // release them before returning (the thrust vectors free themselves).
    cudaFree(d_indices_ptr);
    cudaFree(d_vertices_ptr);
    time.reportTime();
    return 0;
}
6e30853039d6410d853e28eb71f750e9de8852fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
#include <cstddef>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are.
Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
// =================================================================================================
// Writes, per element, the value of bit `digit` of d_in[i] (0 or 1) into
// d_predicate. NOTE(review): no bounds guard and `size` is unused — the
// launch must cover exactly `size` threads or this reads/writes OOB.
__global__ void generatePredicate(const int *d_in, int *d_predicate,
                                  const int size, const int digit)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int myItem = d_in[myId];
    int val = myItem>>digit;
    d_predicate[myId] = (val & 1);
}
// =================================================================================================
// Step efficient scan
// Double-buffered Hillis–Steele exclusive scan of d_in into d_out in shared
// memory. Dynamic shared memory of 2 * `size` elements is required (the two
// buffers are laid out at strides of `size`).
// NOTE(review): the caller allocates 2 * BLOCK_SIZE elements and launches
// size/BLOCK_SIZE blocks, but the indexing here uses `size` as the buffer
// stride and indexes d_in/d_out by threadIdx.x only — so the kernel is only
// correct for a single-block launch with size == blockDim.x. Confirm the
// intended launch configuration.
__global__ void hillis_steele_algo(unsigned int* d_out, const int* d_in, size_t size)
{
    extern __shared__ unsigned int temp[];
    int tid = threadIdx.x;
    int pout = 0, pin=1;
    temp[tid] = tid>0? d_in[tid-1]:0; //exclusive scan: shift input right by one
    __syncthreads();
    //double buffered: alternate reading from one half, writing the other
    for (int offset = 1; offset < size; offset <<= 1) {
        pout = 1 - pout;
        pin = 1 - pout;
        if (tid >= offset)
            temp[size*pout + tid] = temp[size*pin + tid]+temp[size*pin + tid - offset];
        else
            temp[size*pout + tid] = temp[size*pin + tid];
        __syncthreads();
    }
    d_out[tid] = temp[pout*size + tid];
}
// Launches the Hillis–Steele exclusive scan of d_predicate into d_scan and
// returns the last scan element (the prefix total up to, but excluding, the
// final input). numElems must be a positive multiple of BLOCK_SIZE.
int hillis_steele_scan(unsigned int *d_scan, unsigned int *d_predicate, size_t numElems, const int BLOCK_SIZE)
{
    int threads = BLOCK_SIZE;
    int size = numElems;
    dim3 block(threads, 1, 1);
    dim3 grid(size/threads, 1, 1);
    hipLaunchKernelGGL(( hillis_steele_algo) , dim3(grid), dim3(block), 2 * BLOCK_SIZE*sizeof(unsigned int), 0,
        d_scan, d_predicate, numElems);
    hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
    // BUGFIX: the original dereferenced an uninitialized host pointer and
    // mis-addressed the device array (`hipMemcpy(*h_sum, *d_scan[...]`);
    // copy the last device element into a local and return it by value.
    unsigned int h_sum = 0;
    checkCudaErrors(hipMemcpy(&h_sum, d_scan + (numElems - 1),
                              sizeof(unsigned int), hipMemcpyDeviceToHost));
    return (int)h_sum;
}
// =================================================================================================
// LSB radix sort driver (HIP variant) — incomplete, non-functional scaffold.
// NOTE(review): as written this does not compile or produce output:
//  - the hillis_steele_scan line below is a declaration-style expression,
//    not a call;
//  - `val`, `nZeros`, `simple_histo` and `Radix_sort` are not defined
//    anywhere in this file;
//  - the outer loop runs sizeof(unsigned int) == 4 passes, but an LSB radix
//    sort over 32-bit keys needs 8 * sizeof(unsigned int) bit passes;
//  - d_outputVals/d_outputPos are never written and the hipMalloc'd buffers
//    (plus h_histogram, d_scan_0/1, ARRAY_SIZE) are unused and never freed.
void your_sort(unsigned int* const d_inputVals,
               unsigned int* const d_inputPos,
               unsigned int* const d_outputVals,
               unsigned int* const d_outputPos,
               const size_t numElems)
{
    //TODO
    //PUT YOUR SORT HERE
    const int ARRAY_SIZE = numElems;
    const int maxThreadPerBlock = 1024;
    const int threads = maxThreadPerBlock;
    const int size = numElems;
    const int SIZE_BYTES = size * sizeof(unsigned int);
    const int BIN_COUNT = 2;
    const int BIN_BYTES = BIN_COUNT * sizeof(unsigned int);
    unsigned int *h_histogram[2];
    h_histogram[0]=0,h_histogram[1]=0;
    unsigned int *d_histogram;
    unsigned int *d_predicate;
    unsigned int *d_scan_0;
    unsigned int *d_scan_1;
    checkCudaErrors(hipMalloc(&d_histogram, BIN_BYTES));
    checkCudaErrors(hipMalloc(&d_predicate, SIZE_BYTES));
    checkCudaErrors(hipMalloc(&d_scan_0, SIZE_BYTES));
    checkCudaErrors(hipMalloc(&d_scan_1, SIZE_BYTES));
    dim3 block(threads, 1, 1);
    dim3 grid(size/threads, 1, 1);
    for (size_t d = 0; d< sizeof(unsigned int); ++d)
    {
        // fill the predicate for COMPACT operation.
        // we alternate btw input and output
        if (d%2==0)
            hipLaunchKernelGGL(( generatePredicate), dim3(grid),dim3(block), 0, 0, d_inputVals, d_predicate, size, d);
        else if (d%2==1)
            hipLaunchKernelGGL(( generatePredicate), dim3(grid),dim3(block), 0, 0, d_outputVals, d_predicate, size, d);
        hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
        // histogram to find number of 0s.
        // scan to find the position of 0s and 1s.
        // NOTE(review): declaration-style pseudo-call — will not compile.
        unsigned int nOnes = hillis_steele_scan(unsigned int* d_out, const int* d_in, int size);
        // Create the histogram
        // NOTE(review): simple_histo / val / nZeros are undefined here.
        hipLaunchKernelGGL(( simple_histo), dim3(grid), dim3(block), 0, 0, d_inputVals, val, nZeros);
        for (size_t i = 0; i< numElems; ++i)
        {
            // NOTE(review): Radix_sort is undefined here.
            hipLaunchKernelGGL(( Radix_sort), dim3(grid), dim3(block), 0, 0, d_inputVals, d_inputPos, val, nZeros);
        }
    }
}
| 6e30853039d6410d853e28eb71f750e9de8852fa.cu | //Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
#include <cstddef>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are.
Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
// =================================================================================================
// Writes, per element, the value of bit `digit` of d_in[i] (0 or 1) into
// d_predicate. NOTE(review): no bounds guard and `size` is unused — the
// launch must cover exactly `size` threads or this reads/writes OOB.
__global__ void generatePredicate(const int *d_in, int *d_predicate,
                                  const int size, const int digit)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int myItem = d_in[myId];
    int val = myItem>>digit;
    d_predicate[myId] = (val & 1);
}
// =================================================================================================
// Step efficient scan
// Double-buffered Hillis–Steele exclusive scan of d_in into d_out in shared
// memory. Dynamic shared memory of 2 * `size` elements is required (the two
// buffers are laid out at strides of `size`).
// NOTE(review): the caller allocates 2 * BLOCK_SIZE elements and launches
// size/BLOCK_SIZE blocks, but the indexing here uses `size` as the buffer
// stride and indexes d_in/d_out by threadIdx.x only — so the kernel is only
// correct for a single-block launch with size == blockDim.x. Confirm the
// intended launch configuration.
__global__ void hillis_steele_algo(unsigned int* d_out, const int* d_in, size_t size)
{
    extern __shared__ unsigned int temp[];
    int tid = threadIdx.x;
    int pout = 0, pin=1;
    temp[tid] = tid>0? d_in[tid-1]:0; //exclusive scan: shift input right by one
    __syncthreads();
    //double buffered: alternate reading from one half, writing the other
    for (int offset = 1; offset < size; offset <<= 1) {
        pout = 1 - pout;
        pin = 1 - pout;
        if (tid >= offset)
            temp[size*pout + tid] = temp[size*pin + tid]+temp[size*pin + tid - offset];
        else
            temp[size*pout + tid] = temp[size*pin + tid];
        __syncthreads();
    }
    d_out[tid] = temp[pout*size + tid];
}
// Launches the Hillis–Steele exclusive scan of d_predicate into d_scan and
// returns the last scan element (the prefix total up to, but excluding, the
// final input). numElems must be a positive multiple of BLOCK_SIZE.
int hillis_steele_scan(unsigned int *d_scan, unsigned int *d_predicate, size_t numElems, const int BLOCK_SIZE)
{
    int threads = BLOCK_SIZE;
    int size = numElems;
    dim3 block(threads, 1, 1);
    dim3 grid(size/threads, 1, 1);
    hillis_steele_algo <<<grid, block, 2 * BLOCK_SIZE*sizeof(unsigned int)>>>
        (d_scan, d_predicate, numElems);
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
    // BUGFIX: the original dereferenced an uninitialized host pointer and
    // mis-addressed the device array (`cudaMemcpy(*h_sum, *d_scan[...]`);
    // copy the last device element into a local and return it by value.
    unsigned int h_sum = 0;
    checkCudaErrors(cudaMemcpy(&h_sum, d_scan + (numElems - 1),
                               sizeof(unsigned int), cudaMemcpyDeviceToHost));
    return (int)h_sum;
}
// =================================================================================================
// LSB radix sort driver — incomplete, non-functional scaffold.
// NOTE(review): as written this does not compile or produce output:
//  - the hillis_steele_scan line below is a declaration-style expression,
//    not a call;
//  - `val`, `nZeros`, `simple_histo` and `Radix_sort` are not defined
//    anywhere in this file;
//  - the outer loop runs sizeof(unsigned int) == 4 passes, but an LSB radix
//    sort over 32-bit keys needs 8 * sizeof(unsigned int) bit passes;
//  - d_outputVals/d_outputPos are never written and the cudaMalloc'd buffers
//    (plus h_histogram, d_scan_0/1, ARRAY_SIZE) are unused and never freed.
void your_sort(unsigned int* const d_inputVals,
               unsigned int* const d_inputPos,
               unsigned int* const d_outputVals,
               unsigned int* const d_outputPos,
               const size_t numElems)
{
    //TODO
    //PUT YOUR SORT HERE
    const int ARRAY_SIZE = numElems;
    const int maxThreadPerBlock = 1024;
    const int threads = maxThreadPerBlock;
    const int size = numElems;
    const int SIZE_BYTES = size * sizeof(unsigned int);
    const int BIN_COUNT = 2;
    const int BIN_BYTES = BIN_COUNT * sizeof(unsigned int);
    unsigned int *h_histogram[2];
    h_histogram[0]=0,h_histogram[1]=0;
    unsigned int *d_histogram;
    unsigned int *d_predicate;
    unsigned int *d_scan_0;
    unsigned int *d_scan_1;
    checkCudaErrors(cudaMalloc(&d_histogram, BIN_BYTES));
    checkCudaErrors(cudaMalloc(&d_predicate, SIZE_BYTES));
    checkCudaErrors(cudaMalloc(&d_scan_0, SIZE_BYTES));
    checkCudaErrors(cudaMalloc(&d_scan_1, SIZE_BYTES));
    dim3 block(threads, 1, 1);
    dim3 grid(size/threads, 1, 1);
    for (size_t d = 0; d< sizeof(unsigned int); ++d)
    {
        // fill the predicate for COMPACT operation.
        // we alternate btw input and output
        if (d%2==0)
            generatePredicate<<<grid,block>>>(d_inputVals, d_predicate, size, d);
        else if (d%2==1)
            generatePredicate<<<grid,block>>>(d_outputVals, d_predicate, size, d);
        cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
        // histogram to find number of 0s.
        // scan to find the position of 0s and 1s.
        // NOTE(review): declaration-style pseudo-call — will not compile.
        unsigned int nOnes = hillis_steele_scan(unsigned int* d_out, const int* d_in, int size);
        // Create the histogram
        // NOTE(review): simple_histo / val / nZeros are undefined here.
        simple_histo<<<grid, block>>>(d_inputVals, val, nZeros);
        for (size_t i = 0; i< numElems; ++i)
        {
            // NOTE(review): Radix_sort is undefined here.
            Radix_sort<<<grid, block>>>(d_inputVals, d_inputPos, val, nZeros);
        }
    }
}
|
16f2ec60012cb4eeed05f65dbd0c17eb1063ca4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_plus_4_front;
int xdim0_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_plus_4_front;
int ydim0_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_plus_4_front;
int xdim1_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_plus_4_front;
int ydim1_update_halo_kernel2_yvel_plus_4_front_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_yvel_plus_4_front * (y) + \
xdim0_update_halo_kernel2_yvel_plus_4_front * \
ydim0_update_halo_kernel2_yvel_plus_4_front * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_yvel_plus_4_front * (y) + \
xdim1_update_halo_kernel2_yvel_plus_4_front * \
ydim1_update_halo_kernel2_yvel_plus_4_front * (z))
// user function
// Auto-generated OPS user function: for each field enabled in `fields`,
// copies the y-velocity value from 4 planes behind in z (offset (0,0,-4))
// into the current (front halo) cell.
__device__
inline void
update_halo_kernel2_yvel_plus_4_front_gpu(double *yvel0, double *yvel1,
                                          const int *fields) {
  if (fields[FIELD_YVEL0] == 1)
    yvel0[OPS_ACC0(0, 0, 0)] = yvel0[OPS_ACC0(0, 0, -4)];
  if (fields[FIELD_YVEL1] == 1)
    yvel1[OPS_ACC1(0, 0, 0)] = yvel1[OPS_ACC1(0, 0, -4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
// GPU wrapper: one thread per grid point of the (size0, size1, size2)
// iteration box. Each thread advances arg0/arg1 to its own element using
// the xdim/ydim extents held in __constant__ memory, then calls the user
// function. The guard handles the ragged edge when the sizes are not
// multiples of the block dimensions.
__global__ void ops_update_halo_kernel2_yvel_plus_4_front(
    double *__restrict arg0, double *__restrict arg1,
    const int *__restrict arg2, int size0, int size1, int size2) {
  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  // Offset each dat pointer to (idx_x, idx_y, idx_z) in its own row-major
  // layout (x fastest, then y, then z).
  arg0 += idx_x * 1 * 1 +
          idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front +
          idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front *
              ydim0_update_halo_kernel2_yvel_plus_4_front;
  arg1 += idx_x * 1 * 1 +
          idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front +
          idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front *
              ydim1_update_halo_kernel2_yvel_plus_4_front;
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    update_halo_kernel2_yvel_plus_4_front_gpu(arg0, arg1, arg2);
  }
}
// host stub function
// Auto-generated host wrapper for kernel 91. It clamps the requested
// iteration range to the locally owned sub-block (under MPI), uploads the
// dat extents to __constant__ memory (cached via the *_h shadow copies),
// stages the `fields` flag array in the shared consts buffer, computes the
// byte offset of the first iterated element of each dat, and launches the
// GPU kernel.
void ops_par_loop_update_halo_kernel2_yvel_plus_4_front(
    char const *name, ops_block block, int dim, int *range, ops_arg arg0,
    ops_arg arg1, ops_arg arg2) {
  // Timing
  double t1, t2, c1, c2;
  ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args, 3, range, 91))
    return;
#endif
  if (OPS_diags > 1) {
    ops_timing_realloc(91, "update_halo_kernel2_yvel_plus_4_front");
    OPS_kernels[91].count++;
    ops_timers_core(&c1, &t1);
  }
  // compute locally allocated range for the sub-block
  int start[3];
  int end[3];
#ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  // intersect the global range with this rank's decomposition slice
  for (int n = 0; n < 3; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 3; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif
  int x_size = MAX(0, end[0] - start[0]);
  int y_size = MAX(0, end[1] - start[1]);
  int z_size = MAX(0, end[2] - start[2]);
  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  // re-upload the extents to __constant__ memory only when they changed
  // since the previous invocation (tracked by the *_h host-side copies)
  if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_4_front_h ||
      ydim0 != ydim0_update_halo_kernel2_yvel_plus_4_front_h ||
      xdim1 != xdim1_update_halo_kernel2_yvel_plus_4_front_h ||
      ydim1 != ydim1_update_halo_kernel2_yvel_plus_4_front_h) {
    hipMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_plus_4_front, &xdim0,
                      sizeof(int));
    xdim0_update_halo_kernel2_yvel_plus_4_front_h = xdim0;
    hipMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_plus_4_front, &ydim0,
                      sizeof(int));
    ydim0_update_halo_kernel2_yvel_plus_4_front_h = ydim0;
    hipMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_plus_4_front, &xdim1,
                      sizeof(int));
    xdim1_update_halo_kernel2_yvel_plus_4_front_h = xdim1;
    hipMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_plus_4_front, &ydim1,
                      sizeof(int));
    ydim1_update_halo_kernel2_yvel_plus_4_front_h = ydim1;
  }
  int *arg2h = (int *)arg2.data;
  dim3 grid((x_size - 1) / OPS_block_size_x + 1,
            (y_size - 1) / OPS_block_size_y + 1, z_size);
  dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
  // stage the NUM_FIELDS flag array in the shared consts buffer and copy
  // it to the device
  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  arg2.data = OPS_consts_h + consts_bytes;
  arg2.data_d = OPS_consts_d + consts_bytes;
  for (int d = 0; d < NUM_FIELDS; d++)
    ((int *)arg2.data)[d] = arg2h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  mvConstArraysToDevice(consts_bytes);
  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;
  char *p_a[3];
  // set up initial pointers
  int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[0].dat->d_m[d];
#endif
  // byte offset of the first iterated element of arg0
  int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
                          args[0].dat->base[0] - d_m[0]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
                                         args[0].dat->base[1] - d_m[1]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
              (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
               d_m[2]);
  p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[1].dat->d_m[d];
#endif
  // byte offset of the first iterated element of arg1
  int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
                          args[1].dat->base[0] - d_m[0]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
                                         args[1].dat->base[1] - d_m[1]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
              (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
               d_m[2]);
  p_a[1] = (char *)args[1].data_d + base1;
  ops_H_D_exchanges_device(args, 3);
  ops_halo_exchanges(args, 3, range);
  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[91].mpi_time += t2 - t1;
  }
  // call kernel wrapper function, passing in pointers to data
  hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_plus_4_front), dim3(grid), dim3(tblock), 0, 0,
      (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
      z_size);
  // synchronize only when diagnostics are on; otherwise the launch stays async
  if (OPS_diags > 1) {
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[91].time += t1 - t2;
  }
  ops_set_dirtybit_device(args, 3);
  ops_set_halo_dirtybit3(&args[0], range);
  ops_set_halo_dirtybit3(&args[1], range);
  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[91].mpi_time += t2 - t1;
    OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg1);
  }
}
| 16f2ec60012cb4eeed05f65dbd0c17eb1063ca4b.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_plus_4_front;
int xdim0_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_plus_4_front;
int ydim0_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_plus_4_front;
int xdim1_update_halo_kernel2_yvel_plus_4_front_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_plus_4_front;
int ydim1_update_halo_kernel2_yvel_plus_4_front_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_yvel_plus_4_front * (y) + \
xdim0_update_halo_kernel2_yvel_plus_4_front * \
ydim0_update_halo_kernel2_yvel_plus_4_front * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_yvel_plus_4_front * (y) + \
xdim1_update_halo_kernel2_yvel_plus_4_front * \
ydim1_update_halo_kernel2_yvel_plus_4_front * (z))
// user function
// Refreshes one halo point of yvel0/yvel1 by copying the value from the
// plane offset -4 in z (OPS_ACC*(0, 0, -4)). Each field is only touched
// when its flag in `fields` (indexed by FIELD_YVEL0 / FIELD_YVEL1) is set.
__device__
inline void
update_halo_kernel2_yvel_plus_4_front_gpu(double *yvel0, double *yvel1,
                                          const int *fields) {
  if (fields[FIELD_YVEL0] == 1)
    yvel0[OPS_ACC0(0, 0, 0)] = yvel0[OPS_ACC0(0, 0, -4)];
  if (fields[FIELD_YVEL1] == 1)
    yvel1[OPS_ACC1(0, 0, 0)] = yvel1[OPS_ACC1(0, 0, -4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
// GPU wrapper: one thread per grid point of the (size0, size1, size2)
// iteration box. Each thread advances arg0/arg1 to its own element using
// the xdim/ydim extents held in __constant__ memory, then calls the user
// function. The guard handles the ragged edge when the sizes are not
// multiples of the block dimensions.
__global__ void ops_update_halo_kernel2_yvel_plus_4_front(
    double *__restrict arg0, double *__restrict arg1,
    const int *__restrict arg2, int size0, int size1, int size2) {
  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
  // Offset each dat pointer to (idx_x, idx_y, idx_z) in its own row-major
  // layout (x fastest, then y, then z).
  arg0 += idx_x * 1 * 1 +
          idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front +
          idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_plus_4_front *
              ydim0_update_halo_kernel2_yvel_plus_4_front;
  arg1 += idx_x * 1 * 1 +
          idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front +
          idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_plus_4_front *
              ydim1_update_halo_kernel2_yvel_plus_4_front;
  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    update_halo_kernel2_yvel_plus_4_front_gpu(arg0, arg1, arg2);
  }
}
// host stub function
// Auto-generated host wrapper for kernel 91. It clamps the requested
// iteration range to the locally owned sub-block (under MPI), uploads the
// dat extents to __constant__ memory (cached via the *_h shadow copies),
// stages the `fields` flag array in the shared consts buffer, computes the
// byte offset of the first iterated element of each dat, and launches the
// CUDA kernel.
void ops_par_loop_update_halo_kernel2_yvel_plus_4_front(
    char const *name, ops_block block, int dim, int *range, ops_arg arg0,
    ops_arg arg1, ops_arg arg2) {
  // Timing
  double t1, t2, c1, c2;
  ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
  if (!ops_checkpointing_before(args, 3, range, 91))
    return;
#endif
  if (OPS_diags > 1) {
    ops_timing_realloc(91, "update_halo_kernel2_yvel_plus_4_front");
    OPS_kernels[91].count++;
    ops_timers_core(&c1, &t1);
  }
  // compute locally allocated range for the sub-block
  int start[3];
  int end[3];
#ifdef OPS_MPI
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  // intersect the global range with this rank's decomposition slice
  for (int n = 0; n < 3; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 3; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif
  int x_size = MAX(0, end[0] - start[0]);
  int y_size = MAX(0, end[1] - start[1]);
  int z_size = MAX(0, end[2] - start[2]);
  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  // re-upload the extents to __constant__ memory only when they changed
  // since the previous invocation (tracked by the *_h host-side copies)
  if (xdim0 != xdim0_update_halo_kernel2_yvel_plus_4_front_h ||
      ydim0 != ydim0_update_halo_kernel2_yvel_plus_4_front_h ||
      xdim1 != xdim1_update_halo_kernel2_yvel_plus_4_front_h ||
      ydim1 != ydim1_update_halo_kernel2_yvel_plus_4_front_h) {
    cudaMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_plus_4_front, &xdim0,
                       sizeof(int));
    xdim0_update_halo_kernel2_yvel_plus_4_front_h = xdim0;
    cudaMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_plus_4_front, &ydim0,
                       sizeof(int));
    ydim0_update_halo_kernel2_yvel_plus_4_front_h = ydim0;
    cudaMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_plus_4_front, &xdim1,
                       sizeof(int));
    xdim1_update_halo_kernel2_yvel_plus_4_front_h = xdim1;
    cudaMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_plus_4_front, &ydim1,
                       sizeof(int));
    ydim1_update_halo_kernel2_yvel_plus_4_front_h = ydim1;
  }
  int *arg2h = (int *)arg2.data;
  dim3 grid((x_size - 1) / OPS_block_size_x + 1,
            (y_size - 1) / OPS_block_size_y + 1, z_size);
  dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
  // stage the NUM_FIELDS flag array in the shared consts buffer and copy
  // it to the device
  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  arg2.data = OPS_consts_h + consts_bytes;
  arg2.data_d = OPS_consts_d + consts_bytes;
  for (int d = 0; d < NUM_FIELDS; d++)
    ((int *)arg2.data)[d] = arg2h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  mvConstArraysToDevice(consts_bytes);
  int dat0 = args[0].dat->elem_size;
  int dat1 = args[1].dat->elem_size;
  char *p_a[3];
  // set up initial pointers
  int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[0].dat->d_m[d];
#endif
  // byte offset of the first iterated element of arg0
  int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
                          args[0].dat->base[0] - d_m[0]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
                                         args[0].dat->base[1] - d_m[1]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
              (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
               d_m[2]);
  p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
  for (int d = 0; d < dim; d++)
    d_m[d] =
        args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
  for (int d = 0; d < dim; d++)
    d_m[d] = args[1].dat->d_m[d];
#endif
  // byte offset of the first iterated element of arg1
  int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
                          args[1].dat->base[0] - d_m[0]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
                                         args[1].dat->base[1] - d_m[1]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
              (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
               d_m[2]);
  p_a[1] = (char *)args[1].data_d + base1;
  ops_H_D_exchanges_device(args, 3);
  ops_halo_exchanges(args, 3, range);
  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[91].mpi_time += t2 - t1;
  }
  // call kernel wrapper function, passing in pointers to data
  ops_update_halo_kernel2_yvel_plus_4_front<<<grid, tblock>>>(
      (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
      z_size);
  // synchronize only when diagnostics are on; otherwise the launch stays async
  if (OPS_diags > 1) {
    cutilSafeCall(cudaDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[91].time += t1 - t2;
  }
  ops_set_dirtybit_device(args, 3);
  ops_set_halo_dirtybit3(&args[0], range);
  ops_set_halo_dirtybit3(&args[1], range);
  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[91].mpi_time += t2 - t1;
    OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[91].transfer += ops_compute_transfer(dim, start, end, &arg1);
  }
}
|
8b9cbb1289e9be8443db000fc5a5d06820559133.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "scalarProd.h"
// Allocates/frees the host and device buffers used by the example.
// phase 0: allocate host arrays *h_a and *h_b (N floats each; N comes
//          from scalarProd.h).
// phase 1: allocate the device array *d_a (N floats).
// phase 2: release all three buffers.
// NOTE(review): the malloc results in phase 0 are not checked, and only
// the hipMalloc status is reported (when DEBUG is set).
void Manage_Memory(int phase, float **h_a, float **h_b, float **d_a){
  hipError_t Error;
  if (phase==0) {
    // allocate h_a and h_b
    *h_a = (float*)malloc( N*sizeof(float) );
    *h_b = (float*)malloc( N*sizeof(float) );
  }
  if (phase==1) {
    // allocate d_a
    Error = hipMalloc((void**)d_a,N*sizeof(float));
    if (DEBUG) printf("CUDA error (hipMalloc) = %s\n",hipGetErrorString(Error));
  }
  if (phase==2) {
    // free h_a, h_b and d_a
    free(*h_a);
    free(*h_b);
    hipFree(*d_a);
  }
}
// Copies the N-float vector between host and device.
// phase 0: host -> device (*h_u into *d_u).
// phase 1: device -> host (*d_u into *h_u).
// When DEBUG is set, the hipMemcpy status is printed. Fix: the debug
// messages previously reported the copy direction reversed ("d -> h" for
// the host-to-device copy and vice versa); the labels now match the
// actual hipMemcpy direction.
void Manage_Comms(int phase, float **h_u, float **d_u){
  hipError_t Error;
  if (phase==0) {
    // send data from host to device
    Error = hipMemcpy(*d_u,*h_u,N*sizeof(float),hipMemcpyHostToDevice);
    if (DEBUG) printf("CUDA error (hipMemcpy h -> d ) = %s\n",hipGetErrorString(Error));
  }
  if (phase==1) {
    // send data from device to host
    Error = hipMemcpy(*h_u,*d_u,N*sizeof(float),hipMemcpyDeviceToHost);
    if (DEBUG) printf("CUDA error (hipMemcpy d -> h ) = %s\n",hipGetErrorString(Error));
  }
}
// Kernel: doubles each of the N elements of u in place (u[i] = 2*u[i]).
// One thread per element; the i < N guard handles the ragged tail when N
// is not a multiple of the block size.
__global__ void GPU_Func(float *u){
  // threads index
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  // duplicate the value of vector u
  if (i < N) {
    u[i] = 2*u[i];
  }
}
// Host launcher for GPU_Func: 128 threads per block, ceil(N/128) blocks.
// NOTE(review): no error check or synchronization follows the launch, so
// kernel failures surface only at a later synchronizing call.
void My_GPU_Func(float **u){
  int threads= 128;
  int blocks=(N+threads-1)/threads;
  hipLaunchKernelGGL(( GPU_Func), dim3(blocks),dim3(threads), 0, 0, *u);
}
// Writes the N-element vector u to "result.txt" as one "index<TAB> value"
// line per element; reports on stdout if the file cannot be opened.
void Save_Results(float *u){
  // print result to txt file
  FILE *out = fopen("result.txt", "w");
  if (out == NULL) {
    printf("Unable to save to file\n");
    return;
  }
  for (int idx = 0; idx < N; idx++) {
    fprintf(out, "%d\t %g\n", idx, u[idx]);
  }
  fclose(out);
}
| 8b9cbb1289e9be8443db000fc5a5d06820559133.cu |
#include "scalarProd.h"
// Allocates/frees the host and device buffers used by the example.
// phase 0: allocate host arrays *h_a and *h_b (N floats each; N comes
//          from scalarProd.h).
// phase 1: allocate the device array *d_a (N floats).
// phase 2: release all three buffers.
// NOTE(review): the malloc results in phase 0 are not checked, and only
// the cudaMalloc status is reported (when DEBUG is set).
void Manage_Memory(int phase, float **h_a, float **h_b, float **d_a){
  cudaError_t Error;
  if (phase==0) {
    // allocate h_a and h_b
    *h_a = (float*)malloc( N*sizeof(float) );
    *h_b = (float*)malloc( N*sizeof(float) );
  }
  if (phase==1) {
    // allocate d_a
    Error = cudaMalloc((void**)d_a,N*sizeof(float));
    if (DEBUG) printf("CUDA error (cudaMalloc) = %s\n",cudaGetErrorString(Error));
  }
  if (phase==2) {
    // free h_a, h_b and d_a
    free(*h_a);
    free(*h_b);
    cudaFree(*d_a);
  }
}
// Copies the N-float vector between host and device.
// phase 0: host -> device (*h_u into *d_u).
// phase 1: device -> host (*d_u into *h_u).
// When DEBUG is set, the cudaMemcpy status is printed. Fix: the debug
// messages previously reported the copy direction reversed ("d -> h" for
// the host-to-device copy and vice versa); the labels now match the
// actual cudaMemcpy direction.
void Manage_Comms(int phase, float **h_u, float **d_u){
  cudaError_t Error;
  if (phase==0) {
    // send data from host to device
    Error = cudaMemcpy(*d_u,*h_u,N*sizeof(float),cudaMemcpyHostToDevice);
    if (DEBUG) printf("CUDA error (cudaMemcpy h -> d ) = %s\n",cudaGetErrorString(Error));
  }
  if (phase==1) {
    // send data from device to host
    Error = cudaMemcpy(*h_u,*d_u,N*sizeof(float),cudaMemcpyDeviceToHost);
    if (DEBUG) printf("CUDA error (cudaMemcpy d -> h ) = %s\n",cudaGetErrorString(Error));
  }
}
// Kernel: doubles each of the N elements of u in place (u[i] = 2*u[i]).
// One thread per element; the i < N guard handles the ragged tail when N
// is not a multiple of the block size.
__global__ void GPU_Func(float *u){
  // threads index
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  // duplicate the value of vector u
  if (i < N) {
    u[i] = 2*u[i];
  }
}
// Host launcher for GPU_Func: 128 threads per block, ceil(N/128) blocks.
// NOTE(review): no error check or synchronization follows the launch, so
// kernel failures surface only at a later synchronizing call.
void My_GPU_Func(float **u){
  int threads= 128;
  int blocks=(N+threads-1)/threads;
  GPU_Func<<<blocks,threads>>>(*u);
}
// Writes the N-element vector u to "result.txt" as one "index<TAB> value"
// line per element; reports on stdout if the file cannot be opened.
void Save_Results(float *u){
  // print result to txt file
  FILE *out = fopen("result.txt", "w");
  if (out == NULL) {
    printf("Unable to save to file\n");
    return;
  }
  for (int idx = 0; idx < N; idx++) {
    fprintf(out, "%d\t %g\n", idx, u[idx]);
  }
  fclose(out);
}
|
e8c3fc6a46dcae00424e062510ce963670b74e2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layers/elementwise.h"
#include "reduceUtils_hip.cuh"
namespace graphdl
{
namespace core
{
namespace layers
{
namespace cuda
{
namespace
{
template <Elementwise elem>
__device__ float op(float f1, float f2);
template <>
__device__ float op<Elementwise::kADD>(float f1, float f2)
{
return f1 + f2;
}
template <>
__device__ float op<Elementwise::kSUB>(float f1, float f2)
{
return f1 - f2;
}
template <>
__device__ float op<Elementwise::kMUL>(float f1, float f2)
{
return f1 * f2;
}
template <>
__device__ float op<Elementwise::kDIV>(float f1, float f2)
{
return f1 / f2;
}
template <Elementwise elem, int n>
__device__ float opGrad(float f1, float f2);
template <>
__device__ float opGrad<Elementwise::kADD, 0>(float f1, float f2)
{
return 1.;
}
template <>
__device__ float opGrad<Elementwise::kADD, 1>(float f1, float f2)
{
return 1.;
}
template <>
__device__ float opGrad<Elementwise::kSUB, 0>(float f1, float f2)
{
return 1.;
}
template <>
__device__ float opGrad<Elementwise::kSUB, 1>(float f1, float f2)
{
return -1.;
}
template <>
__device__ float opGrad<Elementwise::kMUL, 0>(float f1, float f2)
{
return f2;
}
template <>
__device__ float opGrad<Elementwise::kMUL, 1>(float f1, float f2)
{
return f1;
}
template <>
__device__ float opGrad<Elementwise::kDIV, 0>(float f1, float f2)
{
return 1. / f2;
}
template <>
__device__ float opGrad<Elementwise::kDIV, 1>(float f1, float f2)
{
return -f1 / (f2 * f2);
}
// Element-wise binary op with modulo "broadcasting":
// y[id] = op(x1[id % size1], x2[id % size2]) for id < max(size1, size2).
// y must hold max(size1, size2) elements; presumably the smaller size
// divides the larger one (TODO confirm with callers).
template <Elementwise elem>
__global__ void elementwiseBackKernel(const float* x1, size_t size1,
                                      const float* x2, size_t size2, float* y)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < size1 || id < size2)
        y[id] = op<elem>(x1[id % size1], x2[id % size2]);
}
// Gradient w.r.t. input n when that input has the full output size:
// xG[id] = yG[id] * d(op)/d(x_n) evaluated with modulo indexing into the
// two inputs; one gradient element is written per output element.
template <Elementwise elem, int n>
__global__ void elementwiseGradientKernelBig(const float* x1, size_t size1,
                                             const float* x2, size_t size2,
                                             const float* yG, float* xG)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < size1 || id < size2)
        xG[id] = yG[id] * opGrad<elem, n>(x1[id % size1], x2[id % size2]);
}
// Gradient contribution for the smaller (broadcast) input: each product is
// scattered into `out` (sized max(size1, size2)) so that the
// maxSize/minSize partials belonging to one small-input element end up in
// a contiguous run; a follow-up reduce<kSUM> collapses each run (see
// runBackGradientKernels).
template <Elementwise elem, int n>
__global__ void elementwiseGradientKernelSmall(const float* x1, size_t size1,
                                               const float* x2, size_t size2,
                                               const float* yG, float* out)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    size_t minSize = (size1 > size2) ? size2 : size1;
    size_t maxSize = (size1 > size2) ? size1 : size2;
    if (id < maxSize)
        out[(maxSize / minSize) * (id % minSize) + (id / minSize)] =
            yG[id] * opGrad<elem, n>(x1[id % size1], x2[id % size2]);
}
// Launches the gradient kernels for both inputs of an element-wise op.
// Equal sizes: one direct ("Big") kernel per input. Unequal sizes: the
// larger input gets the direct kernel; the smaller input's partials are
// scattered into a temporary buffer and then summed with reduce<kSUM>.
// NOTE(review): the hipMalloc result is unchecked, and the temporary is
// allocated and freed on every call.
template <Elementwise elem>
void runBackGradientKernels(const float* x1, size_t size1, const float* x2,
                            size_t size2, const float* yG, float* x1Grad,
                            float* x2Grad)
{
    const int BLOCK_SIZE = 256;
    const int NUM_BLOCKS =
        ((size1 > size2 ? size1 : size2) + BLOCK_SIZE - 1) / BLOCK_SIZE;
    if (size1 == size2)
    {
        hipLaunchKernelGGL(( elementwiseGradientKernelBig<elem, 0>)
            , dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, size1, x2, size2, yG, x1Grad);
        hipLaunchKernelGGL(( elementwiseGradientKernelBig<elem, 1>)
            , dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, size1, x2, size2, yG, x2Grad);
        return;
    }
    float* temp;
    hipMalloc((void**)&temp, (size1 > size2 ? size1 : size2) * sizeof(float));
    if (size1 > size2)
    {
        hipLaunchKernelGGL(( elementwiseGradientKernelBig<elem, 0>)
            , dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, size1, x2, size2, yG, x1Grad);
        hipLaunchKernelGGL(( elementwiseGradientKernelSmall<elem, 1>)
            , dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, size1, x2, size2, yG, temp);
        reduce<ReduceOpCuda::kSUM>(temp, x2Grad, size2, size1 / size2);
    }
    else
    {
        hipLaunchKernelGGL(( elementwiseGradientKernelSmall<elem, 0>)
            , dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, size1, x2, size2, yG, temp);
        hipLaunchKernelGGL(( elementwiseGradientKernelBig<elem, 1>)
            , dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, size1, x2, size2, yG, x2Grad);
        reduce<ReduceOpCuda::kSUM>(temp, x1Grad, size1, size2 / size1);
    }
    hipFree(temp);
}
// Element-wise op where the smaller operand is broadcast over leading
// runs: consecutive groups of `reduceSize` elements of the full-size
// operand share one element of the broadcast operand (index id /
// reduceSize). b == 1 means x1 is the full-size operand; otherwise x2 is.
template <Elementwise elem, int b>
__global__ void elementwiseFrontKernel(const float* x1, size_t size,
                                       const float* x2, size_t reduceSize,
                                       float* y)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < size)
    {
        if (b == 1)
            y[id] = op<elem>(x1[id], x2[id / reduceSize]);
        else
            y[id] = op<elem>(x1[id / reduceSize], x2[id]);
    }
}
// Gradients for the "front"-broadcast case. The full-size input (x1 when
// n == 0, x2 when n == 1) has its gradient written directly to xGrad;
// the broadcast input's per-element partials go into `temp`, to be summed
// over each reduceSize-run by reduce<kSUM> in the host wrapper.
template <Elementwise elem, int n>
__global__ void elementwiseFrontGradientKernel(const float* x1, const float* x2,
                                               size_t size, size_t reduceSize,
                                               const float* yGrad, float* xGrad,
                                               float* temp)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < size)
    {
        if (n == 0)
        {
            xGrad[id] =
                yGrad[id] * opGrad<elem, 0>(x1[id], x2[id / reduceSize]);
            temp[id] = yGrad[id] * opGrad<elem, 1>(x1[id], x2[id / reduceSize]);
        }
        else
        {
            temp[id] = yGrad[id] * opGrad<elem, 0>(x1[id / reduceSize], x2[id]);
            xGrad[id] =
                yGrad[id] * opGrad<elem, 1>(x1[id / reduceSize], x2[id]);
        }
    }
}
} // namespace
void runElementwiseBackDevice(const float* x1, size_t size1, const float* x2,
size_t size2, float* y, Elementwise op)
{
const int BLOCK_SIZE = 256;
const int NUM_BLOCKS =
((size1 > size2 ? size1 : size2) + BLOCK_SIZE - 1) / BLOCK_SIZE;
switch (op)
{
case Elementwise::kADD:
hipLaunchKernelGGL(( elementwiseBackKernel<Elementwise::kADD>)
, dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, size1, x2, size2, y);
break;
case Elementwise::kSUB:
hipLaunchKernelGGL(( elementwiseBackKernel<Elementwise::kSUB>)
, dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, size1, x2, size2, y);
break;
case Elementwise::kMUL:
hipLaunchKernelGGL(( elementwiseBackKernel<Elementwise::kMUL>)
, dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, size1, x2, size2, y);
break;
case Elementwise::kDIV:
hipLaunchKernelGGL(( elementwiseBackKernel<Elementwise::kDIV>)
, dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, size1, x2, size2, y);
break;
}
}
void runElementwiseBackGradientDevice(const float* x1, size_t size1,
const float* x2, size_t size2,
const float* yGrad, float* x1Grad,
float* x2Grad, Elementwise op)
{
switch (op)
{
case Elementwise::kADD:
runBackGradientKernels<Elementwise::kADD>(x1, size1, x2, size2, yGrad,
x1Grad, x2Grad);
break;
case Elementwise::kSUB:
runBackGradientKernels<Elementwise::kSUB>(x1, size1, x2, size2, yGrad,
x1Grad, x2Grad);
break;
case Elementwise::kMUL:
runBackGradientKernels<Elementwise::kMUL>(x1, size1, x2, size2, yGrad,
x1Grad, x2Grad);
break;
case Elementwise::kDIV:
runBackGradientKernels<Elementwise::kDIV>(x1, size1, x2, size2, yGrad,
x1Grad, x2Grad);
break;
}
}
void runElementwiseFrontDevice(const float* x1, size_t size1, const float* x2,
size_t size2, float* y, Elementwise op)
{
size_t size = size1 > size2 ? size1 : size2;
size_t reduceSize = size / (size1 < size2 ? size1 : size2);
const int BLOCK_SIZE = 256;
const int NUM_BLOCKS = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
#define CASE_RUN_KERNEL(OP, NUM) \
case Elementwise::OP: \
hipLaunchKernelGGL(( elementwiseFrontKernel<Elementwise::OP, NUM>) \
, dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, size, x2, reduceSize, y); \
break;
if (size1 > size2)
{
switch (op)
{
CASE_RUN_KERNEL(kADD, 1)
CASE_RUN_KERNEL(kSUB, 1)
CASE_RUN_KERNEL(kMUL, 1)
CASE_RUN_KERNEL(kDIV, 1)
}
}
else
{
switch (op)
{
CASE_RUN_KERNEL(kADD, 2)
CASE_RUN_KERNEL(kSUB, 2)
CASE_RUN_KERNEL(kMUL, 2)
CASE_RUN_KERNEL(kDIV, 2)
}
}
#undef CASE_RUN_KERNEL
}
void runElementwiseFrontGradientDevice(const float* x1, size_t size1,
const float* x2, size_t size2,
const float* yGrad, float* x1Grad,
float* x2Grad, Elementwise op)
{
size_t size = size1 > size2 ? size1 : size2;
size_t reduceSize = size / (size1 < size2 ? size1 : size2);
const int BLOCK_SIZE = 256;
const int NUM_BLOCKS = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
float* temp;
hipMalloc(&temp, size * sizeof(float));
if (size1 > size2)
{
switch (op)
{
case Elementwise::kADD:
hipLaunchKernelGGL(( elementwiseFrontGradientKernel<Elementwise::kADD, 0>)
, dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, x2, size, reduceSize, yGrad,
x1Grad, temp);
break;
case Elementwise::kSUB:
hipLaunchKernelGGL(( elementwiseFrontGradientKernel<Elementwise::kSUB, 0>)
, dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, x2, size, reduceSize, yGrad,
x1Grad, temp);
break;
case Elementwise::kMUL:
hipLaunchKernelGGL(( elementwiseFrontGradientKernel<Elementwise::kMUL, 0>)
, dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, x2, size, reduceSize, yGrad,
x1Grad, temp);
break;
case Elementwise::kDIV:
hipLaunchKernelGGL(( elementwiseFrontGradientKernel<Elementwise::kDIV, 0>)
, dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, x2, size, reduceSize, yGrad,
x1Grad, temp);
break;
}
reduce<ReduceOpCuda::kSUM>(temp, x2Grad, size2, reduceSize);
}
else
{
switch (op)
{
case Elementwise::kADD:
hipLaunchKernelGGL(( elementwiseFrontGradientKernel<Elementwise::kADD, 1>)
, dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, x2, size, reduceSize, yGrad,
x2Grad, temp);
break;
case Elementwise::kSUB:
hipLaunchKernelGGL(( elementwiseFrontGradientKernel<Elementwise::kSUB, 1>)
, dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, x2, size, reduceSize, yGrad,
x2Grad, temp);
break;
case Elementwise::kMUL:
hipLaunchKernelGGL(( elementwiseFrontGradientKernel<Elementwise::kMUL, 1>)
, dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, x2, size, reduceSize, yGrad,
x2Grad, temp);
break;
case Elementwise::kDIV:
hipLaunchKernelGGL(( elementwiseFrontGradientKernel<Elementwise::kDIV, 1>)
, dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, x1, x2, size, reduceSize, yGrad,
x2Grad, temp);
break;
}
reduce<ReduceOpCuda::kSUM>(temp, x1Grad, size1, reduceSize);
}
hipFree(temp);
}
} // namespace cuda
} // namespace layers
} // namespace core
} // namespace graphdl
| e8c3fc6a46dcae00424e062510ce963670b74e2d.cu | #include "layers/elementwise.h"
#include "reduceUtils.cuh"
namespace graphdl
{
namespace core
{
namespace layers
{
namespace cuda
{
namespace
{
template <Elementwise elem>
__device__ float op(float f1, float f2);
template <>
__device__ float op<Elementwise::kADD>(float f1, float f2)
{
return f1 + f2;
}
template <>
__device__ float op<Elementwise::kSUB>(float f1, float f2)
{
return f1 - f2;
}
template <>
__device__ float op<Elementwise::kMUL>(float f1, float f2)
{
return f1 * f2;
}
template <>
__device__ float op<Elementwise::kDIV>(float f1, float f2)
{
return f1 / f2;
}
template <Elementwise elem, int n>
__device__ float opGrad(float f1, float f2);
template <>
__device__ float opGrad<Elementwise::kADD, 0>(float f1, float f2)
{
return 1.;
}
template <>
__device__ float opGrad<Elementwise::kADD, 1>(float f1, float f2)
{
return 1.;
}
template <>
__device__ float opGrad<Elementwise::kSUB, 0>(float f1, float f2)
{
return 1.;
}
template <>
__device__ float opGrad<Elementwise::kSUB, 1>(float f1, float f2)
{
return -1.;
}
template <>
__device__ float opGrad<Elementwise::kMUL, 0>(float f1, float f2)
{
return f2;
}
template <>
__device__ float opGrad<Elementwise::kMUL, 1>(float f1, float f2)
{
return f1;
}
template <>
__device__ float opGrad<Elementwise::kDIV, 0>(float f1, float f2)
{
return 1. / f2;
}
template <>
__device__ float opGrad<Elementwise::kDIV, 1>(float f1, float f2)
{
return -f1 / (f2 * f2);
}
template <Elementwise elem>
__global__ void elementwiseBackKernel(const float* x1, size_t size1,
const float* x2, size_t size2, float* y)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size1 || id < size2)
y[id] = op<elem>(x1[id % size1], x2[id % size2]);
}
template <Elementwise elem, int n>
__global__ void elementwiseGradientKernelBig(const float* x1, size_t size1,
const float* x2, size_t size2,
const float* yG, float* xG)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size1 || id < size2)
xG[id] = yG[id] * opGrad<elem, n>(x1[id % size1], x2[id % size2]);
}
template <Elementwise elem, int n>
__global__ void elementwiseGradientKernelSmall(const float* x1, size_t size1,
const float* x2, size_t size2,
const float* yG, float* out)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
size_t minSize = (size1 > size2) ? size2 : size1;
size_t maxSize = (size1 > size2) ? size1 : size2;
if (id < maxSize)
out[(maxSize / minSize) * (id % minSize) + (id / minSize)] =
yG[id] * opGrad<elem, n>(x1[id % size1], x2[id % size2]);
}
template <Elementwise elem>
void runBackGradientKernels(const float* x1, size_t size1, const float* x2,
size_t size2, const float* yG, float* x1Grad,
float* x2Grad)
{
const int BLOCK_SIZE = 256;
const int NUM_BLOCKS =
((size1 > size2 ? size1 : size2) + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (size1 == size2)
{
elementwiseGradientKernelBig<elem, 0>
<<<NUM_BLOCKS, BLOCK_SIZE>>>(x1, size1, x2, size2, yG, x1Grad);
elementwiseGradientKernelBig<elem, 1>
<<<NUM_BLOCKS, BLOCK_SIZE>>>(x1, size1, x2, size2, yG, x2Grad);
return;
}
float* temp;
cudaMalloc((void**)&temp, (size1 > size2 ? size1 : size2) * sizeof(float));
if (size1 > size2)
{
elementwiseGradientKernelBig<elem, 0>
<<<NUM_BLOCKS, BLOCK_SIZE>>>(x1, size1, x2, size2, yG, x1Grad);
elementwiseGradientKernelSmall<elem, 1>
<<<NUM_BLOCKS, BLOCK_SIZE>>>(x1, size1, x2, size2, yG, temp);
reduce<ReduceOpCuda::kSUM>(temp, x2Grad, size2, size1 / size2);
}
else
{
elementwiseGradientKernelSmall<elem, 0>
<<<NUM_BLOCKS, BLOCK_SIZE>>>(x1, size1, x2, size2, yG, temp);
elementwiseGradientKernelBig<elem, 1>
<<<NUM_BLOCKS, BLOCK_SIZE>>>(x1, size1, x2, size2, yG, x2Grad);
reduce<ReduceOpCuda::kSUM>(temp, x1Grad, size1, size2 / size1);
}
cudaFree(temp);
}
template <Elementwise elem, int b>
__global__ void elementwiseFrontKernel(const float* x1, size_t size,
const float* x2, size_t reduceSize,
float* y)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size)
{
if (b == 1)
y[id] = op<elem>(x1[id], x2[id / reduceSize]);
else
y[id] = op<elem>(x1[id / reduceSize], x2[id]);
}
}
template <Elementwise elem, int n>
__global__ void elementwiseFrontGradientKernel(const float* x1, const float* x2,
size_t size, size_t reduceSize,
const float* yGrad, float* xGrad,
float* temp)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < size)
{
if (n == 0)
{
xGrad[id] =
yGrad[id] * opGrad<elem, 0>(x1[id], x2[id / reduceSize]);
temp[id] = yGrad[id] * opGrad<elem, 1>(x1[id], x2[id / reduceSize]);
}
else
{
temp[id] = yGrad[id] * opGrad<elem, 0>(x1[id / reduceSize], x2[id]);
xGrad[id] =
yGrad[id] * opGrad<elem, 1>(x1[id / reduceSize], x2[id]);
}
}
}
} // namespace
void runElementwiseBackDevice(const float* x1, size_t size1, const float* x2,
size_t size2, float* y, Elementwise op)
{
const int BLOCK_SIZE = 256;
const int NUM_BLOCKS =
((size1 > size2 ? size1 : size2) + BLOCK_SIZE - 1) / BLOCK_SIZE;
switch (op)
{
case Elementwise::kADD:
elementwiseBackKernel<Elementwise::kADD>
<<<NUM_BLOCKS, BLOCK_SIZE>>>(x1, size1, x2, size2, y);
break;
case Elementwise::kSUB:
elementwiseBackKernel<Elementwise::kSUB>
<<<NUM_BLOCKS, BLOCK_SIZE>>>(x1, size1, x2, size2, y);
break;
case Elementwise::kMUL:
elementwiseBackKernel<Elementwise::kMUL>
<<<NUM_BLOCKS, BLOCK_SIZE>>>(x1, size1, x2, size2, y);
break;
case Elementwise::kDIV:
elementwiseBackKernel<Elementwise::kDIV>
<<<NUM_BLOCKS, BLOCK_SIZE>>>(x1, size1, x2, size2, y);
break;
}
}
void runElementwiseBackGradientDevice(const float* x1, size_t size1,
const float* x2, size_t size2,
const float* yGrad, float* x1Grad,
float* x2Grad, Elementwise op)
{
switch (op)
{
case Elementwise::kADD:
runBackGradientKernels<Elementwise::kADD>(x1, size1, x2, size2, yGrad,
x1Grad, x2Grad);
break;
case Elementwise::kSUB:
runBackGradientKernels<Elementwise::kSUB>(x1, size1, x2, size2, yGrad,
x1Grad, x2Grad);
break;
case Elementwise::kMUL:
runBackGradientKernels<Elementwise::kMUL>(x1, size1, x2, size2, yGrad,
x1Grad, x2Grad);
break;
case Elementwise::kDIV:
runBackGradientKernels<Elementwise::kDIV>(x1, size1, x2, size2, yGrad,
x1Grad, x2Grad);
break;
}
}
// Launches the forward elementwise kernel over the larger operand.
// reduceSize is the broadcast factor (larger size / smaller size); the
// kernel's integer template argument selects which operand is full-size
// (1: x1 is larger, 2: x2 is larger).
void runElementwiseFrontDevice(const float* x1, size_t size1, const float* x2,
                               size_t size2, float* y, Elementwise op)
{
    const size_t size = size1 > size2 ? size1 : size2;
    const size_t reduceSize = size / (size1 < size2 ? size1 : size2);
    const int blockSize = 256;
    const int numBlocks = (size + blockSize - 1) / blockSize;
    if (size1 > size2)
    {
        switch (op)
        {
        case Elementwise::kADD:
            elementwiseFrontKernel<Elementwise::kADD, 1>
                <<<numBlocks, blockSize>>>(x1, size, x2, reduceSize, y);
            break;
        case Elementwise::kSUB:
            elementwiseFrontKernel<Elementwise::kSUB, 1>
                <<<numBlocks, blockSize>>>(x1, size, x2, reduceSize, y);
            break;
        case Elementwise::kMUL:
            elementwiseFrontKernel<Elementwise::kMUL, 1>
                <<<numBlocks, blockSize>>>(x1, size, x2, reduceSize, y);
            break;
        case Elementwise::kDIV:
            elementwiseFrontKernel<Elementwise::kDIV, 1>
                <<<numBlocks, blockSize>>>(x1, size, x2, reduceSize, y);
            break;
        }
    }
    else
    {
        switch (op)
        {
        case Elementwise::kADD:
            elementwiseFrontKernel<Elementwise::kADD, 2>
                <<<numBlocks, blockSize>>>(x1, size, x2, reduceSize, y);
            break;
        case Elementwise::kSUB:
            elementwiseFrontKernel<Elementwise::kSUB, 2>
                <<<numBlocks, blockSize>>>(x1, size, x2, reduceSize, y);
            break;
        case Elementwise::kMUL:
            elementwiseFrontKernel<Elementwise::kMUL, 2>
                <<<numBlocks, blockSize>>>(x1, size, x2, reduceSize, y);
            break;
        case Elementwise::kDIV:
            elementwiseFrontKernel<Elementwise::kDIV, 2>
                <<<numBlocks, blockSize>>>(x1, size, x2, reduceSize, y);
            break;
        }
    }
}
// Computes both input gradients of a broadcast elementwise op.
// The gradient of the larger operand is written directly; the per-element
// gradient of the broadcast operand is staged in a scratch buffer and then
// sum-reduced into its gradient output.
void runElementwiseFrontGradientDevice(const float* x1, size_t size1,
                                       const float* x2, size_t size2,
                                       const float* yGrad, float* x1Grad,
                                       float* x2Grad, Elementwise op)
{
    const size_t size = size1 > size2 ? size1 : size2;
    const size_t reduceSize = size / (size1 < size2 ? size1 : size2);
    const int blockSize = 256;
    const int numBlocks = (size + blockSize - 1) / blockSize;
    // Scratch for the not-yet-reduced gradient of the smaller operand.
    float* temp;
    cudaMalloc(&temp, size * sizeof(float));
#define DISPATCH_FRONT_GRAD(OP, NUM, GRAD)                               \
    case Elementwise::OP:                                                \
        elementwiseFrontGradientKernel<Elementwise::OP, NUM>             \
            <<<numBlocks, blockSize>>>(x1, x2, size, reduceSize, yGrad,  \
                                       GRAD, temp);                      \
        break;
    if (size1 > size2)
    {
        switch (op)
        {
            DISPATCH_FRONT_GRAD(kADD, 0, x1Grad)
            DISPATCH_FRONT_GRAD(kSUB, 0, x1Grad)
            DISPATCH_FRONT_GRAD(kMUL, 0, x1Grad)
            DISPATCH_FRONT_GRAD(kDIV, 0, x1Grad)
        }
        reduce<ReduceOpCuda::kSUM>(temp, x2Grad, size2, reduceSize);
    }
    else
    {
        switch (op)
        {
            DISPATCH_FRONT_GRAD(kADD, 1, x2Grad)
            DISPATCH_FRONT_GRAD(kSUB, 1, x2Grad)
            DISPATCH_FRONT_GRAD(kMUL, 1, x2Grad)
            DISPATCH_FRONT_GRAD(kDIV, 1, x2Grad)
        }
        reduce<ReduceOpCuda::kSUM>(temp, x1Grad, size1, reduceSize);
    }
#undef DISPATCH_FRONT_GRAD
    cudaFree(temp);
}
} // namespace cuda
} // namespace layers
} // namespace core
} // namespace graphdl
|
6d07a8dd39c6312937cd038b0afa2085c51432df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Turns a buffer of pre-generated random values into a binary dropout mask,
// in place: maskPtr[i] becomes 1.0f when dropout < maskPtr[i] (unit kept)
// and 0.0f otherwise. Assumes maskPtr was pre-filled with uniform random
// values in [0,1] — TODO confirm with the caller.
__global__ void RBMDropoutMaskKernel( float *maskPtr, float dropout, int thisLayerSize )
{
// Flatten the (2D grid of 1D blocks) launch into a linear element index.
int index = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (index < thisLayerSize)
{
// The bool comparison result converts implicitly to 0.0f / 1.0f.
maskPtr[index] = dropout < maskPtr[index];
}
} | 6d07a8dd39c6312937cd038b0afa2085c51432df.cu | #include "includes.h"
// Turns a buffer of pre-generated random values into a binary dropout mask,
// in place: maskPtr[i] becomes 1.0f when dropout < maskPtr[i] (unit kept)
// and 0.0f otherwise. Assumes maskPtr was pre-filled with uniform random
// values in [0,1] — TODO confirm with the caller.
__global__ void RBMDropoutMaskKernel( float *maskPtr, float dropout, int thisLayerSize )
{
// Flatten the (2D grid of 1D blocks) launch into a linear element index.
int index = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (index < thisLayerSize)
{
// The bool comparison result converts implicitly to 0.0f / 1.0f.
maskPtr[index] = dropout < maskPtr[index];
}
} |
fca3dfb70131a0c7b59baeb031cf42b4c4597606.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "rd_kernel.h"
#include <stdio.h>
#include <timer.h>
// CUDA timer definition
unsigned int timerCUDA = 0;
// global scope
// declare texture reference for 1D float texture
texture<float, 1> texU;
texture<float, 1> texV;
/*
* Utility function to initialize U and V
*/
// Builds the initial U/V concentration fields on the host and uploads them
// to the given device buffers (each width*height floats).
// U starts at 1, V at 0 everywhere; a centered square (48%..52% of each
// dimension) is seeded with U=0.5, V=0.25, then every cell below 1.0 is
// perturbed by a ±1% multiplicative rand() noise.
// NOTE(review): loop bounds mix signed int counters with unsigned
// width*height products — fine for typical image sizes, overflows for
// extreme ones.
__host__
void initializeConcentrations(unsigned int width, unsigned int height, float *U, float *V) {
float *_U = new float[width*height];
float *_V = new float[width*height];
int k = 0;
int i, j;
for (i = 0; i < width * height; ++i) {
_U[k] = 1.0f;
_V[k++] = 0.0f;
}
// Seed a square region in the middle of the grid.
for (i = (0.48f)*height; i < (0.52f)*height; ++i) {
for (j = (0.48f)*width; j < (0.52f)*width; ++j) {
_U[ (i * width + j) ] = 0.5f;
_V[ (i * width + j) ] = 0.25f;
}
}
// Now perturb the entire grid. Bound the values by [0,1]
for (k = 0; k < width * height; ++k) {
if ( _U[k] < 1.0f ) {
float rRand = 0.02f*(float)rand() / RAND_MAX - 0.01f;
_U[k] += rRand * _U[k];
}
if ( _V[k] < 1.0f ) {
float rRand = 0.02f*(float)rand() / RAND_MAX - 0.01f;
_V[k] += rRand * _V[k];
}
}
// Upload initial state U and V to the GPU
hipMemcpy( U, _U, width*height*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( V, _V, width*height*sizeof(float), hipMemcpyHostToDevice );
delete[] _U;
delete[] _V;
}
/*
* Kernel for the reaction-diffusion model
* This kernel is responsible for updating 'U' and 'V'
*/
// Gray-Scott style reaction-diffusion update: one explicit Euler step per
// call, one thread per pixel, updating U and V in place.
// NOTE(review): neighbours are read and results written in the same pass
// with no double buffering, so adjacent threads race; the outcome depends
// on scheduling. The guard skips only the top/bottom rows — idx±1 wraps
// across the left/right image edges.
__global__
void rd_kernel(unsigned int width, unsigned int height,
float dt, float dx, float Du, float Dv,
float F, float k, float *U, float *V) {
// Coordinate of the current pixel (for this thread)
const uint2 co = make_uint2( blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y );
// Linear index of the current pixel
const unsigned int idx = co.y*width + co.x;
//
// REACTION-DIFFUSION KERNEL - Kim Bjerge's version
//
// done - Notes: - optimization - kernel without "if"
// done - Texture version for Mac
// done - Measurements - time
// done - Resource usage?
// Asymmetric tiles?
// Kernel with shared memory how ?
// Use registers to save current values of U and V
float Ui = U[idx];
float Vi = V[idx];
// Skip computing first and last line in image
if (idx >= width && idx < width*(height-1))
{
// Computes the Laplacian operator for U and V - used values in x and y dimensions
//float laplacianU = Ui;
//float laplacianV = Vi;
float laplacianU = (U[idx+1] + U[idx-1] + U[idx+width] + U[idx-width] - 4 * Ui)/(dx*dx);
float laplacianV = (V[idx+1] + V[idx-1] + V[idx+width] + V[idx-width] - 4 * Vi)/(dx*dx);
// Computes the diffusion and reaction of the two chemicals reactants mixed together
float Uf = Du * laplacianU - Ui*powf(Vi,2) + F*(1 - Ui);
//float Uf = Du * laplacianU; // Diffusion only
float Vf = Dv * laplacianV + Ui*powf(Vi,2) - (F + k)*Vi;
U[idx] = Ui + dt*Uf;
V[idx] = Vi + dt*Vf;
}
}
/*
* Optimized kernel for the reaction-diffusion model
* Using texture memory for U and V
* This kernel is responsible for updating 'U' and 'V'
*/
// Texture-memory variant of rd_kernel: reads U/V through the bound texU /
// texV references and writes results back through the raw pointers.
// NOTE(review): same in-place neighbour race as rd_kernel, and texture
// fetches are not coherent with writes performed in the same launch.
__global__
void rd_kernel_tex(unsigned int width, unsigned int height,
float dt, float dx, float Du, float Dv,
float F, float k, float *U, float *V) {
// Coordinate of the current pixel (for this thread)
const uint2 co = make_uint2( blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y );
// Linear index of the current pixel
const unsigned int idx = co.y*width + co.x;
//
// REACTION-DIFFUSION KERNEL - Kim Bjerge's version
//
// Use registers to save current values of U and V
float Ui = tex1Dfetch(texU, idx);
float Vi = tex1Dfetch(texV, idx);
// Skip computing first and last line in image
if (idx >= width && idx < width*(height-1))
{
// Computes the Laplacian operator for U and V - used values in x and y dimensions
float laplacianU = (tex1Dfetch(texU, idx+1) + tex1Dfetch(texU,idx-1) + tex1Dfetch(texU, idx+width) + tex1Dfetch(texU, idx-width) - 4 * Ui)/(dx*dx);
float laplacianV = (tex1Dfetch(texV, idx+1) + tex1Dfetch(texV, idx-1) + tex1Dfetch(texV, idx+width) + tex1Dfetch(texV, idx-width) - 4 * Vi)/(dx*dx);
// Computes the diffusion and reaction of the two chemicals reactants mixed together
float Uf = Du * laplacianU - Ui*powf(Vi,2) + F*(1 - Ui);
float Vf = Dv * laplacianV + Ui*powf(Vi,2) - (F + k)*Vi;
U[idx] = Ui + dt*Uf;
V[idx] = Vi + dt*Vf;
}
}
/*
* Kernel for the reaction-diffusion model
* This kernel is responsible for updating 'U' and 'V'
*/
// Single-expression variant of rd_kernel (same maths, no temporaries).
// NOTE(review): U[idx]/V[idx] are re-read from global memory several times
// per expression, and V's update reads the U value already written on the
// previous line — behaviour intentionally matches the original, but differs
// subtly from rd_kernel, which snapshots U and V first.
__global__
void rd_kernel_linux_opt2(unsigned int width, unsigned int height,
float dt, float dx, float Du, float Dv,
float F, float k, float *U, float *V) {
// Coordinate of the current pixel (for this thread)
const uint2 co = make_uint2( blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y );
// Linear index of the current pixel
const unsigned int idx = co.y*width + co.x;
// REACTION-DIFFUSION KERNEL - Optimized version 2
// Use registers to save current values of U and V
// Skip computing first and last line in image
if (idx >= width && idx < width*(height-1))
{
U[idx] = U[idx] + dt*(Du * ((U[idx+1] + U[idx-1] + U[idx+width] + U[idx-width] - 4 * U[idx])/(dx*dx)) - U[idx]*V[idx]*V[idx] + F*(1 - U[idx]));
V[idx] = V[idx] + dt*(Dv * ((V[idx+1] + V[idx-1] + V[idx+width] + V[idx-width] - 4 * V[idx])/(dx*dx)) + U[idx]*V[idx]*V[idx] - (F + k)*V[idx]);
}
}
/*
* Kernel for the reaction-diffusion model
* This kernel is responsible for updating 'U' and 'V'
* Works only on Mini Mac ?
*/
// Variant of rd_kernel without the first/last-row guard.
// NOTE(review): threads on the first and last rows read idx-width /
// idx+width out of bounds — the caller must offset the pointers and shrink
// the grid (see the commented-out launch in rd()), otherwise this is OOB.
// The __syncthreads() below only orders threads within one block; it does
// not remove the inter-block neighbour race.
__global__
void rd_kernel_opt1(unsigned int width, unsigned int height,
float dt, float dx, float Du, float Dv,
float F, float k, float *U, float *V) {
// Coordinate of the current pixel (for this thread)
const uint2 co = make_uint2( blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y );
// Linear index of the current pixel
const unsigned int idx = co.y*width + co.x;
//
// REACTION-DIFFUSION KERNEL - Optimized version 1
//
// Use registers to save current values of U and V
float Ui = U[idx];
float Vi = V[idx];
// Computes the Laplacian operator for U and V - used values in x and y dimensions
float laplacianU = (U[idx+1] + U[idx-1] + U[idx+width] + U[idx-width] - 4 * Ui)/(dx*dx);
float laplacianV = (V[idx+1] + V[idx-1] + V[idx+width] + V[idx-width] - 4 * Vi)/(dx*dx);
// Computes the diffusion and reaction of the two chemicals reactants mixed together
float Uf = Du * laplacianU - Ui*powf(Vi,2) + F*(1 - Ui);
float Vf = Dv * laplacianV + Ui*powf(Vi,2) - (F + k)*Vi;
// Needed since U and V values used by all threads in block
__syncthreads();
U[idx] = Ui + dt*Uf;
V[idx] = Vi + dt*Vf;
}
/*
* Kernel for the reaction-diffusion model
* This kernel is responsible for updating 'U' and 'V'
* Works only on Mini Mac
*/
// Unguarded single-expression variant.
// NOTE(review): no bounds check at all — first/last-row threads read
// idx±width out of bounds unless the caller offsets the pointers and
// shrinks the grid (see the commented-out launch in rd()).
__global__
void rd_kernel_opt2(unsigned int width, unsigned int height,
float dt, float dx, float Du, float Dv,
float F, float k, float *U, float *V) {
// Coordinate of the current pixel (for this thread)
const uint2 co = make_uint2( blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y );
// Linear index of the current pixel
const unsigned int idx = co.y*width + co.x;
// REACTION-DIFFUSION KERNEL - Optimized version 2
// Use registers to save current values of U and V
U[idx] = U[idx] + dt*(Du * ((U[idx+1] + U[idx-1] + U[idx+width] + U[idx-width] - 4 * U[idx])/(dx*dx)) - U[idx]*V[idx]*V[idx] + F*(1 - U[idx]));
V[idx] = V[idx] + dt*(Dv * ((V[idx+1] + V[idx-1] + V[idx+width] + V[idx-width] - 4 * V[idx])/(dx*dx)) + U[idx]*V[idx]*V[idx] - (F + k)*V[idx]);
}
/*
* Wrapper for the reaction-diffusion kernel.
* Called every frame by 'display'
* 'result_devPtr' is a floating buffer used for visualization.
* Make sure whatever needs visualization goes there.
*/
// Advances the simulation one step and copies V into result_devPtr for
// display. First call allocates and initializes the device state.
// NOTE(review): U/V are function-static, so only one grid size per process
// is supported; a later call with different dimensions silently reuses the
// old allocations.
extern "C" __host__
void rd(unsigned int width, unsigned int height, float *result_devPtr) {
// Create buffers for 'U' and 'V' at first pass
static float *U, *V;
static bool first_pass = true;
if (first_pass){
// Allocate device memory for U and V
hipMalloc((void**)&U, width*height*sizeof(float));
hipMalloc((void**)&V, width*height*sizeof(float));
// Check for Cuda errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("\nCuda error detected: %s. Quitting.\n", hipGetErrorString(err) ); fflush(stdout);
exit(1);
}
// Initialize U and V on the CPU and upload to the GPU
initializeConcentrations( width, height, U, V );
CreateTimer(&timerCUDA);
// Make sure we never get in here again...
first_pass = false;
}
// Kernel block dimensions
const dim3 blockDim(16,16);
// Verify input image dimensions
if (width%blockDim.x || height%blockDim.y) {
printf("\nImage width and height must be a multiple of the block dimensions\n");
exit(1);
}
// Experiment with different settings of these constants
/* Original values
const float dt = 1.0f;
const float dx = 2.0f;
const float Du = 0.0004f*((width*height)/100.0f);
const float Dv = 0.0002f*((width*height)/100.0f);
const float F = 0.012f;
const float k = 0.052f;
*/
const float dt = 0.2f;
const float dx = 2.0f;
const float Du = 0.0004f*((width*height)/100.0f);
const float Dv = 0.0002f*((width*height)/100.0f); // Impact on how fast V diffuses (0.0001 or 0.0002)
const float F = 0.012f;
const float k = 0.052f;
// Invoke kernel (update U and V)
#if 1 // Optimized skipping top and bottom edges
RestartTimer(timerCUDA);
//rd_kernel<<< dim3(width/blockDim.x, height/blockDim.y), blockDim >>>( width, height, dt, dx, Du, Dv, F, k, U, V );
hipLaunchKernelGGL(( rd_kernel_linux_opt2), dim3(dim3(width/blockDim.x, height/blockDim.y)), dim3(blockDim) , 0, 0, width, height, dt, dx, Du, Dv, F, k, U, V );
// Only working on miniMac
//rd_kernel_opt1<<< dim3(width/blockDim.x, (height-2)/blockDim.y), blockDim >>>( width, height-2, dt, dx, Du, Dv, F, k, &U[width], &V[width] );
//rd_kernel_opt2<<< dim3(width/blockDim.x, (height-2)/blockDim.y), blockDim >>>( width, height-2, dt, dx, Du, Dv, F, k, &U[width], &V[width] );
StopTimer(timerCUDA);
float average = GetAverage(timerCUDA);
if (average > 0)
printf("Opt2 %f ms\n", average);
#endif
#if 0 // Optimized with texture memory
// Create texture for U matrix
const hipChannelFormatDesc descU = hipCreateChannelDesc<float>();
size_t numU_bytes = width*height*sizeof(float);
hipBindTexture(NULL, &texU, (const void*)U, &descU, numU_bytes);
// Create texture for V matrix
const hipChannelFormatDesc descV = hipCreateChannelDesc<float>();
size_t numV_bytes = width*height*sizeof(float);
hipBindTexture(NULL, &texV, (const void*)V, &descV, numV_bytes);
RestartTimer(timerCUDA);
hipLaunchKernelGGL(( rd_kernel_tex), dim3(dim3(width/blockDim.x, height/blockDim.y)), dim3(blockDim) , 0, 0, width, height, dt, dx, Du, Dv, F, k, U, V);
StopTimer(timerCUDA);
float average = GetAverage(timerCUDA);
if (average > 0)
printf("Tex %f ms\n", average);
hipUnbindTexture(texU);
hipUnbindTexture(texV);
#endif
// Check for errors
hipError_t err = hipGetLastError();
if( err != hipSuccess ){
printf("\nCuda error detected in 'rd_kernel': %s. Quitting.\n", hipGetErrorString(err) ); fflush(stdout);
exit(1);
}
// For visualization we use a 'float1' image. You can use either 'U' or 'V'.
hipMemcpy( result_devPtr, V, width*height*sizeof(float), hipMemcpyDeviceToDevice );
}
| fca3dfb70131a0c7b59baeb031cf42b4c4597606.cu | #include "rd_kernel.h"
#include <stdio.h>
#include <timer.h>
// CUDA timer definition
unsigned int timerCUDA = 0;
// global scope
// declare texture reference for 1D float texture
texture<float, 1> texU;
texture<float, 1> texV;
/*
* Utility function to initialize U and V
*/
// Builds the initial U/V concentration fields on the host and uploads them
// to the given device buffers (each width*height floats).
// U starts at 1, V at 0 everywhere; a centered square (48%..52% of each
// dimension) is seeded with U=0.5, V=0.25, then every cell below 1.0 is
// perturbed by a ±1% multiplicative rand() noise.
// NOTE(review): loop bounds mix signed int counters with unsigned
// width*height products — fine for typical image sizes, overflows for
// extreme ones.
__host__
void initializeConcentrations(unsigned int width, unsigned int height, float *U, float *V) {
float *_U = new float[width*height];
float *_V = new float[width*height];
int k = 0;
int i, j;
for (i = 0; i < width * height; ++i) {
_U[k] = 1.0f;
_V[k++] = 0.0f;
}
// Seed a square region in the middle of the grid.
for (i = (0.48f)*height; i < (0.52f)*height; ++i) {
for (j = (0.48f)*width; j < (0.52f)*width; ++j) {
_U[ (i * width + j) ] = 0.5f;
_V[ (i * width + j) ] = 0.25f;
}
}
// Now perturb the entire grid. Bound the values by [0,1]
for (k = 0; k < width * height; ++k) {
if ( _U[k] < 1.0f ) {
float rRand = 0.02f*(float)rand() / RAND_MAX - 0.01f;
_U[k] += rRand * _U[k];
}
if ( _V[k] < 1.0f ) {
float rRand = 0.02f*(float)rand() / RAND_MAX - 0.01f;
_V[k] += rRand * _V[k];
}
}
// Upload initial state U and V to the GPU
cudaMemcpy( U, _U, width*height*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( V, _V, width*height*sizeof(float), cudaMemcpyHostToDevice );
delete[] _U;
delete[] _V;
}
/*
* Kernel for the reaction-diffusion model
* This kernel is responsible for updating 'U' and 'V'
*/
// Gray-Scott style reaction-diffusion update: one explicit Euler step per
// call, one thread per pixel, updating U and V in place.
// NOTE(review): neighbours are read and results written in the same pass
// with no double buffering, so adjacent threads race; the outcome depends
// on scheduling. The guard skips only the top/bottom rows — idx±1 wraps
// across the left/right image edges.
__global__
void rd_kernel(unsigned int width, unsigned int height,
float dt, float dx, float Du, float Dv,
float F, float k, float *U, float *V) {
// Coordinate of the current pixel (for this thread)
const uint2 co = make_uint2( blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y );
// Linear index of the current pixel
const unsigned int idx = co.y*width + co.x;
//
// REACTION-DIFFUSION KERNEL - Kim Bjerge's version
//
// done - Notes: - optimization - kernel without "if"
// done - Texture version for Mac
// done - Measurements - time
// done - Resource usage?
// Asymmetric tiles?
// Kernel with shared memory how ?
// Use registers to save current values of U and V
float Ui = U[idx];
float Vi = V[idx];
// Skip computing first and last line in image
if (idx >= width && idx < width*(height-1))
{
// Computes the Laplacian operator for U and V - used values in x and y dimensions
//float laplacianU = Ui;
//float laplacianV = Vi;
float laplacianU = (U[idx+1] + U[idx-1] + U[idx+width] + U[idx-width] - 4 * Ui)/(dx*dx);
float laplacianV = (V[idx+1] + V[idx-1] + V[idx+width] + V[idx-width] - 4 * Vi)/(dx*dx);
// Computes the diffusion and reaction of the two chemicals reactants mixed together
float Uf = Du * laplacianU - Ui*powf(Vi,2) + F*(1 - Ui);
//float Uf = Du * laplacianU; // Diffusion only
float Vf = Dv * laplacianV + Ui*powf(Vi,2) - (F + k)*Vi;
U[idx] = Ui + dt*Uf;
V[idx] = Vi + dt*Vf;
}
}
/*
* Optimized kernel for the reaction-diffusion model
* Using texture memory for U and V
* This kernel is responsible for updating 'U' and 'V'
*/
// Texture-memory variant of rd_kernel: reads U/V through the bound texU /
// texV references and writes results back through the raw pointers.
// NOTE(review): same in-place neighbour race as rd_kernel, and texture
// fetches are not coherent with writes performed in the same launch.
__global__
void rd_kernel_tex(unsigned int width, unsigned int height,
float dt, float dx, float Du, float Dv,
float F, float k, float *U, float *V) {
// Coordinate of the current pixel (for this thread)
const uint2 co = make_uint2( blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y );
// Linear index of the current pixel
const unsigned int idx = co.y*width + co.x;
//
// REACTION-DIFFUSION KERNEL - Kim Bjerge's version
//
// Use registers to save current values of U and V
float Ui = tex1Dfetch(texU, idx);
float Vi = tex1Dfetch(texV, idx);
// Skip computing first and last line in image
if (idx >= width && idx < width*(height-1))
{
// Computes the Laplacian operator for U and V - used values in x and y dimensions
float laplacianU = (tex1Dfetch(texU, idx+1) + tex1Dfetch(texU,idx-1) + tex1Dfetch(texU, idx+width) + tex1Dfetch(texU, idx-width) - 4 * Ui)/(dx*dx);
float laplacianV = (tex1Dfetch(texV, idx+1) + tex1Dfetch(texV, idx-1) + tex1Dfetch(texV, idx+width) + tex1Dfetch(texV, idx-width) - 4 * Vi)/(dx*dx);
// Computes the diffusion and reaction of the two chemicals reactants mixed together
float Uf = Du * laplacianU - Ui*powf(Vi,2) + F*(1 - Ui);
float Vf = Dv * laplacianV + Ui*powf(Vi,2) - (F + k)*Vi;
U[idx] = Ui + dt*Uf;
V[idx] = Vi + dt*Vf;
}
}
/*
* Kernel for the reaction-diffusion model
* This kernel is responsible for updating 'U' and 'V'
*/
// Single-expression variant of rd_kernel (same maths, no temporaries).
// NOTE(review): U[idx]/V[idx] are re-read from global memory several times
// per expression, and V's update reads the U value already written on the
// previous line — behaviour intentionally matches the original, but differs
// subtly from rd_kernel, which snapshots U and V first.
__global__
void rd_kernel_linux_opt2(unsigned int width, unsigned int height,
float dt, float dx, float Du, float Dv,
float F, float k, float *U, float *V) {
// Coordinate of the current pixel (for this thread)
const uint2 co = make_uint2( blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y );
// Linear index of the current pixel
const unsigned int idx = co.y*width + co.x;
// REACTION-DIFFUSION KERNEL - Optimized version 2
// Use registers to save current values of U and V
// Skip computing first and last line in image
if (idx >= width && idx < width*(height-1))
{
U[idx] = U[idx] + dt*(Du * ((U[idx+1] + U[idx-1] + U[idx+width] + U[idx-width] - 4 * U[idx])/(dx*dx)) - U[idx]*V[idx]*V[idx] + F*(1 - U[idx]));
V[idx] = V[idx] + dt*(Dv * ((V[idx+1] + V[idx-1] + V[idx+width] + V[idx-width] - 4 * V[idx])/(dx*dx)) + U[idx]*V[idx]*V[idx] - (F + k)*V[idx]);
}
}
/*
* Kernel for the reaction-diffusion model
* This kernel is responsible for updating 'U' and 'V'
* Works only on Mini Mac ?
*/
// Variant of rd_kernel without the first/last-row guard.
// NOTE(review): threads on the first and last rows read idx-width /
// idx+width out of bounds — the caller must offset the pointers and shrink
// the grid (see the commented-out launch in rd()), otherwise this is OOB.
// The __syncthreads() below only orders threads within one block; it does
// not remove the inter-block neighbour race.
__global__
void rd_kernel_opt1(unsigned int width, unsigned int height,
float dt, float dx, float Du, float Dv,
float F, float k, float *U, float *V) {
// Coordinate of the current pixel (for this thread)
const uint2 co = make_uint2( blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y );
// Linear index of the current pixel
const unsigned int idx = co.y*width + co.x;
//
// REACTION-DIFFUSION KERNEL - Optimized version 1
//
// Use registers to save current values of U and V
float Ui = U[idx];
float Vi = V[idx];
// Computes the Laplacian operator for U and V - used values in x and y dimensions
float laplacianU = (U[idx+1] + U[idx-1] + U[idx+width] + U[idx-width] - 4 * Ui)/(dx*dx);
float laplacianV = (V[idx+1] + V[idx-1] + V[idx+width] + V[idx-width] - 4 * Vi)/(dx*dx);
// Computes the diffusion and reaction of the two chemicals reactants mixed together
float Uf = Du * laplacianU - Ui*powf(Vi,2) + F*(1 - Ui);
float Vf = Dv * laplacianV + Ui*powf(Vi,2) - (F + k)*Vi;
// Needed since U and V values used by all threads in block
__syncthreads();
U[idx] = Ui + dt*Uf;
V[idx] = Vi + dt*Vf;
}
/*
* Kernel for the reaction-diffusion model
* This kernel is responsible for updating 'U' and 'V'
* Works only on Mini Mac
*/
// Unguarded single-expression variant.
// NOTE(review): no bounds check at all — first/last-row threads read
// idx±width out of bounds unless the caller offsets the pointers and
// shrinks the grid (see the commented-out launch in rd()).
__global__
void rd_kernel_opt2(unsigned int width, unsigned int height,
float dt, float dx, float Du, float Dv,
float F, float k, float *U, float *V) {
// Coordinate of the current pixel (for this thread)
const uint2 co = make_uint2( blockIdx.x*blockDim.x + threadIdx.x,
blockIdx.y*blockDim.y + threadIdx.y );
// Linear index of the current pixel
const unsigned int idx = co.y*width + co.x;
// REACTION-DIFFUSION KERNEL - Optimized version 2
// Use registers to save current values of U and V
U[idx] = U[idx] + dt*(Du * ((U[idx+1] + U[idx-1] + U[idx+width] + U[idx-width] - 4 * U[idx])/(dx*dx)) - U[idx]*V[idx]*V[idx] + F*(1 - U[idx]));
V[idx] = V[idx] + dt*(Dv * ((V[idx+1] + V[idx-1] + V[idx+width] + V[idx-width] - 4 * V[idx])/(dx*dx)) + U[idx]*V[idx]*V[idx] - (F + k)*V[idx]);
}
/*
* Wrapper for the reaction-diffusion kernel.
* Called every frame by 'display'
* 'result_devPtr' is a floating buffer used for visualization.
* Make sure whatever needs visualization goes there.
*/
// Advances the simulation one step and copies V into result_devPtr for
// display. First call allocates and initializes the device state.
// NOTE(review): U/V are function-static, so only one grid size per process
// is supported; a later call with different dimensions silently reuses the
// old allocations.
extern "C" __host__
void rd(unsigned int width, unsigned int height, float *result_devPtr) {
// Create buffers for 'U' and 'V' at first pass
static float *U, *V;
static bool first_pass = true;
if (first_pass){
// Allocate device memory for U and V
cudaMalloc((void**)&U, width*height*sizeof(float));
cudaMalloc((void**)&V, width*height*sizeof(float));
// Check for Cuda errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("\nCuda error detected: %s. Quitting.\n", cudaGetErrorString(err) ); fflush(stdout);
exit(1);
}
// Initialize U and V on the CPU and upload to the GPU
initializeConcentrations( width, height, U, V );
CreateTimer(&timerCUDA);
// Make sure we never get in here again...
first_pass = false;
}
// Kernel block dimensions
const dim3 blockDim(16,16);
// Verify input image dimensions
if (width%blockDim.x || height%blockDim.y) {
printf("\nImage width and height must be a multiple of the block dimensions\n");
exit(1);
}
// Experiment with different settings of these constants
/* Original values
const float dt = 1.0f;
const float dx = 2.0f;
const float Du = 0.0004f*((width*height)/100.0f);
const float Dv = 0.0002f*((width*height)/100.0f);
const float F = 0.012f;
const float k = 0.052f;
*/
const float dt = 0.2f;
const float dx = 2.0f;
const float Du = 0.0004f*((width*height)/100.0f);
const float Dv = 0.0002f*((width*height)/100.0f); // Impact on how fast V diffuses (0.0001 or 0.0002)
const float F = 0.012f;
const float k = 0.052f;
// Invoke kernel (update U and V)
#if 1 // Optimized skipping top and bottom edges
RestartTimer(timerCUDA);
//rd_kernel<<< dim3(width/blockDim.x, height/blockDim.y), blockDim >>>( width, height, dt, dx, Du, Dv, F, k, U, V );
rd_kernel_linux_opt2<<< dim3(width/blockDim.x, height/blockDim.y), blockDim >>>( width, height, dt, dx, Du, Dv, F, k, U, V );
// Only working on miniMac
//rd_kernel_opt1<<< dim3(width/blockDim.x, (height-2)/blockDim.y), blockDim >>>( width, height-2, dt, dx, Du, Dv, F, k, &U[width], &V[width] );
//rd_kernel_opt2<<< dim3(width/blockDim.x, (height-2)/blockDim.y), blockDim >>>( width, height-2, dt, dx, Du, Dv, F, k, &U[width], &V[width] );
StopTimer(timerCUDA);
float average = GetAverage(timerCUDA);
if (average > 0)
printf("Opt2 %f ms\n", average);
#endif
#if 0 // Optimized with texture memory
// Create texture for U matrix
const cudaChannelFormatDesc descU = cudaCreateChannelDesc<float>();
size_t numU_bytes = width*height*sizeof(float);
cudaBindTexture(NULL, &texU, (const void*)U, &descU, numU_bytes);
// Create texture for V matrix
const cudaChannelFormatDesc descV = cudaCreateChannelDesc<float>();
size_t numV_bytes = width*height*sizeof(float);
cudaBindTexture(NULL, &texV, (const void*)V, &descV, numV_bytes);
RestartTimer(timerCUDA);
rd_kernel_tex<<< dim3(width/blockDim.x, height/blockDim.y), blockDim >>>( width, height, dt, dx, Du, Dv, F, k, U, V);
StopTimer(timerCUDA);
float average = GetAverage(timerCUDA);
if (average > 0)
printf("Tex %f ms\n", average);
cudaUnbindTexture(texU);
cudaUnbindTexture(texV);
#endif
// Check for errors
cudaError_t err = cudaGetLastError();
if( err != cudaSuccess ){
printf("\nCuda error detected in 'rd_kernel': %s. Quitting.\n", cudaGetErrorString(err) ); fflush(stdout);
exit(1);
}
// For visualization we use a 'float1' image. You can use either 'U' or 'V'.
cudaMemcpy( result_devPtr, V, width*height*sizeof(float), cudaMemcpyDeviceToDevice );
}
|
fab6e2d19a735248a2a1be2776e7092f29ff0429.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <math.h>
//declare texture reference
surface<void,cudaSurfaceType3D> surfReference_A;
surface<void,cudaSurfaceType3D> surfReference_B;
surface<void,cudaSurfaceType3D> surfReference_C;
// kernel function
// Element-wise addition over 3D surfaces: C = A + B, one thread per voxel.
// The x coordinate of surf3Dread/surf3Dwrite is a *byte* offset, hence the
// multiplication by sizeof(float).
// NOTE: alto/ancho/prof are currently unused — the launch configuration
// (a single block sized exactly to the volume) provides implicit bounds;
// hipBoundaryModeTrap would abort on any out-of-range access.
__global__ void kernel(int alto, int ancho, int prof)
{
    // Global 3D coordinates of this thread's voxel.
    int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
    int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
    int zIndex = threadIdx.z + blockIdx.z * blockDim.z;
    float data1, data2;
    surf3Dread(&data1, surfReference_A, xIndex * sizeof(float), yIndex, zIndex, hipBoundaryModeTrap);
    surf3Dread(&data2, surfReference_B, xIndex * sizeof(float), yIndex, zIndex, hipBoundaryModeTrap);
    surf3Dwrite(data1 + data2, surfReference_C, xIndex * sizeof(float), yIndex, zIndex, hipBoundaryModeTrap);
    // Debug trace of the stored sum. (The original printed an undeclared
    // variable `data`, which did not compile.)
    printf(" %f \n", data1 + data2);
}
// Prints one volume slice-by-slice, matching the original output format:
// a "<name>:" header, " %f " per element, newline per row, blank line
// between slices.
static void printVolume(const char* name, const float* data, int alto, int ancho, int prof)
{
    printf("%s:\n", name);
    for (int i = 0; i < prof; i++)
    {
        for (int j = 0; j < alto; j++)
        {
            for (int k = 0; k < ancho; k++)
            {
                printf(" %f ", data[i*alto*ancho + j*ancho + k]);
            }
            printf("\n");
        }
        printf("\n\n");
    }
}
// Demo: fills two alto x ancho x prof volumes with small random integers,
// adds them on the GPU through 3D surfaces (C = A + B) and prints A, B and
// the result. Usage: <prog> <alto> <ancho> <prof>
int main(int argc, char* argv[])
{
    float *A_host;
    float *B_host;
    float *C_host;
    hipArray *cudaArray_A;
    hipArray *cudaArray_B;
    hipArray *cudaArray_C;
    hipExtent volumeSize_A;
    hipExtent volumeSize_B;
    hipExtent volumeSize_C;
    hipChannelFormatDesc channel_A;
    hipChannelFormatDesc channel_B;
    hipChannelFormatDesc channel_C;
    hipMemcpy3DParms copyparms_A={0};
    hipMemcpy3DParms copyparms_B={0};
    hipMemcpy3DParms copyparms_C={0};
    // Guard the command-line parsing (the original dereferenced argv[1..3]
    // unconditionally).
    if (argc < 4)
    {
        printf("usage: %s <alto> <ancho> <prof>\n", argv[0]);
        return 1;
    }
    int alto,ancho,prof;
    alto = atoi(argv[1]);
    ancho = atoi(argv[2]);
    prof = atoi(argv[3]);
    // Allocate host buffers for the input and result volumes.
    A_host = (float*)malloc(sizeof(float)*alto*ancho*prof);
    B_host = (float*)malloc(sizeof(float)*alto*ancho*prof);
    C_host = (float*)malloc(sizeof(float)*alto*ancho*prof);
    // Fill the inputs with small random integers.
    for(int loop=0; loop<alto*ancho*prof;loop++)
        A_host[loop] = (float)(rand() % 3);
    for(int loop=0; loop<alto*ancho*prof;loop++)
        B_host[loop] = (float)(rand() % 5);
    printVolume("A_host", A_host, alto, ancho, prof);
    printf("\n");
    printVolume("B_host", B_host, alto, ancho, prof);
    // Describe the 3D extents and the float element type.
    volumeSize_A = make_hipExtent(ancho,alto,prof);
    volumeSize_B = make_hipExtent(ancho,alto,prof);
    volumeSize_C = make_hipExtent(ancho,alto,prof);
    channel_A = hipCreateChannelDesc<float>();
    channel_B = hipCreateChannelDesc<float>();
    channel_C = hipCreateChannelDesc<float>();
    // Allocate the device arrays with surface load/store enabled.
    hipMalloc3DArray(&cudaArray_A,&channel_A,volumeSize_A,hipArraySurfaceLoadStore);
    hipMalloc3DArray(&cudaArray_B,&channel_B,volumeSize_B,hipArraySurfaceLoadStore);
    hipMalloc3DArray(&cudaArray_C,&channel_C,volumeSize_C,hipArraySurfaceLoadStore);
    // Upload A and B: linear host memory -> device arrays.
    copyparms_A.extent = volumeSize_A;
    copyparms_A.dstArray = cudaArray_A;
    copyparms_A.kind = hipMemcpyHostToDevice;
    copyparms_A.srcPtr = make_hipPitchedPtr((void*)A_host,ancho*sizeof(float),ancho,alto);
    hipMemcpy3D(&copyparms_A);
    copyparms_B.extent = volumeSize_B;
    copyparms_B.dstArray = cudaArray_B;
    copyparms_B.kind = hipMemcpyHostToDevice;
    copyparms_B.srcPtr = make_hipPitchedPtr((void*)B_host,ancho*sizeof(float),ancho,alto);
    hipMemcpy3D(&copyparms_B);
    // Bind the arrays to the surface references used by the kernel.
    hipBindSurfaceToArray(surfReference_A,cudaArray_A, channel_A);
    hipBindSurfaceToArray(surfReference_B,cudaArray_B, channel_B);
    hipBindSurfaceToArray(surfReference_C,cudaArray_C, channel_C);
    // One block sized exactly to the volume (one thread per voxel).
    dim3 blockDim; dim3 gridDim;
    blockDim.x = ancho; blockDim.y = alto; blockDim.z = prof;
    gridDim.x = 1; gridDim.y = 1; gridDim.z = 1;
    hipLaunchKernelGGL(( kernel), dim3(gridDim) , dim3(blockDim) , 0, 0, alto, ancho, prof);
    hipDeviceSynchronize();
    // Download C: device array -> linear host memory. A 3D copy into linear
    // memory requires a *pitched* destination pointer; the original assigned
    // only copyparms_C.dstPtr.ptr, leaving the pitch zero, so the copy was
    // malformed.
    copyparms_C.srcArray = cudaArray_C;
    copyparms_C.extent = volumeSize_C;
    copyparms_C.dstPtr = make_hipPitchedPtr((void*)C_host,ancho*sizeof(float),ancho,alto);
    copyparms_C.kind = hipMemcpyDeviceToHost;
    hipMemcpy3D(&copyparms_C);
    printf("\n");
    printVolume("C_host", C_host, alto, ancho, prof);
    // Release host and device memory.
    free(A_host);
    free(B_host);
    free(C_host);
    hipFreeArray(cudaArray_A);
    hipFreeArray(cudaArray_B);
    hipFreeArray(cudaArray_C);
}
| fab6e2d19a735248a2a1be2776e7092f29ff0429.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <math.h>
//declare texture reference
surface<void,cudaSurfaceType3D> surfReference_A;
surface<void,cudaSurfaceType3D> surfReference_B;
surface<void,cudaSurfaceType3D> surfReference_C;
// kernel function
// Element-wise addition over 3D surfaces: C = A + B, one thread per voxel.
// The x coordinate of surf3Dread/surf3Dwrite is a *byte* offset, hence the
// multiplication by sizeof(float).
// NOTE: alto/ancho/prof are currently unused — the launch configuration
// (a single block sized exactly to the volume) provides implicit bounds;
// cudaBoundaryModeTrap would abort on any out-of-range access.
__global__ void kernel(int alto, int ancho, int prof)
{
    // Global 3D coordinates of this thread's voxel.
    int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
    int yIndex = threadIdx.y + blockIdx.y * blockDim.y;
    int zIndex = threadIdx.z + blockIdx.z * blockDim.z;
    float data1, data2;
    surf3Dread(&data1, surfReference_A, xIndex * sizeof(float), yIndex, zIndex, cudaBoundaryModeTrap);
    surf3Dread(&data2, surfReference_B, xIndex * sizeof(float), yIndex, zIndex, cudaBoundaryModeTrap);
    surf3Dwrite(data1 + data2, surfReference_C, xIndex * sizeof(float), yIndex, zIndex, cudaBoundaryModeTrap);
    // Debug trace of the stored sum. (The original printed an undeclared
    // variable `data`, which did not compile.)
    printf(" %f \n", data1 + data2);
}
int main(int argc, char* argv[])
{
float *A_host;
float *B_host;
float *C_host;
cudaArray *cudaArray_A;
cudaArray *cudaArray_B;
cudaArray *cudaArray_C;
cudaExtent volumeSize_A;
cudaExtent volumeSize_B;
cudaExtent volumeSize_C;
cudaChannelFormatDesc channel_A;
cudaChannelFormatDesc channel_B;
cudaChannelFormatDesc channel_C;
cudaMemcpy3DParms copyparms_A={0};
cudaMemcpy3DParms copyparms_B={0};
cudaMemcpy3DParms copyparms_C={0};
int alto,ancho,prof;
alto = atoi(argv[1]);
ancho = atoi(argv[2]);
prof = atoi(argv[3]);
//allocate host and device memory
A_host = (float*)malloc(sizeof(float)*alto*ancho*prof);
B_host = (float*)malloc(sizeof(float)*alto*ancho*prof);
C_host = (float*)malloc(sizeof(float)*alto*ancho*prof);
//initialize A_host matrix before usage
for(int loop=0; loop<alto*ancho*prof;loop++)
A_host[loop] = (float)(rand() % 3);
//initialize B_host matrix before usage
for(int loop=0; loop<alto*ancho*prof;loop++)
B_host[loop] = (float)(rand() % 5);
printf("A_host:\n");
for(int i = 0;i<prof;i++)
{
for(int j = 0;j<alto;j++)
{
for(int k = 0;k<ancho;k++)
{
printf(" %f ",A_host[i*alto*ancho + j*ancho + k]);
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
printf("B_host:\n");
for(int i = 0;i<prof;i++)
{
for(int j = 0;j<alto;j++)
{
for(int k = 0;k<ancho;k++)
{
printf(" %f ",B_host[i*alto*ancho + j*ancho + k]);
}
printf("\n");
}
printf("\n\n");
}
//set cuda array volume size
volumeSize_A = make_cudaExtent(ancho,alto,prof);
volumeSize_B = make_cudaExtent(ancho,alto,prof);
volumeSize_C = make_cudaExtent(ancho,alto,prof);
//create channel to describe data type
channel_A = cudaCreateChannelDesc<float>();
channel_B = cudaCreateChannelDesc<float>();
channel_C = cudaCreateChannelDesc<float>();
//allocate device memory for cuda array
cudaMalloc3DArray(&cudaArray_A,&channel_A,volumeSize_A,cudaArraySurfaceLoadStore);
cudaMalloc3DArray(&cudaArray_B,&channel_B,volumeSize_B,cudaArraySurfaceLoadStore);
cudaMalloc3DArray(&cudaArray_C,&channel_C,volumeSize_C,cudaArraySurfaceLoadStore);
//set cuda array copy parameters
copyparms_A.extent = volumeSize_A;
copyparms_A.dstArray = cudaArray_A;
copyparms_A.kind = cudaMemcpyHostToDevice;
copyparms_B.extent = volumeSize_B;
copyparms_B.dstArray = cudaArray_B;
copyparms_B.kind = cudaMemcpyHostToDevice;
// 3D copy from host_CubeMatrix to cudaArray
copyparms_A.srcPtr = make_cudaPitchedPtr((void*)A_host,ancho*sizeof(float),ancho,alto);
cudaMemcpy3D(©parms_A);
copyparms_B.srcPtr = make_cudaPitchedPtr((void*)B_host,ancho*sizeof(float),ancho,alto);
cudaMemcpy3D(©parms_B);
//bind texture reference with cuda array
cudaBindSurfaceToArray(surfReference_A,cudaArray_A, channel_A);
cudaBindSurfaceToArray(surfReference_B,cudaArray_B, channel_B);
cudaBindSurfaceToArray(surfReference_C,cudaArray_C, channel_C);
// preparing kernel launch
dim3 blockDim; dim3 gridDim;
blockDim.x = ancho; blockDim.y = alto; blockDim.z = prof;
gridDim.x = 1; gridDim.y = 1; gridDim.z = 1;
//execute device kernel
kernel<<< gridDim , blockDim >>>(alto, ancho, prof);
cudaThreadSynchronize();
//unbind texture reference to free resource
//copy result matrix from device to host memory
//cudaMemcpyFromArray(C_host, cudaArray_C, 0, 0, sizeof(float)*alto*ancho*prof, cudaMemcpyDeviceToHost);
//set cuda array copy parameters
copyparms_C.srcArray = cudaArray_C;
copyparms_C.extent = volumeSize_C;
copyparms_C.dstPtr.ptr = C_host;
copyparms_C.kind = cudaMemcpyDeviceToHost;
// 3D copy from host_CubeMatrix to cudaArray
//copyparms_C.srcPtr = make_cudaPitchedPtr((void*)cudaArray_C,ancho*sizeof(float),ancho,alto);
cudaMemcpy3D(©parms_C);
printf("\n");
printf("C_host:\n");
for(int i = 0;i<prof;i++)
{
for(int j = 0;j<alto;j++)
{
for(int k = 0;k<ancho;k++)
{
printf(" %f ",C_host[i*alto*ancho + j*ancho + k]);
}
printf("\n");
}
printf("\n\n");
}
//free host and device memory
free(A_host);
free(B_host);
free(C_host);
cudaFreeArray(cudaArray_A);
cudaFreeArray(cudaArray_B);
cudaFreeArray(cudaArray_C);
}
|
d137a0aca81403aebf6b5ce1c641d40f2a9964f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <helper_cuda.h>
#include "scan_common.h"
//All three kernels run 512 threads per workgroup
//Must be a power of two
#define THREADBLOCK_SIZE 512
#define LOOP_PERTHREAD 16
#define LOOP_PERTHREAD2 16
////////////////////////////////////////////////////////////////////////////////
// Basic ccan codelets
////////////////////////////////////////////////////////////////////////////////
//Naive inclusive scan: O(N * log2(N)) operations
//Allocate 2 * 'size' local memory, initialize the first half
//with 'size' zeros avoiding if(pos >= offset) condition evaluation
//and saving instructions
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size)
{
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
for (uint offset = 1; offset < size; offset <<= 1)
{
__syncthreads();
uint t = s_Data[pos] + s_Data[pos - offset];
__syncthreads();
s_Data[pos] = t;
}
return s_Data[pos];
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size)
{
return scan1Inclusive(idata, s_Data, size) - idata;
}
inline __device__ uint4 scan4Inclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
//Level-0 inclusive scan
idata4.y += idata4.x;
idata4.z += idata4.y;
idata4.w += idata4.z;
//Level-1 exclusive scan
uint oval = scan1Exclusive(idata4.w, s_Data, size / 4);
idata4.x += oval;
idata4.y += oval;
idata4.z += oval;
idata4.w += oval;
return idata4;
}
//Exclusive vector scan: the array to be scanned is stored
//in local thread memory scope as uint4
inline __device__ uint4 scan4Exclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
uint4 odata4 = scan4Inclusive(idata4, s_Data, size);
odata4.x -= idata4.x;
odata4.y -= idata4.y;
odata4.z -= idata4.z;
odata4.w -= idata4.w;
return odata4;
}
////////////////////////////////////////////////////////////////////////////////
// Scan kernels
////////////////////////////////////////////////////////////////////////////////
__global__ void scanExclusiveShared(
uint4 *d_Dst,
uint4 *d_Src,
uint size
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load data
uint4 idata4 = d_Src[pos];
//Calculate exclusive scan
uint4 odata4 = scan4Exclusive(idata4, s_Data, size);
//Write back
d_Dst[pos] = odata4;
}
//Exclusive scan of top elements of bottom-level scans (4 * THREADBLOCK_SIZE)
__global__ void scanExclusiveShared2(
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Dst[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
d_Buf[pos] = odata;
}
}
__global__ void scanExclusiveShared3(
uint *e_Buf,
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Buf[THREADBLOCK_SIZE -1 + pos * THREADBLOCK_SIZE] + d_Dst[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
e_Buf[pos] = odata;
}
}
//Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays
__global__ void uniformUpdate(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0)
{
buf = d_Buffer[blockIdx.x];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
__global__ void uniformUpdate2(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint temp = blockIdx.x/THREADBLOCK_SIZE;
if (threadIdx.x == 0)
{
buf = d_Buffer[temp];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
__global__ void diff_kernel(
uint *d_Data,
uint *d_Src,
uint pnum,
uint length,
uint size
)
{
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint p_n = pnum;
uint len = length;
uint POS = pos * LOOP_PERTHREAD;
uint i;
for(i = POS ; (i < POS + LOOP_PERTHREAD)&&(i < len-1); i++){
d_Data[i] = d_Src[(i+1)*p_n] - d_Src[i * p_n];
}
if(i == (len-1)){
d_Data[len-1] = size - d_Src[(len-1)*p_n];
}
}
__global__ void transport_kernel(
uint *d_Data,
uint *d_Src,
uint loc
)
{
d_Data[0] = d_Src[loc-1];
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Derived as 32768 (max power-of-two gridDim.x) * 4 * THREADBLOCK_SIZE
//Due to scanExclusiveShared<<<>>>() 1D block addressing
extern "C" const uint MAX_BATCH_ELEMENTS = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_SHORT_ARRAY_SIZE = 4;
extern "C" const uint MAX_SHORT_ARRAY_SIZE = 4 * THREADBLOCK_SIZE;
extern "C" const uint MIN_LARGE_ARRAY_SIZE = 8 * THREADBLOCK_SIZE;
extern "C" const uint MAX_LARGE_ARRAY_SIZE = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_LL_SIZE = 8 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MAX_LL_SIZE = MAX_BATCH_ELEMENTS;//4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
//Internal exclusive scan buffer
static uint *d_Buf;
static uint *e_Buf;
extern "C" void initScan(void)
{
checkCudaErrors(hipMalloc((void **)&d_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE)) * sizeof(uint)));
checkCudaErrors(hipMalloc((void **)&e_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)) * sizeof(uint)));
}
extern "C" void closeScan(void)
{
checkCudaErrors(hipFree(d_Buf));
checkCudaErrors(hipFree(e_Buf));
}
static uint factorRadix2(uint &log2L, uint L)
{
if (!L)
{
log2L = 0;
return 0;
}
else
{
for (log2L = 0; (L & 1) == 0; L >>= 1, log2L++);
return L;
}
}
static uint iDivUp(uint dividend, uint divisor)
{
return ((dividend % divisor) == 0) ? (dividend / divisor) : (dividend / divisor + 1);
}
extern "C" size_t scanExclusiveShort(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
//Check supported size range
assert((arrayLength >= MIN_SHORT_ARRAY_SIZE) && (arrayLength <= MAX_SHORT_ARRAY_SIZE));
//Check total batch size limit
assert(arrayLength <= MAX_BATCH_ELEMENTS);
//Check all threadblocks to be fully packed with data
assert(arrayLength % (4 * THREADBLOCK_SIZE) == 0);
hipLaunchKernelGGL(( scanExclusiveShared), dim3(arrayLength / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
arrayLength
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
return THREADBLOCK_SIZE;
}
extern "C" size_t scanExclusiveLarge(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert(arrayLength%MAX_SHORT_ARRAY_SIZE == 0);
//Check supported size range
assert((arrayLength >= MIN_LARGE_ARRAY_SIZE) && (arrayLength <= MAX_LARGE_ARRAY_SIZE));
//Check total batch size limit
assert(arrayLength <= MAX_BATCH_ELEMENTS);
hipLaunchKernelGGL(( scanExclusiveShared), dim3(arrayLength / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
const uint blockCount2 = 1;//iDivUp((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
hipLaunchKernelGGL(( scanExclusiveShared2), dim3(blockCount2), dim3(THREADBLOCK_SIZE), 0, 0,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE),
array_temp
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n");
hipLaunchKernelGGL(( uniformUpdate), dim3((arrayLength) / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint *)d_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
return THREADBLOCK_SIZE;
}
extern "C" size_t scanExclusiveLL(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert((arrayLength%MAX_LARGE_ARRAY_SIZE) == 0);
//Check supported size range
assert((arrayLength >= MIN_LL_SIZE) && (arrayLength <= MAX_LL_SIZE));
//Check total batch size limit
assert((arrayLength) <= MAX_BATCH_ELEMENTS);
hipLaunchKernelGGL(( scanExclusiveShared), dim3(arrayLength / (4 * THREADBLOCK_SIZE)), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
//Now ,prefix sum per THREADBLOCK_SIZE done
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
const uint blockCount2 = iDivUp (arrayLength / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
hipLaunchKernelGGL(( scanExclusiveShared2), dim3(blockCount2), dim3(THREADBLOCK_SIZE), 0, 0,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE),
THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
//prefix sum of last elements per THREADBLOCK_SIZE done
//this prefix sum can caluculate under only THREADBLOCK_SIZE size.
//so We need one more prefix sum for last elements.
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
const uint blockCount3 = 1;//(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE);
hipLaunchKernelGGL(( scanExclusiveShared3), dim3(blockCount3), dim3(THREADBLOCK_SIZE), 0, 0,
(uint *)e_Buf,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE),
array_temp
);
getLastCudaError("scanExclusiveShared3() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
//add d_Buf to each array of d_Dst
hipLaunchKernelGGL(( uniformUpdate), dim3(arrayLength / (4 * THREADBLOCK_SIZE )), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint *)d_Buf
);
//add e_Buf to each array of d_Dst
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( uniformUpdate2), dim3(arrayLength / (4 * THREADBLOCK_SIZE )), dim3(THREADBLOCK_SIZE), 0, 0,
(uint4 *)d_Dst,
(uint *)e_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
return THREADBLOCK_SIZE;
}
extern "C" size_t diff_Part(
uint *d_Dst,
uint *d_Src,
uint diff,
uint arrayLength,
uint size
)
{
const uint blockCount = iDivUp(arrayLength , LOOP_PERTHREAD*THREADBLOCK_SIZE);
hipLaunchKernelGGL(( diff_kernel), dim3(blockCount), dim3(THREADBLOCK_SIZE), 0, 0,
d_Dst,
d_Src,
diff,
arrayLength,
size
);
getLastCudaError("diff_Part() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
return THREADBLOCK_SIZE;
}
//transport input data to output per diff
extern "C" void transport_gpu(
uint *d_Dst,
uint *d_Src,
uint loc
)
{
//Check total batch size limit
//assert((arrayLength) <= MAX_BATCH_ELEMENTS);
const uint blockCount = 1;//iDivUp(arrayLength , LOOP_PERTHREAD2*THREADBLOCK_SIZE);
hipLaunchKernelGGL(( transport_kernel), dim3(blockCount), dim3(1), 0, 0,
d_Dst,
d_Src,
loc
);
getLastCudaError("transport_gpu() execution FAILED\n");
checkCudaErrors(hipDeviceSynchronize());
}
| d137a0aca81403aebf6b5ce1c641d40f2a9964f8.cu | /*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <helper_cuda.h>
#include "scan_common.h"
//All three kernels run 512 threads per workgroup
//Must be a power of two
#define THREADBLOCK_SIZE 512
#define LOOP_PERTHREAD 16
#define LOOP_PERTHREAD2 16
////////////////////////////////////////////////////////////////////////////////
// Basic ccan codelets
////////////////////////////////////////////////////////////////////////////////
//Naive inclusive scan: O(N * log2(N)) operations
//Allocate 2 * 'size' local memory, initialize the first half
//with 'size' zeros avoiding if(pos >= offset) condition evaluation
//and saving instructions
inline __device__ uint scan1Inclusive(uint idata, volatile uint *s_Data, uint size)
{
uint pos = 2 * threadIdx.x - (threadIdx.x & (size - 1));
s_Data[pos] = 0;
pos += size;
s_Data[pos] = idata;
for (uint offset = 1; offset < size; offset <<= 1)
{
__syncthreads();
uint t = s_Data[pos] + s_Data[pos - offset];
__syncthreads();
s_Data[pos] = t;
}
return s_Data[pos];
}
inline __device__ uint scan1Exclusive(uint idata, volatile uint *s_Data, uint size)
{
return scan1Inclusive(idata, s_Data, size) - idata;
}
inline __device__ uint4 scan4Inclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
//Level-0 inclusive scan
idata4.y += idata4.x;
idata4.z += idata4.y;
idata4.w += idata4.z;
//Level-1 exclusive scan
uint oval = scan1Exclusive(idata4.w, s_Data, size / 4);
idata4.x += oval;
idata4.y += oval;
idata4.z += oval;
idata4.w += oval;
return idata4;
}
//Exclusive vector scan: the array to be scanned is stored
//in local thread memory scope as uint4
inline __device__ uint4 scan4Exclusive(uint4 idata4, volatile uint *s_Data, uint size)
{
uint4 odata4 = scan4Inclusive(idata4, s_Data, size);
odata4.x -= idata4.x;
odata4.y -= idata4.y;
odata4.z -= idata4.z;
odata4.w -= idata4.w;
return odata4;
}
////////////////////////////////////////////////////////////////////////////////
// Scan kernels
////////////////////////////////////////////////////////////////////////////////
__global__ void scanExclusiveShared(
uint4 *d_Dst,
uint4 *d_Src,
uint size
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load data
uint4 idata4 = d_Src[pos];
//Calculate exclusive scan
uint4 odata4 = scan4Exclusive(idata4, s_Data, size);
//Write back
d_Dst[pos] = odata4;
}
//Exclusive scan of top elements of bottom-level scans (4 * THREADBLOCK_SIZE)
__global__ void scanExclusiveShared2(
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Dst[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
d_Buf[pos] = odata;
}
}
__global__ void scanExclusiveShared3(
uint *e_Buf,
uint *d_Buf,
uint *d_Dst,
uint *d_Src,
uint N,
uint arrayLength
)
{
__shared__ uint s_Data[2 * THREADBLOCK_SIZE];
//Skip loads and stores for inactive threads of last threadblock (pos >= N)
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
//Load top elements
//Convert results of bottom-level scan back to inclusive
uint idata = 0;
if (pos < N)
idata =
d_Buf[THREADBLOCK_SIZE -1 + pos * THREADBLOCK_SIZE] + d_Dst[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos] + d_Src[(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) - 1 + (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE) * pos];
//Compute
uint odata = scan1Exclusive(idata, s_Data, arrayLength);
//Avoid out-of-bound access
if (pos < N)
{
e_Buf[pos] = odata;
}
}
//Final step of large-array scan: combine basic inclusive scan with exclusive scan of top elements of input arrays
__global__ void uniformUpdate(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
if (threadIdx.x == 0)
{
buf = d_Buffer[blockIdx.x];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
__global__ void uniformUpdate2(
uint4 *d_Data,
uint *d_Buffer
)
{
__shared__ uint buf;
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint temp = blockIdx.x/THREADBLOCK_SIZE;
if (threadIdx.x == 0)
{
buf = d_Buffer[temp];
}
__syncthreads();
uint4 data4 = d_Data[pos];
data4.x += buf;
data4.y += buf;
data4.z += buf;
data4.w += buf;
d_Data[pos] = data4;
}
__global__ void diff_kernel(
uint *d_Data,
uint *d_Src,
uint pnum,
uint length,
uint size
)
{
uint pos = blockIdx.x * blockDim.x + threadIdx.x;
uint p_n = pnum;
uint len = length;
uint POS = pos * LOOP_PERTHREAD;
uint i;
for(i = POS ; (i < POS + LOOP_PERTHREAD)&&(i < len-1); i++){
d_Data[i] = d_Src[(i+1)*p_n] - d_Src[i * p_n];
}
if(i == (len-1)){
d_Data[len-1] = size - d_Src[(len-1)*p_n];
}
}
__global__ void transport_kernel(
uint *d_Data,
uint *d_Src,
uint loc
)
{
d_Data[0] = d_Src[loc-1];
}
////////////////////////////////////////////////////////////////////////////////
// Interface function
////////////////////////////////////////////////////////////////////////////////
//Derived as 32768 (max power-of-two gridDim.x) * 4 * THREADBLOCK_SIZE
//Due to scanExclusiveShared<<<>>>() 1D block addressing
extern "C" const uint MAX_BATCH_ELEMENTS = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_SHORT_ARRAY_SIZE = 4;
extern "C" const uint MAX_SHORT_ARRAY_SIZE = 4 * THREADBLOCK_SIZE;
extern "C" const uint MIN_LARGE_ARRAY_SIZE = 8 * THREADBLOCK_SIZE;
extern "C" const uint MAX_LARGE_ARRAY_SIZE = 4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MIN_LL_SIZE = 8 * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
extern "C" const uint MAX_LL_SIZE = MAX_BATCH_ELEMENTS;//4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE * THREADBLOCK_SIZE;
//Internal exclusive scan buffer
static uint *d_Buf;
static uint *e_Buf;
extern "C" void initScan(void)
{
checkCudaErrors(cudaMalloc((void **)&d_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE)) * sizeof(uint)));
checkCudaErrors(cudaMalloc((void **)&e_Buf, (MAX_BATCH_ELEMENTS / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)) * sizeof(uint)));
}
extern "C" void closeScan(void)
{
checkCudaErrors(cudaFree(d_Buf));
checkCudaErrors(cudaFree(e_Buf));
}
static uint factorRadix2(uint &log2L, uint L)
{
if (!L)
{
log2L = 0;
return 0;
}
else
{
for (log2L = 0; (L & 1) == 0; L >>= 1, log2L++);
return L;
}
}
static uint iDivUp(uint dividend, uint divisor)
{
return ((dividend % divisor) == 0) ? (dividend / divisor) : (dividend / divisor + 1);
}
extern "C" size_t scanExclusiveShort(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
//Check supported size range
assert((arrayLength >= MIN_SHORT_ARRAY_SIZE) && (arrayLength <= MAX_SHORT_ARRAY_SIZE));
//Check total batch size limit
assert(arrayLength <= MAX_BATCH_ELEMENTS);
//Check all threadblocks to be fully packed with data
assert(arrayLength % (4 * THREADBLOCK_SIZE) == 0);
scanExclusiveShared<<<arrayLength / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint4 *)d_Src,
arrayLength
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
return THREADBLOCK_SIZE;
}
extern "C" size_t scanExclusiveLarge(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert(arrayLength%MAX_SHORT_ARRAY_SIZE == 0);
//Check supported size range
assert((arrayLength >= MIN_LARGE_ARRAY_SIZE) && (arrayLength <= MAX_LARGE_ARRAY_SIZE));
//Check total batch size limit
assert(arrayLength <= MAX_BATCH_ELEMENTS);
scanExclusiveShared<<<arrayLength / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
const uint blockCount2 = 1;//iDivUp((batchSize * arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
scanExclusiveShared2<<< blockCount2, THREADBLOCK_SIZE>>>(
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE),
array_temp
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n");
uniformUpdate<<<(arrayLength) / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint *)d_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
return THREADBLOCK_SIZE;
}
extern "C" size_t scanExclusiveLL(
uint *d_Dst,
uint *d_Src,
uint arrayLength
)
{
//Check power-of-two factorization
/*
uint log2L;
uint factorizationRemainder = factorRadix2(log2L, arrayLength);
assert(factorizationRemainder == 1);
*/
assert((arrayLength%MAX_LARGE_ARRAY_SIZE) == 0);
//Check supported size range
assert((arrayLength >= MIN_LL_SIZE) && (arrayLength <= MAX_LL_SIZE));
//Check total batch size limit
assert((arrayLength) <= MAX_BATCH_ELEMENTS);
scanExclusiveShared<<<arrayLength / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint4 *)d_Src,
4 * THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
//Now ,prefix sum per THREADBLOCK_SIZE done
//Not all threadblocks need to be packed with input data:
//inactive threads of highest threadblock just don't do global reads and writes
const uint blockCount2 = iDivUp (arrayLength / (4 * THREADBLOCK_SIZE), THREADBLOCK_SIZE);
scanExclusiveShared2<<< blockCount2, THREADBLOCK_SIZE>>>(
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE),
THREADBLOCK_SIZE
);
getLastCudaError("scanExclusiveShared2() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
//prefix sum of last elements per THREADBLOCK_SIZE done
//this prefix sum can caluculate under only THREADBLOCK_SIZE size.
//so We need one more prefix sum for last elements.
uint array_temp = THREADBLOCK_SIZE;
for(uint i = 2; i<=THREADBLOCK_SIZE ; i <<= 1){
if(i >= arrayLength/(4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE)){
array_temp = i;
break;
}
}
const uint blockCount3 = 1;//(batchSize * arrayLength) / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE);
scanExclusiveShared3<<< blockCount3, THREADBLOCK_SIZE>>>(
(uint *)e_Buf,
(uint *)d_Buf,
(uint *)d_Dst,
(uint *)d_Src,
arrayLength / (4 * THREADBLOCK_SIZE * THREADBLOCK_SIZE),
array_temp
);
getLastCudaError("scanExclusiveShared3() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
//add d_Buf to each array of d_Dst
uniformUpdate<<<arrayLength / (4 * THREADBLOCK_SIZE ), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint *)d_Buf
);
//add e_Buf to each array of d_Dst
checkCudaErrors(cudaDeviceSynchronize());
uniformUpdate2<<<arrayLength / (4 * THREADBLOCK_SIZE ), THREADBLOCK_SIZE>>>(
(uint4 *)d_Dst,
(uint *)e_Buf
);
getLastCudaError("uniformUpdate() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
return THREADBLOCK_SIZE;
}
extern "C" size_t diff_Part(
uint *d_Dst,
uint *d_Src,
uint diff,
uint arrayLength,
uint size
)
{
const uint blockCount = iDivUp(arrayLength , LOOP_PERTHREAD*THREADBLOCK_SIZE);
diff_kernel<<<blockCount, THREADBLOCK_SIZE>>>(
d_Dst,
d_Src,
diff,
arrayLength,
size
);
getLastCudaError("diff_Part() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
return THREADBLOCK_SIZE;
}
//transport input data to output per diff
extern "C" void transport_gpu(
uint *d_Dst,
uint *d_Src,
uint loc
)
{
//Check total batch size limit
//assert((arrayLength) <= MAX_BATCH_ELEMENTS);
const uint blockCount = 1;//iDivUp(arrayLength , LOOP_PERTHREAD2*THREADBLOCK_SIZE);
transport_kernel<<<blockCount, 1>>>(
d_Dst,
d_Src,
loc
);
getLastCudaError("transport_gpu() execution FAILED\n");
checkCudaErrors(cudaDeviceSynchronize());
}
|
2afb262358d7668a65aba752cd16f01cf10bb961.hip | // !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMathCompare.cuh"
#include "THHTensor.hpp"
#include "THHStream.h"
#include "../generic/THCTensorMathCompare.cu"
#include "../THCGenerateDoubleType.h"
| 2afb262358d7668a65aba752cd16f01cf10bb961.cu | #include "../THCTensorMathCompare.cuh"
#include "THCTensor.hpp"
#include "THCStream.h"
#include "../generic/THCTensorMathCompare.cu"
#include "../THCGenerateDoubleType.h"
|
900de80ecbd228b64722a8097b921387ed45fdaf.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hipcub/hipcub.hpp>
#include <vector>
#include "kernel.h"
#include "bboxUtils.h"
#include "cub_helper.h"
template <typename T_SCORE>
pluginStatus_t sortScoresPerImage_gpu(
hipStream_t stream,
const int num_images,
const int num_items_per_image,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* workspace)
{
void* d_offsets = workspace;
void* cubWorkspace = nextWorkspacePtr((int8_t*) d_offsets, (num_images + 1) * sizeof(int));
setUniformOffsets(stream, num_images, num_items_per_image, (int*) d_offsets);
const int arrayLen = num_images * num_items_per_image;
size_t temp_storage_bytes = cubSortPairsWorkspaceSize<T_SCORE, int>(arrayLen, num_images);
hipcub::DeviceSegmentedRadixSort::SortPairsDescending(
cubWorkspace, temp_storage_bytes,
(const T_SCORE*) (unsorted_scores), (T_SCORE*) (sorted_scores),
(const int*) (unsorted_bbox_indices), (int*) (sorted_bbox_indices),
arrayLen, num_images,
(const int*) d_offsets, (const int*) d_offsets + 1,
0, sizeof(T_SCORE) * 8,
stream);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// sortScoresPerImage LAUNCH CONFIG
typedef pluginStatus_t (*sspiFunc)(hipStream_t,
const int,
const int,
void*,
void*,
void*,
void*,
void*);
struct sspiLaunchConfig
{
DataType t_score;
sspiFunc function;
sspiLaunchConfig(DataType t_score)
: t_score(t_score)
{
}
sspiLaunchConfig(DataType t_score, sspiFunc function)
: t_score(t_score)
, function(function)
{
}
bool operator==(const sspiLaunchConfig& other)
{
return t_score == other.t_score;
}
};
static std::vector<sspiLaunchConfig> sspiFuncVec;
bool sspiInit()
{
sspiFuncVec.push_back(sspiLaunchConfig(DataType::kFLOAT,
sortScoresPerImage_gpu<float>));
return true;
}
static bool initialized = sspiInit();
pluginStatus_t sortScoresPerImage(
hipStream_t stream,
const int num_images,
const int num_items_per_image,
const DataType DT_SCORE,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* workspace)
{
sspiLaunchConfig lc = sspiLaunchConfig(DT_SCORE);
for (unsigned i = 0; i < sspiFuncVec.size(); ++i)
{
if (lc == sspiFuncVec[i])
{
DEBUG_PRINTF("sortScoresPerImage kernel %d\n", i);
return sspiFuncVec[i].function(stream,
num_images,
num_items_per_image,
unsorted_scores,
unsorted_bbox_indices,
sorted_scores,
sorted_bbox_indices,
workspace);
}
}
return STATUS_BAD_PARAM;
}
size_t sortScoresPerImageWorkspaceSize(
const int num_images,
const int num_items_per_image,
const DataType DT_SCORE)
{
const int arrayLen = num_images * num_items_per_image;
size_t wss[2];
wss[0] = (num_images + 1) * sizeof(int); // offsets
if (DT_SCORE == DataType::kFLOAT)
{
wss[1] = cubSortPairsWorkspaceSize<float, int>(arrayLen, num_images); // cub workspace
}
else
{
printf("SCORE type not supported.\n");
return (size_t) -1;
}
return calculateTotalWorkspaceSize(wss, 2);
}
| 900de80ecbd228b64722a8097b921387ed45fdaf.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cub/cub.cuh>
#include <vector>
#include "kernel.h"
#include "bboxUtils.h"
#include "cub_helper.h"
template <typename T_SCORE>
pluginStatus_t sortScoresPerImage_gpu(
cudaStream_t stream,
const int num_images,
const int num_items_per_image,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* workspace)
{
void* d_offsets = workspace;
void* cubWorkspace = nextWorkspacePtr((int8_t*) d_offsets, (num_images + 1) * sizeof(int));
setUniformOffsets(stream, num_images, num_items_per_image, (int*) d_offsets);
const int arrayLen = num_images * num_items_per_image;
size_t temp_storage_bytes = cubSortPairsWorkspaceSize<T_SCORE, int>(arrayLen, num_images);
cub::DeviceSegmentedRadixSort::SortPairsDescending(
cubWorkspace, temp_storage_bytes,
(const T_SCORE*) (unsorted_scores), (T_SCORE*) (sorted_scores),
(const int*) (unsorted_bbox_indices), (int*) (sorted_bbox_indices),
arrayLen, num_images,
(const int*) d_offsets, (const int*) d_offsets + 1,
0, sizeof(T_SCORE) * 8,
stream);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// sortScoresPerImage LAUNCH CONFIG
typedef pluginStatus_t (*sspiFunc)(cudaStream_t,
const int,
const int,
void*,
void*,
void*,
void*,
void*);
struct sspiLaunchConfig
{
DataType t_score;
sspiFunc function;
sspiLaunchConfig(DataType t_score)
: t_score(t_score)
{
}
sspiLaunchConfig(DataType t_score, sspiFunc function)
: t_score(t_score)
, function(function)
{
}
bool operator==(const sspiLaunchConfig& other)
{
return t_score == other.t_score;
}
};
static std::vector<sspiLaunchConfig> sspiFuncVec;
bool sspiInit()
{
sspiFuncVec.push_back(sspiLaunchConfig(DataType::kFLOAT,
sortScoresPerImage_gpu<float>));
return true;
}
static bool initialized = sspiInit();
pluginStatus_t sortScoresPerImage(
cudaStream_t stream,
const int num_images,
const int num_items_per_image,
const DataType DT_SCORE,
void* unsorted_scores,
void* unsorted_bbox_indices,
void* sorted_scores,
void* sorted_bbox_indices,
void* workspace)
{
sspiLaunchConfig lc = sspiLaunchConfig(DT_SCORE);
for (unsigned i = 0; i < sspiFuncVec.size(); ++i)
{
if (lc == sspiFuncVec[i])
{
DEBUG_PRINTF("sortScoresPerImage kernel %d\n", i);
return sspiFuncVec[i].function(stream,
num_images,
num_items_per_image,
unsorted_scores,
unsorted_bbox_indices,
sorted_scores,
sorted_bbox_indices,
workspace);
}
}
return STATUS_BAD_PARAM;
}
size_t sortScoresPerImageWorkspaceSize(
const int num_images,
const int num_items_per_image,
const DataType DT_SCORE)
{
const int arrayLen = num_images * num_items_per_image;
size_t wss[2];
wss[0] = (num_images + 1) * sizeof(int); // offsets
if (DT_SCORE == DataType::kFLOAT)
{
wss[1] = cubSortPairsWorkspaceSize<float, int>(arrayLen, num_images); // cub workspace
}
else
{
printf("SCORE type not supported.\n");
return (size_t) -1;
}
return calculateTotalWorkspaceSize(wss, 2);
}
|
0b879bc9b6628a0770dc0ab970c3a14aca98ab20.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "unpackSourceKernelF32C1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE);
unsigned pitch = 2;
const hipSurfaceObject_t src = 1;
unsigned width = XSIZE;
unsigned height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
unpackSourceKernelF32C1), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,pitch,src,width,height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
unpackSourceKernelF32C1), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,pitch,src,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
unpackSourceKernelF32C1), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,pitch,src,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0b879bc9b6628a0770dc0ab970c3a14aca98ab20.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "unpackSourceKernelF32C1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE);
unsigned pitch = 2;
const cudaSurfaceObject_t src = 1;
unsigned width = XSIZE;
unsigned height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
unpackSourceKernelF32C1<<<gridBlock,threadBlock>>>(dst,pitch,src,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
unpackSourceKernelF32C1<<<gridBlock,threadBlock>>>(dst,pitch,src,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
unpackSourceKernelF32C1<<<gridBlock,threadBlock>>>(dst,pitch,src,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
56312ee1a77d19177816d4bdd5e9612d4c1cca28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudaDefs.h>
#include <ctime>
#include <cmath>
#include <random>
#include <algorithm>
//WARNING!!! Do not change TPB and NO_FORCES for this demo !!!
constexpr unsigned int TPB = 128;
constexpr unsigned int NO_FORCES = 1234567;
constexpr unsigned int NO_RAIN_DROPS = 1 << 20;
constexpr unsigned int MBPTB = 8; //MEM_BLOCKS_PER_THREAD_BLOCK
hipError_t error = hipSuccess;
hipDeviceProp_t deviceProp = hipDeviceProp_t();
using namespace std;
float3 *createData(const unsigned int length) {
random_device rd;
mt19937_64 mt(rd());
uniform_real_distribution<float> dist(0.0f, 1.0f);
auto *data = static_cast<float3*>(::operator new(length * sizeof(float3)));
float3* ptr = data;
for (unsigned int i = 0; i < length; i++, ptr++) {
*ptr = make_float3(dist(mt), dist(mt), dist(mt));
}
return data;
}
void printData(const float3 *data, const unsigned int length) {
if (data == nullptr) return;
const float3 *ptr = data;
for (unsigned int i = 0; i<length; i++, ptr++)
{
printf("%5.2f %5.2f %5.2f ", ptr->x, ptr->y, ptr->z);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary> Sums the forces to get the final one using parallel reduction.
/// WARNING!!! The method was written to meet input requirements of our example, i.e. 128 threads and 256 forces </summary>
/// <param name="dForces"> The forces. </param>
/// <param name="noForces"> The number of forces. </param>
/// <param name="dFinalForce"> [in,out] If non-null, the final force. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void reduce(const float3 * __restrict__ dForces, const unsigned int noForces, float3* __restrict__ dFinalForce) {
__shared__ float3 sForces[TPB]; //SEE THE WARNING MESSAGE !!!
const float3* start = &dForces[2 * blockDim.x * blockIdx.x];
unsigned int tid = threadIdx.x;
unsigned int next = TPB; //SEE THE WARNING MESSAGE !!!
float3* src = &sForces[tid];
float3* src2;
if (2 * blockDim.x * blockIdx.x + threadIdx.x >= noForces) { //src saha mimo dForces
*src = make_float3(0,0,0);
} else {
*src = start[tid];
if (2 * blockDim.x * blockIdx.x + threadIdx.x + next < noForces) { //src + next nesaha mimo dForces
src2 = (float3*)&start[tid + next];
src->x += src2->x;
src->y += src2->y;
src->z += src2->z;
}
}
__syncthreads();
next >>= 1; //64
if (tid >= next) return;
src2 = src + next;
src->x += src2->x;
src->y += src2->y;
src->z += src2->z;
__syncthreads();
while (next > 1) { //32 AND DOWN
next >>= 1;
if (tid >= next) return;
volatile float3* vsrc = &sForces[tid];
volatile float3* vsrc2 = vsrc + next;
vsrc->x += vsrc2->x;
vsrc->y += vsrc2->y;
vsrc->z += vsrc2->z;
}
if (tid == 0)
dFinalForce[blockIdx.x] = sForces[0];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary> Adds the FinalForce to every Rain drops position. </summary>
/// <param name="dFinalForce"> The final force. </param>
/// <param name="noRainDrops"> The number of rain drops. </param>
/// <param name="dRainDrops"> [in,out] If non-null, the rain drops positions. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void add(const float3* __restrict__ dFinalForce, const unsigned int noRainDrops, float3* __restrict__ dRainDrops) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int jump = gridDim.x * blockDim.x;
while (i < noRainDrops) {
dRainDrops[i].x += dFinalForce->x;
dRainDrops[i].y += dFinalForce->y;
dRainDrops[i].z += dFinalForce->z;
i += jump;
}
}
int main(int argc, char *argv[]) {
initializeCUDA(deviceProp);
hipEvent_t startEvent, stopEvent;
float elapsedTime;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
hipEventRecord(startEvent, 0);
float3 *hForces = createData(NO_FORCES);
float3 *hDrops = createData(NO_RAIN_DROPS);
float3 *dForces = nullptr;
float3 *dDrops = nullptr;
float3 *dFinalForce = nullptr;
error = hipMalloc((void**)&dForces, NO_FORCES * sizeof(float3));
error = hipMemcpy(dForces, hForces, NO_FORCES * sizeof(float3), hipMemcpyHostToDevice);
error = hipMalloc((void**)&dDrops, NO_RAIN_DROPS * sizeof(float3));
error = hipMemcpy(dDrops, hDrops, NO_RAIN_DROPS * sizeof(float3), hipMemcpyHostToDevice);
//for (unsigned int i = 0; i<1000; i++) {
KernelSetting ksReduce;
ksReduce.dimBlock = dim3(TPB, 1, 1);
ksReduce.dimGrid = dim3(NO_FORCES / (2 * ksReduce.dimBlock.x) + (NO_FORCES % ksReduce.dimBlock.x ? 1 : 0), 1, 1);
error = hipMalloc((void**)&dFinalForce, ksReduce.dimGrid.x * sizeof(float3));
unsigned int dataCount = NO_FORCES;
float3* dTmpData = dForces;
float3* dTmpRes = dFinalForce;
while (ksReduce.dimGrid.x > 1) { //Ukld vsledky redukce do pomocn pamti pro kad blok a redukuje, dokud nezbyde pouze jedin vsledek.
hipLaunchKernelGGL(( reduce), dim3(ksReduce.dimGrid), dim3(ksReduce.dimBlock), 0, 0, dTmpData, dataCount, dTmpRes);
std::swap(dTmpData, dTmpRes);
dataCount = ksReduce.dimGrid.x;
ksReduce.dimGrid.x = dataCount / (2 * ksReduce.dimBlock.x) + (dataCount % ksReduce.dimBlock.x ? 1 : 0);
}
hipLaunchKernelGGL(( reduce), dim3(ksReduce.dimGrid), dim3(ksReduce.dimBlock), 0, 0, dTmpData, dataCount, dTmpRes);
if (dFinalForce != dTmpRes)
hipMemcpy(dFinalForce, dTmpRes, sizeof(float3), hipMemcpyKind::hipMemcpyDeviceToDevice);
KernelSetting ksAdd;
ksAdd.dimBlock = dim3(TPB, 1, 1);
ksAdd.dimGrid = dim3(getNumberOfParts(NO_RAIN_DROPS, TPB * MBPTB), 1, 1);
hipLaunchKernelGGL(( add), dim3(ksAdd.dimGrid), dim3(ksAdd.dimBlock), 0, 0, dFinalForce, NO_RAIN_DROPS, dDrops);
//}
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
checkDeviceMatrix<float>((float*)dDrops, sizeof(float3), NO_RAIN_DROPS, 3, "%5.2f ", "Final Rain Drops");
checkDeviceMatrix<float>((float*)dFinalForce, sizeof(float3),1, 3, "%5.2f ", "Final force");
if (hForces)
free(hForces);
if (hDrops)
free(hDrops);
hipFree(dForces);
hipFree(dDrops);
hipEventElapsedTime(&elapsedTime, startEvent, stopEvent);
hipEventDestroy(startEvent);
hipEventDestroy(stopEvent);
printf("Time to get device properties: %f ms", elapsedTime);
}
| 56312ee1a77d19177816d4bdd5e9612d4c1cca28.cu | #include <cudaDefs.h>
#include <ctime>
#include <cmath>
#include <random>
#include <algorithm>
//WARNING!!! Do not change TPB and NO_FORCES for this demo !!!
constexpr unsigned int TPB = 128;
constexpr unsigned int NO_FORCES = 1234567;
constexpr unsigned int NO_RAIN_DROPS = 1 << 20;
constexpr unsigned int MBPTB = 8; //MEM_BLOCKS_PER_THREAD_BLOCK
cudaError_t error = cudaSuccess;
cudaDeviceProp deviceProp = cudaDeviceProp();
using namespace std;
float3 *createData(const unsigned int length) {
random_device rd;
mt19937_64 mt(rd());
uniform_real_distribution<float> dist(0.0f, 1.0f);
auto *data = static_cast<float3*>(::operator new(length * sizeof(float3)));
float3* ptr = data;
for (unsigned int i = 0; i < length; i++, ptr++) {
*ptr = make_float3(dist(mt), dist(mt), dist(mt));
}
return data;
}
void printData(const float3 *data, const unsigned int length) {
if (data == nullptr) return;
const float3 *ptr = data;
for (unsigned int i = 0; i<length; i++, ptr++)
{
printf("%5.2f %5.2f %5.2f ", ptr->x, ptr->y, ptr->z);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary> Sums the forces to get the final one using parallel reduction.
/// WARNING!!! The method was written to meet input requirements of our example, i.e. 128 threads and 256 forces </summary>
/// <param name="dForces"> The forces. </param>
/// <param name="noForces"> The number of forces. </param>
/// <param name="dFinalForce"> [in,out] If non-null, the final force. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void reduce(const float3 * __restrict__ dForces, const unsigned int noForces, float3* __restrict__ dFinalForce) {
__shared__ float3 sForces[TPB]; //SEE THE WARNING MESSAGE !!!
const float3* start = &dForces[2 * blockDim.x * blockIdx.x];
unsigned int tid = threadIdx.x;
unsigned int next = TPB; //SEE THE WARNING MESSAGE !!!
float3* src = &sForces[tid];
float3* src2;
if (2 * blockDim.x * blockIdx.x + threadIdx.x >= noForces) { //src saha mimo dForces
*src = make_float3(0,0,0);
} else {
*src = start[tid];
if (2 * blockDim.x * blockIdx.x + threadIdx.x + next < noForces) { //src + next nesaha mimo dForces
src2 = (float3*)&start[tid + next];
src->x += src2->x;
src->y += src2->y;
src->z += src2->z;
}
}
__syncthreads();
next >>= 1; //64
if (tid >= next) return;
src2 = src + next;
src->x += src2->x;
src->y += src2->y;
src->z += src2->z;
__syncthreads();
while (next > 1) { //32 AND DOWN
next >>= 1;
if (tid >= next) return;
volatile float3* vsrc = &sForces[tid];
volatile float3* vsrc2 = vsrc + next;
vsrc->x += vsrc2->x;
vsrc->y += vsrc2->y;
vsrc->z += vsrc2->z;
}
if (tid == 0)
dFinalForce[blockIdx.x] = sForces[0];
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary> Adds the FinalForce to every Rain drops position. </summary>
/// <param name="dFinalForce"> The final force. </param>
/// <param name="noRainDrops"> The number of rain drops. </param>
/// <param name="dRainDrops"> [in,out] If non-null, the rain drops positions. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void add(const float3* __restrict__ dFinalForce, const unsigned int noRainDrops, float3* __restrict__ dRainDrops) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int jump = gridDim.x * blockDim.x;
while (i < noRainDrops) {
dRainDrops[i].x += dFinalForce->x;
dRainDrops[i].y += dFinalForce->y;
dRainDrops[i].z += dFinalForce->z;
i += jump;
}
}
int main(int argc, char *argv[]) {
initializeCUDA(deviceProp);
cudaEvent_t startEvent, stopEvent;
float elapsedTime;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
cudaEventRecord(startEvent, 0);
float3 *hForces = createData(NO_FORCES);
float3 *hDrops = createData(NO_RAIN_DROPS);
float3 *dForces = nullptr;
float3 *dDrops = nullptr;
float3 *dFinalForce = nullptr;
error = cudaMalloc((void**)&dForces, NO_FORCES * sizeof(float3));
error = cudaMemcpy(dForces, hForces, NO_FORCES * sizeof(float3), cudaMemcpyHostToDevice);
error = cudaMalloc((void**)&dDrops, NO_RAIN_DROPS * sizeof(float3));
error = cudaMemcpy(dDrops, hDrops, NO_RAIN_DROPS * sizeof(float3), cudaMemcpyHostToDevice);
//for (unsigned int i = 0; i<1000; i++) {
KernelSetting ksReduce;
ksReduce.dimBlock = dim3(TPB, 1, 1);
ksReduce.dimGrid = dim3(NO_FORCES / (2 * ksReduce.dimBlock.x) + (NO_FORCES % ksReduce.dimBlock.x ? 1 : 0), 1, 1);
error = cudaMalloc((void**)&dFinalForce, ksReduce.dimGrid.x * sizeof(float3));
unsigned int dataCount = NO_FORCES;
float3* dTmpData = dForces;
float3* dTmpRes = dFinalForce;
while (ksReduce.dimGrid.x > 1) { //Ukládá výsledky redukce do pomocné paměti pro každý blok a redukuje, dokud nezbyde pouze jediný výsledek.
reduce<<<ksReduce.dimGrid, ksReduce.dimBlock>>>(dTmpData, dataCount, dTmpRes);
std::swap(dTmpData, dTmpRes);
dataCount = ksReduce.dimGrid.x;
ksReduce.dimGrid.x = dataCount / (2 * ksReduce.dimBlock.x) + (dataCount % ksReduce.dimBlock.x ? 1 : 0);
}
reduce<<<ksReduce.dimGrid, ksReduce.dimBlock>>>(dTmpData, dataCount, dTmpRes);
if (dFinalForce != dTmpRes)
cudaMemcpy(dFinalForce, dTmpRes, sizeof(float3), cudaMemcpyKind::cudaMemcpyDeviceToDevice);
KernelSetting ksAdd;
ksAdd.dimBlock = dim3(TPB, 1, 1);
ksAdd.dimGrid = dim3(getNumberOfParts(NO_RAIN_DROPS, TPB * MBPTB), 1, 1);
add<<<ksAdd.dimGrid, ksAdd.dimBlock>>>(dFinalForce, NO_RAIN_DROPS, dDrops);
//}
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
checkDeviceMatrix<float>((float*)dDrops, sizeof(float3), NO_RAIN_DROPS, 3, "%5.2f ", "Final Rain Drops");
checkDeviceMatrix<float>((float*)dFinalForce, sizeof(float3),1, 3, "%5.2f ", "Final force");
if (hForces)
free(hForces);
if (hDrops)
free(hDrops);
cudaFree(dForces);
cudaFree(dDrops);
cudaEventElapsedTime(&elapsedTime, startEvent, stopEvent);
cudaEventDestroy(startEvent);
cudaEventDestroy(stopEvent);
printf("Time to get device properties: %f ms", elapsedTime);
}
|
dc9dafdcd08ebe2d33bb5469c57bd4645348170a.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TOR (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cublas_helpers.h"
#include "gemm_operation_profiler.h"
#include "gpu_timer.h"
#include "cutlass/library/library.h"
#include "cutlass/library/handle.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
GemmOperationProfiler::GemmOperationProfiler(Options const& options)
: OperationProfiler(
options, library::OperationKind::kGemm,
{
{ArgumentTypeID::kEnumerated,
{"gemm_kind"},
"Variant of GEMM (gemm, batched, array, universal, "
"planar_complex, planar_complex_array)"},
{ArgumentTypeID::kInteger,
{"m", "problem-size::m"},
"M dimension of the GEMM problem space"},
{ArgumentTypeID::kInteger,
{"n", "problem-size::n"},
"N dimension of the GEMM problem space"},
{ArgumentTypeID::kInteger,
{"k", "problem-size::k"},
"K dimension of the GEMM problem space"},
{ArgumentTypeID::kTensor,
{"A"},
"Tensor storing the A operand"},
{ArgumentTypeID::kTensor,
{"B"},
"Tensor storing the B operand"},
{ArgumentTypeID::kTensor,
{"C"},
"Tensor storing the C operand"},
{ArgumentTypeID::kScalar,
{"alpha", "epilogue::alpha"},
"Epilogue scalar alpha"},
{ArgumentTypeID::kScalar,
{"beta", "epilogue::beta"},
"Epilogue scalar beta"},
{ArgumentTypeID::kInteger,
{"split_k_slices", "split-k-slices"},
"Number of partitions of K dimension"},
{ArgumentTypeID::kInteger,
{"batch_count", "batch-count"},
"Number of GEMMs computed in one batch"},
},
{library::Provider::kCUBLAS}) {
description_ =
" General matrix-matrix product. D = alpha * A*B + beta * C";
}
/// Destructor
GemmOperationProfiler::~GemmOperationProfiler() {}
/// Prints usage statement for the math function
void GemmOperationProfiler::print_usage(std::ostream& out) const {
out << "GEMM"
<< "\n\n";
OperationProfiler::print_usage(out);
}
/// Prints examples
void GemmOperationProfiler::print_examples(std::ostream& out) const {
out << "\nExamples:\n\n"
<< "Profile a particular problem size:\n"
<< " $ cutlass_profiler --operation=Gemm --m=1024 --n=1024 --k=128\n\n"
<< "Schmoo over problem size and beta:\n"
<< " $ cutlass_profiler --operation=Gemm --m=1024:4096:256 "
"--n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
<< "Schmoo over accumulator types:\n"
<< " $ cutlass_profiler --operation=Gemm "
"--accumulator-type=f16,f32\n\n"
<< "Run when A is f16 with column-major and B is any datatype with "
"row-major (For column major, use column, col, or n. For row major "
"use, row or t):\n"
<< " $ cutlass_profiler --operation=Gemm --A=f16:column --B=*:row\n\n"
<< "Using various input value distribution:\n"
<< " $ cutlass_profiler --operation=Gemm --dist=uniform,min:0,max:3\n"
<< " $ cutlass_profiler --operation=Gemm "
"--dist=gaussian,mean:0,stddev:3\n"
<< " $ cutlass_profiler --operation=Gemm "
"--dist=sequential,start:0,delta:1\n\n"
<< "Run a kernel with cta tile size of 256x128x32 and save workspace "
"if results are incorrect (note that --cta-tile::k=32 is default "
"cta-tile size):\n"
<< " $ cutlass_profiler --operation=Gemm --cta_m=256 --cta_n=128 "
"--cta_k=32 --save-workspace=incorrect\n\n"
<< "Test your changes to gemm kernels with a quick functional test and "
"save results in functional-test.csv:\n"
<< " $ cutlass_profiler --operation=Gemm \\ \n"
<< " --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
<< " --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
<< " --beta=0,1,2 --profiling-iterations=1 \\ \n"
<< " --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
Status GemmOperationProfiler::GemmProblem::parse(
library::GemmDescription const& operation_desc,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
this->mode = library::GemmUniversalMode::kGemm;
if (!arg_as_int(this->m, "m", problem_space, problem)) {
// default value
this->m = 1024;
}
if (!arg_as_int(this->n, "n", problem_space, problem)) {
// default value
this->n = 1024;
}
if (!arg_as_int(this->k, "k", problem_space, problem)) {
// default value
this->k = 1024;
}
this->mode = library::GemmUniversalMode::kGemm;
if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space,
problem)) {
// default value
this->split_k_slices = 1;
}
if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
// default value
this->batch_count = 1;
} else if (this->batch_count > 1) {
this->mode = library::GemmUniversalMode::kBatched;
}
if (this->split_k_slices > 1 && this->batch_count > 1) {
// At least one of these must be one
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.A, "A", problem_space,
problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.B, "B", problem_space,
problem)) {
return Status::kErrorInvalidProblem;
}
if (!tensor_description_satisfies(operation_desc.C, "C", problem_space,
problem)) {
return Status::kErrorInvalidProblem;
}
if (!arg_as_scalar(this->alpha, operation_desc.element_epilogue, "alpha",
problem_space, problem)) {
if (!cast_from_double(this->alpha, operation_desc.element_epilogue,
1)) {
return Status::kErrorInternal;
}
}
if (!arg_as_scalar(this->beta, operation_desc.element_epilogue, "beta",
problem_space, problem)) {
if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
return Status::kErrorInternal;
}
}
this->lda = DeviceAllocation::get_packed_layout(
operation_desc.A.layout, {int(this->m), int(this->k)})
.front();
this->ldb = DeviceAllocation::get_packed_layout(
operation_desc.B.layout, {int(this->k), int(this->n)})
.front();
this->ldc = DeviceAllocation::get_packed_layout(
operation_desc.C.layout, {int(this->m), int(this->n)})
.front();
return Status::kSuccess;
}
/// Total number of bytes loaded
int64_t GemmOperationProfiler::GemmProblem::bytes(
library::GemmDescription const& operation_desc) const {
// Input bytes read and Output bytes written for the gemm problem
int64_t bytes =
int64_t(library::sizeof_bits(operation_desc.A.element) * m / 8) *
k +
int64_t(library::sizeof_bits(operation_desc.B.element) * n / 8) *
k +
int64_t(library::sizeof_bits(operation_desc.C.element) * m / 8) * n;
// Set is_beta_zero true if beta is zero
bool is_beta_zero = std::all_of(beta.begin(), beta.end(),
[](uint8_t i) { return i == 0; });
// Output bytes read for the gemm problem for non-zero beta values
if (!is_beta_zero) {
bytes += int64_t(library::sizeof_bits(operation_desc.C.element) * m /
8) *
n;
}
bytes *= batch_count;
return bytes;
}
/// Total number of flops computed
int64_t GemmOperationProfiler::GemmProblem::flops(
library::GemmDescription const& operation_desc) const {
int64_t flops_ = (int64_t(m) * n * k + m * n) * 2 * batch_count;
// complex-valued support
switch (operation_desc.tile_description.math_instruction.math_operation) {
case library::MathOperationID::kMultiplyAddComplex:
flops_ *= 4;
break;
case library::MathOperationID::kMultiplyAddGaussianComplex:
flops_ *= 3;
break;
default:
break;
}
return flops_;
}
/// Initializes a performance result
void GemmOperationProfiler::GemmProblem::initialize_result(
PerformanceResult& result,
library::GemmDescription const& operation_desc,
ProblemSpace const& problem_space) {
result.arguments.resize(problem_space.rank());
set_argument(result, "gemm_kind", problem_space,
library::to_string(operation_desc.gemm_kind));
set_argument(result, "A", problem_space,
std::string(library::to_string(operation_desc.A.element)) +
":" + library::to_string(operation_desc.A.layout));
set_argument(result, "B", problem_space,
std::string(library::to_string(operation_desc.B.element)) +
":" + library::to_string(operation_desc.B.layout));
set_argument(result, "C", problem_space,
std::string(library::to_string(operation_desc.C.element)) +
":" + library::to_string(operation_desc.C.layout));
set_argument(result, "m", problem_space, m);
set_argument(result, "n", problem_space, n);
set_argument(result, "k", problem_space, k);
set_argument(result, "split_k_slices", problem_space, split_k_slices);
set_argument(result, "batch_count", problem_space, batch_count);
set_argument(result, "alpha", problem_space,
library::lexical_cast(alpha, operation_desc.element_epilogue));
set_argument(result, "beta", problem_space,
library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status GemmOperationProfiler::initialize_configuration(
    Options const& options, PerformanceReport& report,
    DeviceContext& device_context, library::Operation const* operation,
    ProblemSpace const& problem_space,
    ProblemSpace::Problem const& problem) {
    library::GemmDescription const& operation_desc =
        static_cast<library::GemmDescription const&>(
            operation->description());
    // Only the universal GEMM kind is supported by this profiler.
    if (operation_desc.gemm_kind != library::GemmKind::kUniversal) {
        return Status::kErrorInvalidProblem;
    }
    Status status = problem_.parse(operation_desc, problem_space, problem);
    if (status != Status::kSuccess) {
        return status;
    }
    gemm_workspace_.configuration.mode = problem_.mode;
    gemm_workspace_.configuration.problem_size.m() = int(problem_.m);
    gemm_workspace_.configuration.problem_size.n() = int(problem_.n);
    gemm_workspace_.configuration.problem_size.k() = int(problem_.k);
    gemm_workspace_.configuration.lda = problem_.lda;
    gemm_workspace_.configuration.ldb = problem_.ldb;
    gemm_workspace_.configuration.ldc = problem_.ldc;
    // D shares C's leading dimension.
    gemm_workspace_.configuration.ldd = problem_.ldc;
    // In batched mode 'batch_count' is the batch size; otherwise the same
    // field carries the number of split-K slices.
    if (problem_.mode == library::GemmUniversalMode::kBatched) {
        gemm_workspace_.configuration.batch_count = problem_.batch_count;
    } else {
        gemm_workspace_.configuration.batch_count = problem_.split_k_slices;
    }
    // Tensor pointers are bound later, once workspaces are allocated.
    gemm_workspace_.arguments.A = nullptr;
    gemm_workspace_.arguments.B = nullptr;
    gemm_workspace_.arguments.C = nullptr;
    gemm_workspace_.arguments.D = nullptr;
    gemm_workspace_.arguments.alpha = problem_.alpha.data();
    gemm_workspace_.arguments.beta = problem_.beta.data();
    gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
    initialize_result_(this->model_result_, options, operation_desc,
                       problem_space);
    return operation->can_implement(&gemm_workspace_.configuration,
                                    &gemm_workspace_.arguments);
}
/// Initializes the performance result
void GemmOperationProfiler::initialize_result_(
    PerformanceResult& result, Options const& options,
    library::GemmDescription const& operation_desc,
    ProblemSpace const& problem_space) {
    result.provider = library::Provider::kCUTLASS;
    result.disposition = Disposition::kNotRun;
    result.status = Status::kSuccess;
    result.operation_name = operation_desc.name;
    // Populate problem-space arguments first, then profiler-level ones.
    problem_.initialize_result(result, operation_desc, problem_space);
    OperationProfiler::initialize_result_(result, operation_desc,
                                          problem_space);
    // Analytic byte/flop counts used for throughput reporting.
    result.bytes = problem_.bytes(operation_desc);
    result.flops = problem_.flops(operation_desc);
    result.runtime = 0;
}
/// Initializes workspace
Status GemmOperationProfiler::initialize_workspace(
    Options const& options, PerformanceReport& report,
    DeviceContext& device_context, library::Operation const* operation,
    ProblemSpace const& problem_space,
    ProblemSpace::Problem const& problem) {
    library::GemmDescription const& operation_desc =
        static_cast<library::GemmDescription const&>(
            operation->description());
    // Compute the number of copies of the problem to avoid L2 camping:
    // size the working set to exceed ~3x the L2 capacity so cache
    // residency does not skew the timing measurements.
    if (!options.profiling.workspace_count) {
        int64_t bytes = problem_.bytes(operation_desc);
        if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) {
            gemm_workspace_.problem_count =
                1 +
                int((3 * int64_t(options.device.properties.l2CacheSize)) /
                    bytes);
        } else {
            gemm_workspace_.problem_count = 1;
        }
    } else {
        // Explicit override from the command line.
        gemm_workspace_.problem_count = options.profiling.workspace_count;
    }
    if (options.execution_mode != ExecutionMode::kDryRun) {
        gemm_workspace_.A = device_context.allocate_tensor(
            options, "A", operation_desc.A.element, operation_desc.A.layout,
            {int(problem_.m), int(problem_.k)}, {int(problem_.lda)},
            problem_.batch_count * gemm_workspace_.problem_count);
        gemm_workspace_.B = device_context.allocate_tensor(
            options, "B", operation_desc.B.element, operation_desc.B.layout,
            {int(problem_.k), int(problem_.n)}, {int(problem_.ldb)},
            problem_.batch_count * gemm_workspace_.problem_count);
        gemm_workspace_.C = device_context.allocate_tensor(
            options, "C", operation_desc.C.element, operation_desc.C.layout,
            {int(problem_.m), int(problem_.n)}, {int(problem_.ldc)},
            problem_.batch_count * gemm_workspace_.problem_count);
        // NOTE(review): D and Reference use the allocate_tensor overload
        // without 'options' -- presumably skipping data initialization;
        // confirm against DeviceContext.
        gemm_workspace_.Computed = device_context.allocate_tensor(
            "D", operation_desc.C.element, operation_desc.C.layout,
            {int(problem_.m), int(problem_.n)}, {int(problem_.ldc)},
            problem_.batch_count * gemm_workspace_.problem_count);
        gemm_workspace_.Reference = device_context.allocate_tensor(
            "Reference", operation_desc.C.element, operation_desc.C.layout,
            {int(problem_.m), int(problem_.n)}, {int(problem_.ldc)},
            problem_.batch_count * gemm_workspace_.problem_count);
        // Seed Reference with C so reference and CUTLASS runs accumulate
        // from identical source data.
        gemm_workspace_.Reference->copy_from_device(gemm_workspace_.C->data());
    }
    //
    // Initialize the CUTLASS operation
    //
    Status status = Status::kSuccess;
    if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
        if (options.execution_mode != ExecutionMode::kDryRun) {
            uint64_t workspace_size = operation->get_host_workspace_size(
                &gemm_workspace_.configuration);
            gemm_workspace_.host_workspace.resize(workspace_size, 0);
            workspace_size = operation->get_device_workspace_size(
                &gemm_workspace_.configuration);
            gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8,
                                                   workspace_size);
            status = operation->initialize(
                &gemm_workspace_.configuration,
                gemm_workspace_.host_workspace.data(),
                gemm_workspace_.device_workspace.data());
        }
        //
        // If CUTLASS is enabled, generate a result for it
        //
        results_.push_back(model_result_);
        results_.back().provider = library::Provider::kCUTLASS;
        results_.back().op_kind = library::OperationKind::kGemm;
        results_.back().disposition = Disposition::kNotRun;
        for (auto provider : verification_providers_) {
            results_.back().verification_map[provider] = Disposition::kNotRun;
        }
    }
    return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool GemmOperationProfiler::verify_cutlass(
    Options const& options, PerformanceReport& report,
    DeviceContext& device_context, library::Operation const* operation,
    ProblemSpace const& problem_space,
    ProblemSpace::Problem const& problem) {
    // Nothing to verify if CUTLASS is not profiled or in dry-run mode.
    if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
        return true;
    }
    if (options.execution_mode == ExecutionMode::kDryRun) {
        return true;
    }
    // Initialize structure containing GEMM arguments
    gemm_workspace_.arguments.A = gemm_workspace_.A->data();
    gemm_workspace_.arguments.B = gemm_workspace_.B->data();
    gemm_workspace_.arguments.C = gemm_workspace_.C->data();
    gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
    gemm_workspace_.arguments.alpha = problem_.alpha.data();
    gemm_workspace_.arguments.beta = problem_.beta.data();
    gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
    gemm_workspace_.arguments.batch_stride_A =
        gemm_workspace_.A->batch_stride();
    gemm_workspace_.arguments.batch_stride_B =
        gemm_workspace_.B->batch_stride();
    gemm_workspace_.arguments.batch_stride_C =
        gemm_workspace_.C->batch_stride();
    gemm_workspace_.arguments.batch_stride_D =
        gemm_workspace_.Computed->batch_stride();
    //
    // Run the CUTLASS operation
    //
    results_.back().status = operation->run(
        &gemm_workspace_.arguments, gemm_workspace_.host_workspace.data(),
        gemm_workspace_.device_workspace.data());
    if (results_.back().status != Status::kSuccess) {
        results_.back().disposition = Disposition::kFailed;
        return false;
    }
    // Synchronize to surface asynchronous kernel failures before comparing.
    hipError_t result = hipDeviceSynchronize();
    if (result != hipSuccess) {
        results_.back().disposition = Disposition::kFailed;
        return false;
    }
    // CUTLASS op ran but has not yet been verified against any provider
    results_.back().disposition = Disposition::kNotVerified;
    //
    // Run verification providers
    //
    if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
        if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
            // Guard against unsupported cases
            auto const& gemm_desc =
                static_cast<library::GemmDescription const&>(
                    operation->description());
            if (cublas_satisfies(gemm_desc) == Status::kSuccess) {
                // call cublas verification if supported
                verify_with_cublas_(options, report, device_context, operation,
                                    problem_space, problem);
            }
            else {
                // set verification map for cublas to not supported
                results_.back().verification_map[library::Provider::kCUBLAS] =
                    Disposition::kNotSupported;
            }
        }
#endif // #if CUTLASS_ENABLE_CUBLAS
        verify_with_reference_(options, report, device_context, operation,
                               problem_space, problem);
        // Update disposition to worst case verification outcome among all
        // verification providers which are supported
        bool is_any_verification_run_passed = false;
        for (auto& m : results_.back().verification_map) {
            // Any failure/incorrect outcome dominates the final disposition.
            if (m.second == Disposition::kFailed ||
                m.second == Disposition::kIncorrect) {
                results_.back().disposition = m.second;
                return true;
            }
            if (!is_any_verification_run_passed &&
                m.second == Disposition::kPassed) {
                is_any_verification_run_passed = true;
            }
        }
        if (is_any_verification_run_passed) {
            results_.back().disposition = Disposition::kPassed;
        }
    }
    // Return true means continue profiling
    return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
/// Verifies the CUTLASS result against a cuBLAS (hipBLAS) reference run.
/// Returns true to indicate profiling should continue regardless of outcome;
/// the verdict is recorded in results_.back().verification_map.
bool GemmOperationProfiler::verify_with_cublas_(
    Options const& options, PerformanceReport& report,
    DeviceContext& device_context, library::Operation const* operation,
    ProblemSpace const& problem_space,
    ProblemSpace::Problem const& problem) {
#if CUTLASS_ENABLE_CUBLAS
    library::GemmDescription const& gemm_desc =
        static_cast<library::GemmDescription const&>(
            operation->description());
    //
    // Construct cuBLAS operators
    //
    CublasCreate handle;
    hipblasStatus_t status = handle.get_cublas_create_status();
    if (status != HIPBLAS_STATUS_SUCCESS) {
        results_.back().verification_map[library::Provider::kCUBLAS] =
            get_cutlass_disposition(status);
        return true;
    }
    std::vector<hipblasGemmAlgo_t> algorithms;
    detail::select_cublas_algorithms(algorithms, options, gemm_desc);
    if (algorithms.empty()) {
        // no algorithm selected
        return true;
    }
    //
    // Initialize state
    //
    try {
        //
        // Construct dispatcher to hipblasGemmEx()
        //
        // Initialize structure containing GEMM arguments. The Reference
        // tensor serves as both the C operand and the D output so the
        // cuBLAS result lands in Reference for comparison against Computed.
        gemm_workspace_.arguments.A = gemm_workspace_.A->data();
        gemm_workspace_.arguments.batch_stride_A =
            gemm_workspace_.A->batch_stride();
        gemm_workspace_.arguments.B = gemm_workspace_.B->data();
        gemm_workspace_.arguments.batch_stride_B =
            gemm_workspace_.B->batch_stride();
        gemm_workspace_.arguments.C = gemm_workspace_.Reference->data();
        // Bug fix: this previously assigned batch_stride_D (duplicated
        // below) and left batch_stride_C stale from the prior CUTLASS run.
        gemm_workspace_.arguments.batch_stride_C =
            gemm_workspace_.Reference->batch_stride();
        gemm_workspace_.arguments.D = gemm_workspace_.Reference->data();
        gemm_workspace_.arguments.batch_stride_D =
            gemm_workspace_.Reference->batch_stride();
        gemm_workspace_.arguments.alpha = problem_.alpha.data();
        gemm_workspace_.arguments.beta = problem_.beta.data();
        gemm_workspace_.arguments.pointer_mode =
            library::ScalarPointerMode::kHost;
        detail::cublasGemmExDispatcher gemm_op(
            gemm_desc, gemm_workspace_.configuration,
            gemm_workspace_.arguments, algorithms.front());
        if (gemm_op.status != Status::kSuccess) {
            results_.back().verification_map[library::Provider::kCUBLAS] =
                Disposition::kNotRun;
            return true;
        }
        results_.back().status = Status::kSuccess;
        status = gemm_op(handle);
        // Handle errors
        if (status != HIPBLAS_STATUS_SUCCESS) {
            results_.back().verification_map[library::Provider::kCUBLAS] =
                get_cutlass_disposition(status);
            return true;
        }
        //
        // Verify results
        //
        results_.back().verification_map[library::Provider::kCUBLAS] =
            compare_tensors(options, *gemm_workspace_.Computed,
                            *gemm_workspace_.Reference,
                            gemm_workspace_.Computed->batch_stride());
        // Save workspace if incorrect
        if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
            results_.back().verification_map[library::Provider::kCUBLAS] ==
                Disposition::kIncorrect) {
            save_workspace(device_context, options, gemm_desc,
                           library::Provider::kCUTLASS,
                           library::Provider::kCUBLAS);
        }
    } catch (...) {
        // Any exception from the dispatcher marks the verification failed.
        results_.back().verification_map[library::Provider::kCUBLAS] =
            Disposition::kFailed;
    }
#endif
    // Return true means continue profiling
    return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against host and device references
bool GemmOperationProfiler::verify_with_reference_(
    Options const& options, PerformanceReport& report,
    DeviceContext& device_context, library::Operation const* operation,
    ProblemSpace const& problem_space,
    ProblemSpace::Problem const& problem) {
    library::GemmDescription const& gemm_desc =
        static_cast<library::GemmDescription const&>(
            operation->description());
    //
    // Initialize state
    //
    // Candidate reference providers, tried in this order.
    library::Provider references[] = {library::Provider::kReferenceDevice,
                                      library::Provider::kReferenceHost};
    for (auto provider : references) {
        // Skip providers that are not enabled
        if (!options.verification.provider_enabled(provider)) {
            continue;
        }
        void* ptr_A = gemm_workspace_.A->data();
        void* ptr_B = gemm_workspace_.B->data();
        void* ptr_C = gemm_workspace_.C->data();
        void* ptr_D = gemm_workspace_.Reference->data();
        // To support the host-side reference, conditionally allocate and
        // copy tensors to host memory.
        std::vector<uint8_t> host_data_A;
        std::vector<uint8_t> host_data_B;
        std::vector<uint8_t> host_data_C;
        std::vector<uint8_t> host_data_D;
        if (provider == library::Provider::kReferenceHost) {
            host_data_A.resize(gemm_workspace_.A->bytes());
            ptr_A = host_data_A.data();
            gemm_workspace_.A->copy_to_host(ptr_A);
            host_data_B.resize(gemm_workspace_.B->bytes());
            ptr_B = host_data_B.data();
            gemm_workspace_.B->copy_to_host(ptr_B);
            host_data_C.resize(gemm_workspace_.C->bytes());
            ptr_C = host_data_C.data();
            gemm_workspace_.C->copy_to_host(ptr_C);
            // D is output-only for the reference; no copy-in required.
            host_data_D.resize(gemm_workspace_.Reference->bytes());
            ptr_D = host_data_D.data();
        }
        //
        // Launch
        //
        library::Handle handle;
        handle.set_provider(provider);
        Status status = handle.gemm_universal(
            library::GemmUniversalMode::kGemm,
            gemm_workspace_.configuration.problem_size.m(),
            gemm_workspace_.configuration.problem_size.n(),
            gemm_workspace_.configuration.problem_size.k(),
            gemm_desc.tile_description.math_instruction.element_accumulator,
            gemm_desc.element_epilogue,
            problem_.alpha.data(),
            gemm_desc.A.element, gemm_desc.A.layout, gemm_desc.transform_A,
            ptr_A, int(gemm_workspace_.configuration.lda),
            gemm_desc.B.element, gemm_desc.B.layout, gemm_desc.transform_B,
            ptr_B, int(gemm_workspace_.configuration.ldb),
            problem_.beta.data(),
            gemm_desc.C.element, ptr_C,
            int(gemm_workspace_.configuration.ldc),
            ptr_D, int(gemm_workspace_.configuration.ldd),
            gemm_workspace_.configuration.batch_count,
            gemm_workspace_.A->batch_stride(),
            gemm_workspace_.B->batch_stride(),
            gemm_workspace_.C->batch_stride(),
            gemm_workspace_.Reference->batch_stride());
        if (status != Status::kSuccess) {
            // NOTE(review): this early return skips any remaining reference
            // providers in the list -- confirm that is intended.
            results_.back().verification_map[provider] = Disposition::kNotRun;
            return true;
        }
        results_.back().status = status;
        // Host reference wrote into host_data_D; publish it to the device
        // tensor before comparison.
        if (provider == library::Provider::kReferenceHost) {
            gemm_workspace_.Reference->copy_from_host(ptr_D);
        }
        //
        // Verify results
        //
        results_.back().verification_map[provider] = compare_tensors(
            options, *gemm_workspace_.Computed, *gemm_workspace_.Reference,
            gemm_workspace_.Computed->batch_stride());
        // Save workspace if incorrect
        if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
            results_.back().verification_map[provider] ==
                Disposition::kIncorrect) {
            save_workspace(device_context, options, gemm_desc,
                           library::Provider::kCUTLASS, provider);
        }
    }
    return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool GemmOperationProfiler::profile(Options const& options,
                                    PerformanceReport& report,
                                    DeviceContext& device_context,
                                    library::Operation const* operation,
                                    ProblemSpace const& problem_space,
                                    ProblemSpace::Problem const& problem) {
    if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
        // Initialize structure containing GEMM arguments; pointers are
        // rebound here because verification may have redirected them.
        gemm_workspace_.arguments.A = gemm_workspace_.A->data();
        gemm_workspace_.arguments.B = gemm_workspace_.B->data();
        gemm_workspace_.arguments.C = gemm_workspace_.C->data();
        gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
        gemm_workspace_.arguments.alpha = problem_.alpha.data();
        gemm_workspace_.arguments.beta = problem_.beta.data();
        gemm_workspace_.arguments.pointer_mode =
            library::ScalarPointerMode::kHost;
        gemm_workspace_.arguments.batch_stride_A =
            gemm_workspace_.A->batch_stride();
        gemm_workspace_.arguments.batch_stride_B =
            gemm_workspace_.B->batch_stride();
        gemm_workspace_.arguments.batch_stride_C =
            gemm_workspace_.C->batch_stride();
        gemm_workspace_.arguments.batch_stride_D =
            gemm_workspace_.Computed->batch_stride();
        // Timed run; the measured runtime is written into the result.
        results_.back().status =
            profile_cutlass_(results_.back().runtime, options, operation,
                             &gemm_workspace_.arguments,
                             gemm_workspace_.host_workspace.data(),
                             gemm_workspace_.device_workspace.data());
    }
    return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Method to profile a CUTLASS Operation
Status GemmOperationProfiler::profile_cutlass_(
    double& runtime, Options const& options,
    library::Operation const* operation, void* arguments,
    void* host_workspace, void* device_workspace) {
    GpuTimer timer;
    //
    // Optional sleep to limit power consumption and thermals
    //
    sleep(options.profiling.sleep_duration);
    //
    // Warmup loop: rotate through the distinct problem copies so the
    // working set is not resident in L2 when timing begins.
    //
    Status status;
    for (int iteration = 0; iteration < options.profiling.warmup_iterations;
         ++iteration) {
        int problem_idx = (iteration % gemm_workspace_.problem_count) *
                          problem_.batch_count;
        gemm_workspace_.arguments.A =
            gemm_workspace_.A->batch_data(problem_idx);
        gemm_workspace_.arguments.B =
            gemm_workspace_.B->batch_data(problem_idx);
        gemm_workspace_.arguments.C =
            gemm_workspace_.C->batch_data(problem_idx);
        gemm_workspace_.arguments.D =
            gemm_workspace_.Computed->batch_data(problem_idx);
        // Execute the CUTLASS operation
        status = operation->run(&gemm_workspace_.arguments, host_workspace,
                                device_workspace);
        if (status != Status::kSuccess) {
            return status;
        }
    }
    //
    // Initialize GPU timer
    //
    timer.start();
    //
    // Profiling loop
    //
    int Iterations = options.profiling.iterations;
    int iteration = 0;
    for (; iteration < Iterations; ++iteration) {
        // Iterate over copies of the problem in memory
        int workspace_idx = options.profiling.warmup_iterations + iteration;
        int problem_idx = (workspace_idx % gemm_workspace_.problem_count) *
                          problem_.batch_count;
        gemm_workspace_.arguments.A =
            gemm_workspace_.A->batch_data(problem_idx);
        gemm_workspace_.arguments.B =
            gemm_workspace_.B->batch_data(problem_idx);
        gemm_workspace_.arguments.C =
            gemm_workspace_.C->batch_data(problem_idx);
        gemm_workspace_.arguments.D =
            gemm_workspace_.Computed->batch_data(problem_idx);
        // NOTE(review): this passes the caller's 'arguments' pointer while
        // the loop mutates gemm_workspace_.arguments; correct only because
        // the sole caller passes &gemm_workspace_.arguments -- confirm.
        status = operation->run(arguments, host_workspace, device_workspace);
        if (status != Status::kSuccess) {
            return status;
        }
    }
    //
    // Wait for completion
    //
    timer.stop_and_wait();
    //
    // Update performance result -- presumably the mean time per iteration.
    //
    runtime = timer.duration(iteration);
    return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
| dc9dafdcd08ebe2d33bb5469c57bd4645348170a.cu | /***************************************************************************************************
* Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TOR (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/* \file
\brief Execution environment
*/
#include <iostream>
#include <stdexcept>
#include <iomanip>
#include <ios>
#include "cutlass/core_io.h"
#include "cublas_helpers.h"
#include "gemm_operation_profiler.h"
#include "gpu_timer.h"
#include "cutlass/library/library.h"
#include "cutlass/library/handle.h"
/////////////////////////////////////////////////////////////////////////////////////////////////
namespace cutlass {
namespace profiler {
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Ctor
GemmOperationProfiler::GemmOperationProfiler(Options const& options)
    // Register the GEMM operation kind, the command-line arguments this
    // profiler accepts, and the verification providers it supports.
    : OperationProfiler(
              options, library::OperationKind::kGemm,
              {
                      {ArgumentTypeID::kEnumerated,
                       {"gemm_kind"},
                       "Variant of GEMM (gemm, batched, array, universal, "
                       "planar_complex, planar_complex_array)"},
                      {ArgumentTypeID::kInteger,
                       {"m", "problem-size::m"},
                       "M dimension of the GEMM problem space"},
                      {ArgumentTypeID::kInteger,
                       {"n", "problem-size::n"},
                       "N dimension of the GEMM problem space"},
                      {ArgumentTypeID::kInteger,
                       {"k", "problem-size::k"},
                       "K dimension of the GEMM problem space"},
                      {ArgumentTypeID::kTensor,
                       {"A"},
                       "Tensor storing the A operand"},
                      {ArgumentTypeID::kTensor,
                       {"B"},
                       "Tensor storing the B operand"},
                      {ArgumentTypeID::kTensor,
                       {"C"},
                       "Tensor storing the C operand"},
                      {ArgumentTypeID::kScalar,
                       {"alpha", "epilogue::alpha"},
                       "Epilogue scalar alpha"},
                      {ArgumentTypeID::kScalar,
                       {"beta", "epilogue::beta"},
                       "Epilogue scalar beta"},
                      {ArgumentTypeID::kInteger,
                       {"split_k_slices", "split-k-slices"},
                       "Number of partitions of K dimension"},
                      {ArgumentTypeID::kInteger,
                       {"batch_count", "batch-count"},
                       "Number of GEMMs computed in one batch"},
              },
              {library::Provider::kCUBLAS}) {
    // Human-readable description shown in profiler output.
    description_ =
            " General matrix-matrix product. D = alpha * A*B + beta * C";
}
/// Destructor
// Empty body: cleanup is handled by member destructors.
GemmOperationProfiler::~GemmOperationProfiler() {}
/// Prints usage statement for the math function
void GemmOperationProfiler::print_usage(std::ostream& out) const {
    // Emit the operation title, then defer to the common profiler usage text.
    out << "GEMM\n\n";
    OperationProfiler::print_usage(out);
}
/// Prints examples
void GemmOperationProfiler::print_examples(std::ostream& out) const {
    // Each example below is a copy-pasteable cutlass_profiler invocation.
    out << "\nExamples:\n\n"
        << "Profile a particular problem size:\n"
        << "  $ cutlass_profiler --operation=Gemm --m=1024 --n=1024 --k=128\n\n"
        << "Schmoo over problem size and beta:\n"
        << "  $ cutlass_profiler --operation=Gemm --m=1024:4096:256 "
           "--n=1024:4096:256 --k=128:8192:128 --beta=0,1,2.5\n\n"
        << "Schmoo over accumulator types:\n"
        << "  $ cutlass_profiler --operation=Gemm "
           "--accumulator-type=f16,f32\n\n"
        << "Run when A is f16 with column-major and B is any datatype with "
           "row-major (For column major, use column, col, or n. For row major "
           "use, row or t):\n"
        << "  $ cutlass_profiler --operation=Gemm --A=f16:column --B=*:row\n\n"
        << "Using various input value distribution:\n"
        << "  $ cutlass_profiler --operation=Gemm --dist=uniform,min:0,max:3\n"
        << "  $ cutlass_profiler --operation=Gemm "
           "--dist=gaussian,mean:0,stddev:3\n"
        << "  $ cutlass_profiler --operation=Gemm "
           "--dist=sequential,start:0,delta:1\n\n"
        << "Run a kernel with cta tile size of 256x128x32 and save workspace "
           "if results are incorrect (note that --cta-tile::k=32 is default "
           "cta-tile size):\n"
        << " $ cutlass_profiler --operation=Gemm --cta_m=256 --cta_n=128 "
           "--cta_k=32 --save-workspace=incorrect\n\n"
        << "Test your changes to gemm kernels with a quick functional test and "
           "save results in functional-test.csv:\n"
        << " $ cutlass_profiler  --operation=Gemm \\ \n"
        << "   --m=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
        << "   --n=8,56,120,136,256,264,512,520,1024,1032,4096,8192,16384 \\ \n"
        << "   --k=8,16,32,64,128,256,288,384,504,512,520 \\ \n"
        << "   --beta=0,1,2 --profiling-iterations=1 \\ \n"
        << "   --providers=cutlass --output=functional-test.csv\n\n";
}
/////////////////////////////////////////////////////////////////////////////////////////////////
#if 0
// used this for debugging
static std::string byte_string(std::vector<uint8_t> const &bytes) {
std::stringstream ss;
ss << "0x";
for (size_t idx = bytes.size(); idx > 0; --idx) {
ss << std::hex << std::setw(2) << std::setfill('0') << uint32_t(bytes.at(idx - 1));
}
return ss.str();
}
#endif
Status GemmOperationProfiler::GemmProblem::parse(
    library::GemmDescription const& operation_desc,
    ProblemSpace const& problem_space,
    ProblemSpace::Problem const& problem) {
    this->mode = library::GemmUniversalMode::kGemm;
    // Problem dimensions default to 1024 when not specified.
    if (!arg_as_int(this->m, "m", problem_space, problem)) {
        // default value
        this->m = 1024;
    }
    if (!arg_as_int(this->n, "n", problem_space, problem)) {
        // default value
        this->n = 1024;
    }
    if (!arg_as_int(this->k, "k", problem_space, problem)) {
        // default value
        this->k = 1024;
    }
    this->mode = library::GemmUniversalMode::kGemm;
    if (!arg_as_int(this->split_k_slices, "split_k_slices", problem_space,
                    problem)) {
        // default value
        this->split_k_slices = 1;
    }
    // A batch_count greater than 1 switches to batched mode.
    if (!arg_as_int(this->batch_count, "batch_count", problem_space, problem)) {
        // default value
        this->batch_count = 1;
    } else if (this->batch_count > 1) {
        this->mode = library::GemmUniversalMode::kBatched;
    }
    // Split-K and batching are mutually exclusive.
    if (this->split_k_slices > 1 && this->batch_count > 1) {
        // At least one of these must be one
        return Status::kErrorInvalidProblem;
    }
    if (!tensor_description_satisfies(operation_desc.A, "A", problem_space,
                                      problem)) {
        return Status::kErrorInvalidProblem;
    }
    if (!tensor_description_satisfies(operation_desc.B, "B", problem_space,
                                      problem)) {
        return Status::kErrorInvalidProblem;
    }
    if (!tensor_description_satisfies(operation_desc.C, "C", problem_space,
                                      problem)) {
        return Status::kErrorInvalidProblem;
    }
    // Epilogue scalars default to alpha = 1, beta = 0.
    if (!arg_as_scalar(this->alpha, operation_desc.element_epilogue, "alpha",
                       problem_space, problem)) {
        if (!cast_from_double(this->alpha, operation_desc.element_epilogue,
                              1)) {
            return Status::kErrorInternal;
        }
    }
    if (!arg_as_scalar(this->beta, operation_desc.element_epilogue, "beta",
                       problem_space, problem)) {
        if (!cast_from_double(this->beta, operation_desc.element_epilogue, 0)) {
            return Status::kErrorInternal;
        }
    }
    // Leading dimensions assume densely packed tensors in each layout.
    this->lda = DeviceAllocation::get_packed_layout(
                        operation_desc.A.layout, {int(this->m), int(this->k)})
                        .front();
    this->ldb = DeviceAllocation::get_packed_layout(
                        operation_desc.B.layout, {int(this->k), int(this->n)})
                        .front();
    this->ldc = DeviceAllocation::get_packed_layout(
                        operation_desc.C.layout, {int(this->m), int(this->n)})
                        .front();
    return Status::kSuccess;
}
/// Total number of bytes loaded
int64_t GemmOperationProfiler::GemmProblem::bytes(
    library::GemmDescription const& operation_desc) const {
    // Bytes occupied by a (rows x cols) tensor of the given element type.
    auto tensor_bytes = [](auto element, int64_t rows, int64_t cols) {
        return int64_t(library::sizeof_bits(element) * rows / 8) * cols;
    };
    // A and B are read once; the output tensor is written once.
    int64_t total = tensor_bytes(operation_desc.A.element, m, k) +
                    tensor_bytes(operation_desc.B.element, n, k) +
                    tensor_bytes(operation_desc.C.element, m, n);
    // When beta is nonzero the epilogue also reads C once.
    bool beta_is_zero = std::all_of(beta.begin(), beta.end(),
                                    [](uint8_t v) { return v == 0; });
    if (!beta_is_zero) {
        total += tensor_bytes(operation_desc.C.element, m, n);
    }
    return total * batch_count;
}
/// Total number of flops computed
int64_t GemmOperationProfiler::GemmProblem::flops(
    library::GemmDescription const& operation_desc) const {
    // Each multiply-accumulate counts as 2 flops; the epilogue
    // (alpha * AB + beta * C) contributes an additional m*n multiply-adds.
    int64_t flop_count = 2 * (int64_t(m) * n * k + m * n) * batch_count;
    // Complex-valued math instructions perform several real MACs per
    // logical multiply-add.
    auto math_op =
        operation_desc.tile_description.math_instruction.math_operation;
    if (math_op == library::MathOperationID::kMultiplyAddComplex) {
        flop_count *= 4;
    } else if (math_op ==
               library::MathOperationID::kMultiplyAddGaussianComplex) {
        flop_count *= 3;
    }
    return flop_count;
}
/// Initializes a performance result
void GemmOperationProfiler::GemmProblem::initialize_result(
    PerformanceResult& result,
    library::GemmDescription const& operation_desc,
    ProblemSpace const& problem_space) {
    // Reserve one argument slot per dimension of the problem space.
    result.arguments.resize(problem_space.rank());
    set_argument(result, "gemm_kind", problem_space,
                 library::to_string(operation_desc.gemm_kind));
    // Operand tensors are reported as "<element-type>:<layout>".
    set_argument(result, "A", problem_space,
                 std::string(library::to_string(operation_desc.A.element)) +
                         ":" + library::to_string(operation_desc.A.layout));
    set_argument(result, "B", problem_space,
                 std::string(library::to_string(operation_desc.B.element)) +
                         ":" + library::to_string(operation_desc.B.layout));
    set_argument(result, "C", problem_space,
                 std::string(library::to_string(operation_desc.C.element)) +
                         ":" + library::to_string(operation_desc.C.layout));
    set_argument(result, "m", problem_space, m);
    set_argument(result, "n", problem_space, n);
    set_argument(result, "k", problem_space, k);
    set_argument(result, "split_k_slices", problem_space, split_k_slices);
    set_argument(result, "batch_count", problem_space, batch_count);
    // Epilogue scalars are rendered in the operation's epilogue element type.
    set_argument(result, "alpha", problem_space,
                 library::lexical_cast(alpha, operation_desc.element_epilogue));
    set_argument(result, "beta", problem_space,
                 library::lexical_cast(beta, operation_desc.element_epilogue));
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Extracts the problem dimensions
Status GemmOperationProfiler::initialize_configuration(
    Options const& options, PerformanceReport& report,
    DeviceContext& device_context, library::Operation const* operation,
    ProblemSpace const& problem_space,
    ProblemSpace::Problem const& problem) {
    library::GemmDescription const& operation_desc =
        static_cast<library::GemmDescription const&>(
            operation->description());
    // Only the universal GEMM kind is supported by this profiler.
    if (operation_desc.gemm_kind != library::GemmKind::kUniversal) {
        return Status::kErrorInvalidProblem;
    }
    Status status = problem_.parse(operation_desc, problem_space, problem);
    if (status != Status::kSuccess) {
        return status;
    }
    gemm_workspace_.configuration.mode = problem_.mode;
    gemm_workspace_.configuration.problem_size.m() = int(problem_.m);
    gemm_workspace_.configuration.problem_size.n() = int(problem_.n);
    gemm_workspace_.configuration.problem_size.k() = int(problem_.k);
    gemm_workspace_.configuration.lda = problem_.lda;
    gemm_workspace_.configuration.ldb = problem_.ldb;
    gemm_workspace_.configuration.ldc = problem_.ldc;
    // D shares C's leading dimension.
    gemm_workspace_.configuration.ldd = problem_.ldc;
    // In batched mode 'batch_count' is the batch size; otherwise the same
    // field carries the number of split-K slices.
    if (problem_.mode == library::GemmUniversalMode::kBatched) {
        gemm_workspace_.configuration.batch_count = problem_.batch_count;
    } else {
        gemm_workspace_.configuration.batch_count = problem_.split_k_slices;
    }
    // Tensor pointers are bound later, once workspaces are allocated.
    gemm_workspace_.arguments.A = nullptr;
    gemm_workspace_.arguments.B = nullptr;
    gemm_workspace_.arguments.C = nullptr;
    gemm_workspace_.arguments.D = nullptr;
    gemm_workspace_.arguments.alpha = problem_.alpha.data();
    gemm_workspace_.arguments.beta = problem_.beta.data();
    gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
    initialize_result_(this->model_result_, options, operation_desc,
                       problem_space);
    return operation->can_implement(&gemm_workspace_.configuration,
                                    &gemm_workspace_.arguments);
}
/// Initializes the performance result
void GemmOperationProfiler::initialize_result_(
PerformanceResult& result, Options const& options,
library::GemmDescription const& operation_desc,
ProblemSpace const& problem_space) {
result.provider = library::Provider::kCUTLASS;
result.disposition = Disposition::kNotRun;
result.status = Status::kSuccess;
result.operation_name = operation_desc.name;
problem_.initialize_result(result, operation_desc, problem_space);
OperationProfiler::initialize_result_(result, operation_desc,
problem_space);
result.bytes = problem_.bytes(operation_desc);
result.flops = problem_.flops(operation_desc);
result.runtime = 0;
}
/// Initializes workspace
Status GemmOperationProfiler::initialize_workspace(
Options const& options, PerformanceReport& report,
DeviceContext& device_context, library::Operation const* operation,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
library::GemmDescription const& operation_desc =
static_cast<library::GemmDescription const&>(
operation->description());
// Compute the number of copies of the problem to avoid L2 camping.
if (!options.profiling.workspace_count) {
int64_t bytes = problem_.bytes(operation_desc);
if (bytes < 3 * int64_t(options.device.properties.l2CacheSize)) {
gemm_workspace_.problem_count =
1 +
int((3 * int64_t(options.device.properties.l2CacheSize)) /
bytes);
} else {
gemm_workspace_.problem_count = 1;
}
} else {
gemm_workspace_.problem_count = options.profiling.workspace_count;
}
if (options.execution_mode != ExecutionMode::kDryRun) {
gemm_workspace_.A = device_context.allocate_tensor(
options, "A", operation_desc.A.element, operation_desc.A.layout,
{int(problem_.m), int(problem_.k)}, {int(problem_.lda)},
problem_.batch_count * gemm_workspace_.problem_count);
gemm_workspace_.B = device_context.allocate_tensor(
options, "B", operation_desc.B.element, operation_desc.B.layout,
{int(problem_.k), int(problem_.n)}, {int(problem_.ldb)},
problem_.batch_count * gemm_workspace_.problem_count);
gemm_workspace_.C = device_context.allocate_tensor(
options, "C", operation_desc.C.element, operation_desc.C.layout,
{int(problem_.m), int(problem_.n)}, {int(problem_.ldc)},
problem_.batch_count * gemm_workspace_.problem_count);
gemm_workspace_.Computed = device_context.allocate_tensor(
"D", operation_desc.C.element, operation_desc.C.layout,
{int(problem_.m), int(problem_.n)}, {int(problem_.ldc)},
problem_.batch_count * gemm_workspace_.problem_count);
gemm_workspace_.Reference = device_context.allocate_tensor(
"Reference", operation_desc.C.element, operation_desc.C.layout,
{int(problem_.m), int(problem_.n)}, {int(problem_.ldc)},
problem_.batch_count * gemm_workspace_.problem_count);
gemm_workspace_.Reference->copy_from_device(gemm_workspace_.C->data());
}
//
// Initialize the CUTLASS operation
//
Status status = Status::kSuccess;
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
if (options.execution_mode != ExecutionMode::kDryRun) {
uint64_t workspace_size = operation->get_host_workspace_size(
&gemm_workspace_.configuration);
gemm_workspace_.host_workspace.resize(workspace_size, 0);
workspace_size = operation->get_device_workspace_size(
&gemm_workspace_.configuration);
gemm_workspace_.device_workspace.reset(library::NumericTypeID::kU8,
workspace_size);
status = operation->initialize(
&gemm_workspace_.configuration,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
}
//
// If CUTLASS is enabled, generate a result for it
//
results_.push_back(model_result_);
results_.back().provider = library::Provider::kCUTLASS;
results_.back().op_kind = library::OperationKind::kGemm;
results_.back().disposition = Disposition::kNotRun;
for (auto provider : verification_providers_) {
results_.back().verification_map[provider] = Disposition::kNotRun;
}
}
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool GemmOperationProfiler::verify_cutlass(
Options const& options, PerformanceReport& report,
DeviceContext& device_context, library::Operation const* operation,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
if (!options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
return true;
}
if (options.execution_mode == ExecutionMode::kDryRun) {
return true;
}
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.C->data();
gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode = library::ScalarPointerMode::kHost;
gemm_workspace_.arguments.batch_stride_A =
gemm_workspace_.A->batch_stride();
gemm_workspace_.arguments.batch_stride_B =
gemm_workspace_.B->batch_stride();
gemm_workspace_.arguments.batch_stride_C =
gemm_workspace_.C->batch_stride();
gemm_workspace_.arguments.batch_stride_D =
gemm_workspace_.Computed->batch_stride();
//
// Run the CUTLASS operation
//
results_.back().status = operation->run(
&gemm_workspace_.arguments, gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
if (results_.back().status != Status::kSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
cudaError_t result = cudaDeviceSynchronize();
if (result != cudaSuccess) {
results_.back().disposition = Disposition::kFailed;
return false;
}
// CUTLASS op ran the but not yet verified against any verification provider
results_.back().disposition = Disposition::kNotVerified;
//
// Run verification providers
//
if (options.verification.enabled) {
#if CUTLASS_ENABLE_CUBLAS
if (options.verification.provider_enabled(library::Provider::kCUBLAS)) {
// Guard against unsupported cases
auto const& gemm_desc =
static_cast<library::GemmDescription const&>(
operation->description());
if (cublas_satisfies(gemm_desc) == Status::kSuccess) {
// call cublas verification if supported
verify_with_cublas_(options, report, device_context, operation,
problem_space, problem);
}
else {
// set verification map for cublas to not supported
results_.back().verification_map[library::Provider::kCUBLAS] =
Disposition::kNotSupported;
}
}
#endif // #if CUTLASS_ENABLE_CUBLAS
verify_with_reference_(options, report, device_context, operation,
problem_space, problem);
// Update disposition to worst case verification outcome among all
// verification providers which are supported
bool is_any_verification_run_passed = false;
for (auto& m : results_.back().verification_map) {
if (m.second == Disposition::kFailed ||
m.second == Disposition::kIncorrect) {
results_.back().disposition = m.second;
return true;
}
if (!is_any_verification_run_passed &&
m.second == Disposition::kPassed) {
is_any_verification_run_passed = true;
}
}
if (is_any_verification_run_passed) {
results_.back().disposition = Disposition::kPassed;
}
}
// Return true means continue profiling
return true;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against references
bool GemmOperationProfiler::verify_with_cublas_(
Options const& options, PerformanceReport& report,
DeviceContext& device_context, library::Operation const* operation,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
#if CUTLASS_ENABLE_CUBLAS
library::GemmDescription const& gemm_desc =
static_cast<library::GemmDescription const&>(
operation->description());
//
// Construct cuBLAS operators
//
CublasCreate handle;
cublasStatus_t status = handle.get_cublas_create_status();
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] =
get_cutlass_disposition(status);
return true;
}
std::vector<cublasGemmAlgo_t> algorithms;
detail::select_cublas_algorithms(algorithms, options, gemm_desc);
if (algorithms.empty()) {
// no algorithm selected
return true;
}
//
// Initialize state
//
try {
//
// Construct dispatcher to cublasGemmEx()
//
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.batch_stride_A =
gemm_workspace_.A->batch_stride();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.batch_stride_B =
gemm_workspace_.B->batch_stride();
gemm_workspace_.arguments.C = gemm_workspace_.Reference->data();
gemm_workspace_.arguments.batch_stride_D =
gemm_workspace_.Reference->batch_stride();
gemm_workspace_.arguments.D = gemm_workspace_.Reference->data();
gemm_workspace_.arguments.batch_stride_D =
gemm_workspace_.Reference->batch_stride();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode =
library::ScalarPointerMode::kHost;
detail::cublasGemmExDispatcher gemm_op(
gemm_desc, gemm_workspace_.configuration,
gemm_workspace_.arguments, algorithms.front());
if (gemm_op.status != Status::kSuccess) {
results_.back().verification_map[library::Provider::kCUBLAS] =
Disposition::kNotRun;
return true;
}
results_.back().status = Status::kSuccess;
status = gemm_op(handle);
// Handle errors
if (status != CUBLAS_STATUS_SUCCESS) {
results_.back().verification_map[library::Provider::kCUBLAS] =
get_cutlass_disposition(status);
return true;
}
//
// Verify results
//
results_.back().verification_map[library::Provider::kCUBLAS] =
compare_tensors(options, *gemm_workspace_.Computed,
*gemm_workspace_.Reference,
gemm_workspace_.Computed->batch_stride());
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[library::Provider::kCUBLAS] ==
Disposition::kIncorrect) {
save_workspace(device_context, options, gemm_desc,
library::Provider::kCUTLASS,
library::Provider::kCUBLAS);
}
} catch (...) {
results_.back().verification_map[library::Provider::kCUBLAS] =
Disposition::kFailed;
}
#endif
// Return true means continue profiling
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Verifies CUTLASS against host and device references
bool GemmOperationProfiler::verify_with_reference_(
Options const& options, PerformanceReport& report,
DeviceContext& device_context, library::Operation const* operation,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
library::GemmDescription const& gemm_desc =
static_cast<library::GemmDescription const&>(
operation->description());
//
// Initialize state
//
library::Provider references[] = {library::Provider::kReferenceDevice,
library::Provider::kReferenceHost};
for (auto provider : references) {
// Skip providers that are not enabled
if (!options.verification.provider_enabled(provider)) {
continue;
}
void* ptr_A = gemm_workspace_.A->data();
void* ptr_B = gemm_workspace_.B->data();
void* ptr_C = gemm_workspace_.C->data();
void* ptr_D = gemm_workspace_.Reference->data();
// To support the host-side reference, conditionally allocate and
// copy tensors to host memory.
std::vector<uint8_t> host_data_A;
std::vector<uint8_t> host_data_B;
std::vector<uint8_t> host_data_C;
std::vector<uint8_t> host_data_D;
if (provider == library::Provider::kReferenceHost) {
host_data_A.resize(gemm_workspace_.A->bytes());
ptr_A = host_data_A.data();
gemm_workspace_.A->copy_to_host(ptr_A);
host_data_B.resize(gemm_workspace_.B->bytes());
ptr_B = host_data_B.data();
gemm_workspace_.B->copy_to_host(ptr_B);
host_data_C.resize(gemm_workspace_.C->bytes());
ptr_C = host_data_C.data();
gemm_workspace_.C->copy_to_host(ptr_C);
host_data_D.resize(gemm_workspace_.Reference->bytes());
ptr_D = host_data_D.data();
}
//
// Launch
//
library::Handle handle;
handle.set_provider(provider);
Status status = handle.gemm_universal(
library::GemmUniversalMode::kGemm,
gemm_workspace_.configuration.problem_size.m(),
gemm_workspace_.configuration.problem_size.n(),
gemm_workspace_.configuration.problem_size.k(),
gemm_desc.tile_description.math_instruction.element_accumulator,
gemm_desc.element_epilogue,
problem_.alpha.data(),
gemm_desc.A.element, gemm_desc.A.layout, gemm_desc.transform_A,
ptr_A, int(gemm_workspace_.configuration.lda),
gemm_desc.B.element, gemm_desc.B.layout, gemm_desc.transform_B,
ptr_B, int(gemm_workspace_.configuration.ldb),
problem_.beta.data(),
gemm_desc.C.element, ptr_C,
int(gemm_workspace_.configuration.ldc),
ptr_D, int(gemm_workspace_.configuration.ldd),
gemm_workspace_.configuration.batch_count,
gemm_workspace_.A->batch_stride(),
gemm_workspace_.B->batch_stride(),
gemm_workspace_.C->batch_stride(),
gemm_workspace_.Reference->batch_stride());
if (status != Status::kSuccess) {
results_.back().verification_map[provider] = Disposition::kNotRun;
return true;
}
results_.back().status = status;
if (provider == library::Provider::kReferenceHost) {
gemm_workspace_.Reference->copy_from_host(ptr_D);
}
//
// Verify results
//
results_.back().verification_map[provider] = compare_tensors(
options, *gemm_workspace_.Computed, *gemm_workspace_.Reference,
gemm_workspace_.Computed->batch_stride());
// Save workspace if incorrect
if (options.verification.save_workspace == SaveWorkspace::kIncorrect &&
results_.back().verification_map[provider] ==
Disposition::kIncorrect) {
save_workspace(device_context, options, gemm_desc,
library::Provider::kCUTLASS, provider);
}
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Measures performance results
bool GemmOperationProfiler::profile(Options const& options,
PerformanceReport& report,
DeviceContext& device_context,
library::Operation const* operation,
ProblemSpace const& problem_space,
ProblemSpace::Problem const& problem) {
if (options.profiling.provider_enabled(library::Provider::kCUTLASS)) {
// Initialize structure containing GEMM arguments
gemm_workspace_.arguments.A = gemm_workspace_.A->data();
gemm_workspace_.arguments.B = gemm_workspace_.B->data();
gemm_workspace_.arguments.C = gemm_workspace_.C->data();
gemm_workspace_.arguments.D = gemm_workspace_.Computed->data();
gemm_workspace_.arguments.alpha = problem_.alpha.data();
gemm_workspace_.arguments.beta = problem_.beta.data();
gemm_workspace_.arguments.pointer_mode =
library::ScalarPointerMode::kHost;
gemm_workspace_.arguments.batch_stride_A =
gemm_workspace_.A->batch_stride();
gemm_workspace_.arguments.batch_stride_B =
gemm_workspace_.B->batch_stride();
gemm_workspace_.arguments.batch_stride_C =
gemm_workspace_.C->batch_stride();
gemm_workspace_.arguments.batch_stride_D =
gemm_workspace_.Computed->batch_stride();
results_.back().status =
profile_cutlass_(results_.back().runtime, options, operation,
&gemm_workspace_.arguments,
gemm_workspace_.host_workspace.data(),
gemm_workspace_.device_workspace.data());
}
return true;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/// Method to profile a CUTLASS Operation
Status GemmOperationProfiler::profile_cutlass_(
double& runtime, Options const& options,
library::Operation const* operation, void* arguments,
void* host_workspace, void* device_workspace) {
GpuTimer timer;
//
// Optional sleep to limit power consumption and thermals
//
sleep(options.profiling.sleep_duration);
//
// Warmup loop
//
Status status;
for (int iteration = 0; iteration < options.profiling.warmup_iterations;
++iteration) {
int problem_idx = (iteration % gemm_workspace_.problem_count) *
problem_.batch_count;
gemm_workspace_.arguments.A =
gemm_workspace_.A->batch_data(problem_idx);
gemm_workspace_.arguments.B =
gemm_workspace_.B->batch_data(problem_idx);
gemm_workspace_.arguments.C =
gemm_workspace_.C->batch_data(problem_idx);
gemm_workspace_.arguments.D =
gemm_workspace_.Computed->batch_data(problem_idx);
// Execute the CUTLASS operation
status = operation->run(&gemm_workspace_.arguments, host_workspace,
device_workspace);
if (status != Status::kSuccess) {
return status;
}
}
//
// Initialize GPU timer
//
timer.start();
//
// Profiling loop
//
int Iterations = options.profiling.iterations;
int iteration = 0;
for (; iteration < Iterations; ++iteration) {
// Iterate over copies of the problem in memory
int workspace_idx = options.profiling.warmup_iterations + iteration;
int problem_idx = (workspace_idx % gemm_workspace_.problem_count) *
problem_.batch_count;
gemm_workspace_.arguments.A =
gemm_workspace_.A->batch_data(problem_idx);
gemm_workspace_.arguments.B =
gemm_workspace_.B->batch_data(problem_idx);
gemm_workspace_.arguments.C =
gemm_workspace_.C->batch_data(problem_idx);
gemm_workspace_.arguments.D =
gemm_workspace_.Computed->batch_data(problem_idx);
status = operation->run(arguments, host_workspace, device_workspace);
if (status != Status::kSuccess) {
return status;
}
}
//
// Wait for completion
//
timer.stop_and_wait();
//
// Update performance result
//
runtime = timer.duration(iteration);
return status;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace profiler
} // namespace cutlass
/////////////////////////////////////////////////////////////////////////////////////////////////
|
7954740d69d8452a40bb8a88601ca9da4049fd5e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "square.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_out = NULL;
hipMalloc(&d_out, XSIZE*YSIZE);
float *d_in = NULL;
hipMalloc(&d_in, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
square), dim3(gridBlock),dim3(threadBlock), 0, 0, d_out,d_in);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
square), dim3(gridBlock),dim3(threadBlock), 0, 0, d_out,d_in);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
square), dim3(gridBlock),dim3(threadBlock), 0, 0, d_out,d_in);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 7954740d69d8452a40bb8a88601ca9da4049fd5e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "square.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_out = NULL;
cudaMalloc(&d_out, XSIZE*YSIZE);
float *d_in = NULL;
cudaMalloc(&d_in, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
square<<<gridBlock,threadBlock>>>(d_out,d_in);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
square<<<gridBlock,threadBlock>>>(d_out,d_in);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
square<<<gridBlock,threadBlock>>>(d_out,d_in);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
21272def4b711aa38749aa854d4dfcb2c925e8f7.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file : grid2d.cpp
* @brief : 2-dim. grid with spins "sitting" or "living" on top of it, separate implementation file, in CUDA C++11/14,
* @details : struct with smart ptrs, unique ptrs
*
* @author : Ernest Yeung <ernestyalumni@gmail.com>
* @date : 20171229
* @ref : Ch. 8 Structures, Unions, and Enumerations; Bjarne Stroustrup, The C++ Programming Language, 4th Ed.
* Addison-Wesley
*
* https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni¤cy_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* g++ main.cpp ./structs/structs.cpp -o main
*
* */
#include "./grid2d.h"
// default constructor
Spins2d::Spins2d() : J {1.f} {}
// constructor
Spins2d::Spins2d(std::array<size_t,2> & L_is) : L_is {L_is} , J {1.f} {
// L = L_is[0] * L_is[1];
std::unique_ptr<int[], deleterZZ_struct> d_S(nullptr, deleterZZ_struct() );
hipMallocManaged((void **) &d_S, L_is[0]*L_is[1] * sizeof(int)) ;
S = std::move(d_S);
}
Spins2d::Spins2d(std::array<size_t,2> & L_is, const float J) : L_is {L_is} , J {J} {
std::unique_ptr<int[], deleterZZ_struct> d_S(nullptr, deleterZZ_struct() );
hipMallocManaged((void **) &d_S, L_is[0]*L_is[1] * sizeof(int)) ;
S = std::move(d_S);
}
// move constructor
Spins2d::Spins2d( Spins2d && old_spins2d) :
// L_is {old_spins2d.L_is}, L { old_spins2d.L },
L_is {old_spins2d.L_is}, J { old_spins2d.J },
S { std::move( old_spins2d.S ) } { }
// operator overload assignment =
Spins2d & Spins2d::operator=(Spins2d && old_spins2d) {
L_is = old_spins2d.L_is;
// L = old_spins2d.L;
J = old_spins2d.J;
S = std::move( old_spins2d.S );
return *this;
}
/** @fn entry
* @brief takes (x,y) 2-dim. grid coordinates and "flattens" them (flatten functor)
* to 1-dim. memory layout on global GPU memory
* @param i - i = 0,1,...Lx-1
* @param j - j = 0,1,...Ly-1
* @param Lx
* @return size_t k = i + j*Lx = 0,1... Lx*Ly-1
* */
__device__ size_t entry(size_t i, size_t j, size_t Lx) {
return (i + j*Lx);
}
| 21272def4b711aa38749aa854d4dfcb2c925e8f7.cu | /**
* @file : grid2d.cpp
* @brief : 2-dim. grid with spins "sitting" or "living" on top of it, separate implementation file, in CUDA C++11/14,
* @details : struct with smart ptrs, unique ptrs
*
* @author : Ernest Yeung <ernestyalumni@gmail.com>
* @date : 20171229
* @ref : Ch. 8 Structures, Unions, and Enumerations; Bjarne Stroustrup, The C++ Programming Language, 4th Ed.
* Addison-Wesley
*
* https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni¤cy_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* g++ main.cpp ./structs/structs.cpp -o main
*
* */
#include "./grid2d.h"
// default constructor
Spins2d::Spins2d() : J {1.f} {}
// constructor
Spins2d::Spins2d(std::array<size_t,2> & L_is) : L_is {L_is} , J {1.f} {
// L = L_is[0] * L_is[1];
std::unique_ptr<int[], deleterZZ_struct> d_S(nullptr, deleterZZ_struct() );
cudaMallocManaged((void **) &d_S, L_is[0]*L_is[1] * sizeof(int)) ;
S = std::move(d_S);
}
Spins2d::Spins2d(std::array<size_t,2> & L_is, const float J) : L_is {L_is} , J {J} {
std::unique_ptr<int[], deleterZZ_struct> d_S(nullptr, deleterZZ_struct() );
cudaMallocManaged((void **) &d_S, L_is[0]*L_is[1] * sizeof(int)) ;
S = std::move(d_S);
}
// move constructor
Spins2d::Spins2d( Spins2d && old_spins2d) :
// L_is {old_spins2d.L_is}, L { old_spins2d.L },
L_is {old_spins2d.L_is}, J { old_spins2d.J },
S { std::move( old_spins2d.S ) } { }
// operator overload assignment =
Spins2d & Spins2d::operator=(Spins2d && old_spins2d) {
L_is = old_spins2d.L_is;
// L = old_spins2d.L;
J = old_spins2d.J;
S = std::move( old_spins2d.S );
return *this;
}
/** @fn entry
* @brief takes (x,y) 2-dim. grid coordinates and "flattens" them (flatten functor)
* to 1-dim. memory layout on global GPU memory
* @param i - i = 0,1,...Lx-1
* @param j - j = 0,1,...Ly-1
* @param Lx
* @return size_t k = i + j*Lx = 0,1... Lx*Ly-1
* */
__device__ size_t entry(size_t i, size_t j, size_t Lx) {
return (i + j*Lx);
}
|
c8d0878281e6cec73973edb252b587ffd10496d1.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by devjeetroy on 2/5/16.
//
#ifndef CUDA_MATRIX_LIBRARY_V2_GPUMEMORYMANAGER_H
#define CUDA_MATRIX_LIBRARY_V2_GPUMEMORYMANAGER_H
//#define __PRINT_DEVICE_PROPERTIES__
#define __THREAD_GRANULARITY__ 32
#define __MEMORY_DEBUG__
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <boost/format.hpp>
#include <iostream>
#include <math.h>
#include <stdexcept>
using std::cout;
using boost::format;
using std::vector;
class GPUMemoryManager{
private:
GPUMemoryManager(){
initialize();
}
// Don't Implement
// void operator=(const GPUMemoryManager&); // Don't implement
public:
// GPUMemoryManager(GPUMemoryManager const&) = delete;
void operator=(GPUMemoryManager const&) = delete;
GPUMemoryManager(const GPUMemoryManager&) = delete;
~GPUMemoryManager(){
hipFree(mGlobalGPUMemoryBank);
}
static GPUMemoryManager& instance(){
static GPUMemoryManager instance;
return instance;
}
void Clear(){
while(mAllocations.size() > 0)
mAllocations.pop_back();
hipFree(mGlobalGPUMemoryBank);
initialize();
}
void initialize(){
hipGetDeviceCount(&mDeviceCount);
#ifdef __PRINT_DEVICE_PROPERTIES__
if(mDeviceCount > 0)
cout << format("%d devices found on current system\n")
% mDeviceCount;
else
cout << "No devices found on system\n";
#endif
for(int i = 0; i < mDeviceCount; i++){
hipGetDeviceProperties(&mDeviceProperties, i);
#ifdef __PRINT_DEVICE_PROPERTIES__
cout << format("CUDA Device Properties for device %d:\n") % i;
cout << format("Device Name: %s\n")
% mDeviceProperties.name;
cout << format("Total Global Memory: %d bytes\n")
% mDeviceProperties.totalGlobalMem;
cout << format("Max Threads Per Block: %d\n")
% mDeviceProperties.maxThreadsPerBlock;
cout << format("Max Threads Per Multiprocessor: %d\n")
% mDeviceProperties.maxThreadsPerMultiProcessor;
cout << format("Max Grid ByteSize: %d x %d x %d\n")
% mDeviceProperties.maxGridSize[0]
% mDeviceProperties.maxGridSize[1]
% mDeviceProperties.maxGridSize[2];
cout << format("Warp ByteSize: %d\n")
% mDeviceProperties.warpSize;
cout << "\n";
#endif
}
//initialize global memory
size_t free, total;
hipMemGetInfo (&free, &total);
//890777600
mCurrentCapacity = (0.9 * free) / sizeof(int);
cout << format("Memory free/total: %d/%d\n") % free % total;
mMemoryAvailable = mCurrentCapacity;
hipError_t hipError_t = hipMalloc((void **) & mGlobalGPUMemoryBank, mCurrentCapacity * sizeof(int));
if(hipError_t != hipSuccess)
cout << format("Error allocating memory in GPUMAnager Initialize with error %s\n")
% hipGetErrorString(hipError_t);
}
void * allocate(unsigned int byteSize) {
unsigned int newEntryOffset = 0;
unsigned int prior = 0; //need to make sure that insertion is in order
// allocate to front if size == 0, else find the correct spot
if(mAllocations.size() > 0){
if (mAllocations.size() == 1) {
newEntryOffset
= RoundTo(mAllocations[0].Offset + mAllocations[0].ByteSize / sizeof(int),
__THREAD_GRANULARITY__);
//no need to change prior,
//it is already set to 0
//check if the new allocation would exceed
//global memory limits
if (newEntryOffset + byteSize / sizeof(int) >= mCurrentCapacity) {
#ifdef __MEMORY_DEBUG__
cout << "current cap: " << mCurrentCapacity;
cout << format("Failed to find a spot in gpu memory, not enough space available: allocate\n");
#endif
return nullptr;
}
} else {
// for mAllocations.size() > 1
//loop throuh all allocations to find the first slot
//where we could fit the new allocation
for (auto i = 0; i < mAllocations.size() - 1; i++) {
auto currentSlot =
RoundTo((mAllocations[i].Offset + mAllocations[i].ByteSize / sizeof(int)),
__THREAD_GRANULARITY__);
if (mAllocations[i + 1].Offset - currentSlot >= byteSize / sizeof(unsigned int)) {
newEntryOffset = currentSlot;
prior = i;
}
}
//if newEntryOffset is zero, it means no allocation space was found
//up to the last element. Now we need to check the space between
//the last element and the end of the memory bank
if (newEntryOffset == 0) {
auto lastEntry = *(std::end(mAllocations) - 1);
auto lastSlot = RoundTo(lastEntry.Offset + lastEntry.ByteSize / sizeof(int),
__THREAD_GRANULARITY__);
if (lastSlot + byteSize / sizeof(int) < mCurrentCapacity)
{
newEntryOffset = lastSlot;
prior = mAllocations.size() - 1;
}
else {
#ifdef __MEMORY_DEBUG__
cout << "lastSlot + byteSize / sizeof(int): " << lastSlot + byteSize / sizeof(int) << "\n";
format("Failed to find a spot in gpu memory, not enough space available: allocate(lastSlot)\n");
#endif
return nullptr;
}
}
}
}
if(newEntryOffset != 0)
prior++;
mAllocations.insert(std::begin(mAllocations) + prior, gpuMemoryAllocation{newEntryOffset, byteSize});
mMemoryAvailable -= byteSize / sizeof(int);
return (void *) (mGlobalGPUMemoryBank + newEntryOffset * sizeof(int));
}
void printAllocators(){
int i = 0;
cout << format("Current Memory Capacity: %d entries, %d megabytes\n")
% mCurrentCapacity % (mCurrentCapacity * sizeof(int) / (float)(1024 * 1024));
cout << format("Memory Usage: %-3d/%-3d - %-4f percent\n")
% ((mCurrentCapacity - mMemoryAvailable) * sizeof(int)) % (mCurrentCapacity * sizeof(int))
% ((mCurrentCapacity - mMemoryAvailable)/(float)mCurrentCapacity * 100.0f);
if(mAllocations.size() == 0){
cout << "No allocations made yet\n";
}else {
for (i = 0; i < mAllocations.size(); i++) {
cout << format("Entry #%-4d - Offset: %d, Count: %d, Byte Size: %d\n")
% i % mAllocations[i].Offset % (mAllocations[i].ByteSize / sizeof(int))
% mAllocations[i].ByteSize;
}
}
cout << "\n";
}
void deallocate(void * memoryAddress)
{
for(int i = 0; i < mAllocations.size(); i++){
if(mGlobalGPUMemoryBank + mAllocations[i].Offset * sizeof(int) == memoryAddress){
mMemoryAvailable += mAllocations[i].ByteSize / sizeof(int);
mAllocations.erase(std::begin(mAllocations) + i);
}
}
}
    // Total bank capacity, measured in sizeof(int)-sized entries (not bytes).
    unsigned int getCurrentCapacity(){
        return mCurrentCapacity;
    }
    // Stub: always reports "no slot found" (-1). The actual first-fit search
    // lives inline in allocate(); presumably this was meant to host it —
    // TODO confirm before relying on this method.
    int findMemorySlot(unsigned int byteSize){
        return -1;
    }
unsigned int RoundTo(int number, int to){
return (unsigned int) (to * ceil(number / static_cast<float>(to)));
}
private:
    // One reserved region inside the bank. Offset is measured in
    // sizeof(int)-sized entries from the bank base; ByteSize is in bytes.
    struct gpuMemoryAllocation{
        unsigned int Offset;
        unsigned int ByteSize;
    }gpuMem; // NOTE(review): this instance appears unused — verify before removing.
    void * mGlobalGPUMemoryBank;              // base device pointer of the reserved bank
    unsigned int mCurrentCapacity;            // bank capacity, in sizeof(int) entries
    unsigned int mMemoryAvailable;            // remaining capacity, in sizeof(int) entries
    vector<gpuMemoryAllocation> mAllocations; // live allocations, kept sorted by Offset
    int mDeviceCount;                         // number of devices reported by the runtime
    hipDeviceProp_t mDeviceProperties;        // properties of the last device queried
};
#endif //CUDA_MATRIX_LIBRARY_V2_GPUMEMORYMANAGER_H
| c8d0878281e6cec73973edb252b587ffd10496d1.cu | //
// Created by devjeetroy on 2/5/16.
//
#ifndef CUDA_MATRIX_LIBRARY_V2_GPUMEMORYMANAGER_H
#define CUDA_MATRIX_LIBRARY_V2_GPUMEMORYMANAGER_H
//#define __PRINT_DEVICE_PROPERTIES__
#define __THREAD_GRANULARITY__ 32
#define __MEMORY_DEBUG__
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <boost/format.hpp>
#include <iostream>
#include <math.h>
#include <stdexcept>
using std::cout;
using boost::format;
using std::vector;
class GPUMemoryManager{
private:
GPUMemoryManager(){
initialize();
}
// Don't Implement
// void operator=(const GPUMemoryManager&); // Don't implement
public:
// GPUMemoryManager(GPUMemoryManager const&) = delete;
void operator=(GPUMemoryManager const&) = delete;
GPUMemoryManager(const GPUMemoryManager&) = delete;
~GPUMemoryManager(){
cudaFree(mGlobalGPUMemoryBank);
}
static GPUMemoryManager& instance(){
static GPUMemoryManager instance;
return instance;
}
void Clear(){
while(mAllocations.size() > 0)
mAllocations.pop_back();
cudaFree(mGlobalGPUMemoryBank);
initialize();
}
void initialize(){
cudaGetDeviceCount(&mDeviceCount);
#ifdef __PRINT_DEVICE_PROPERTIES__
if(mDeviceCount > 0)
cout << format("%d devices found on current system\n")
% mDeviceCount;
else
cout << "No devices found on system\n";
#endif
for(int i = 0; i < mDeviceCount; i++){
cudaGetDeviceProperties(&mDeviceProperties, i);
#ifdef __PRINT_DEVICE_PROPERTIES__
cout << format("CUDA Device Properties for device %d:\n") % i;
cout << format("Device Name: %s\n")
% mDeviceProperties.name;
cout << format("Total Global Memory: %d bytes\n")
% mDeviceProperties.totalGlobalMem;
cout << format("Max Threads Per Block: %d\n")
% mDeviceProperties.maxThreadsPerBlock;
cout << format("Max Threads Per Multiprocessor: %d\n")
% mDeviceProperties.maxThreadsPerMultiProcessor;
cout << format("Max Grid ByteSize: %d x %d x %d\n")
% mDeviceProperties.maxGridSize[0]
% mDeviceProperties.maxGridSize[1]
% mDeviceProperties.maxGridSize[2];
cout << format("Warp ByteSize: %d\n")
% mDeviceProperties.warpSize;
cout << "\n";
#endif
}
//initialize global memory
size_t free, total;
cudaMemGetInfo (&free, &total);
//890777600
mCurrentCapacity = (0.9 * free) / sizeof(int);
cout << format("Memory free/total: %d/%d\n") % free % total;
mMemoryAvailable = mCurrentCapacity;
cudaError cudaError = cudaMalloc((void **) & mGlobalGPUMemoryBank, mCurrentCapacity * sizeof(int));
if(cudaError != CUDA_SUCCESS)
cout << format("Error allocating memory in GPUMAnager Initialize with error %s\n")
% cudaGetErrorString(cudaError);
}
    /**
     * Reserves byteSize bytes inside the pre-allocated device bank and
     * returns a device pointer to the reservation, or nullptr when no gap
     * large enough exists.
     *
     * Units: gpuMemoryAllocation.Offset counts sizeof(int)-sized entries
     * from the bank base; ByteSize is in bytes. New offsets are rounded up
     * to __THREAD_GRANULARITY__ entries.
     *
     * NOTE(review): the gap scan below never breaks on a fit, so it keeps
     * the LAST fitting gap (first-fit was likely intended) — confirm.
     * NOTE(review): newEntryOffset == 0 doubles as the "nothing found"
     * sentinel, so a genuine gap at offset 0 can never be reused — verify.
     */
    void * allocate(unsigned int byteSize) {
        unsigned int newEntryOffset = 0;
        unsigned int prior = 0; //need to make sure that insertion is in order
        // allocate to front if size == 0, else find the correct spot
        if(mAllocations.size() > 0){
            if (mAllocations.size() == 1) {
                // Single existing allocation: place the new one right after it.
                newEntryOffset
                    = RoundTo(mAllocations[0].Offset + mAllocations[0].ByteSize / sizeof(int),
                              __THREAD_GRANULARITY__);
                //no need to change prior,
                //it is already set to 0
                //check if the new allocation would exceed
                //global memory limits
                if (newEntryOffset + byteSize / sizeof(int) >= mCurrentCapacity) {
#ifdef __MEMORY_DEBUG__
                    cout << "current cap: " << mCurrentCapacity;
                    cout << format("Failed to find a spot in gpu memory, not enough space available: allocate\n");
#endif
                    return nullptr;
                }
            } else {
                // for mAllocations.size() > 1
                //loop throuh all allocations to find the first slot
                //where we could fit the new allocation
                for (auto i = 0; i < mAllocations.size() - 1; i++) {
                    auto currentSlot =
                            RoundTo((mAllocations[i].Offset + mAllocations[i].ByteSize / sizeof(int)),
                                    __THREAD_GRANULARITY__);
                    if (mAllocations[i + 1].Offset - currentSlot >= byteSize / sizeof(unsigned int)) {
                        newEntryOffset = currentSlot;
                        prior = i;
                    }
                }
                //if newEntryOffset is zero, it means no allocation space was found
                //up to the last element. Now we need to check the space between
                //the last element and the end of the memory bank
                if (newEntryOffset == 0) {
                    auto lastEntry = *(std::end(mAllocations) - 1);
                    auto lastSlot = RoundTo(lastEntry.Offset + lastEntry.ByteSize / sizeof(int),
                                            __THREAD_GRANULARITY__);
                    if (lastSlot + byteSize / sizeof(int) < mCurrentCapacity)
                    {
                        newEntryOffset = lastSlot;
                        prior = mAllocations.size() - 1;
                    }
                    else {
#ifdef __MEMORY_DEBUG__
                        cout << "lastSlot + byteSize / sizeof(int): " << lastSlot + byteSize / sizeof(int) << "\n";
                        // NOTE(review): this format() result is discarded — the
                        // failure message is never printed; likely missing `cout <<`.
                        format("Failed to find a spot in gpu memory, not enough space available: allocate(lastSlot)\n");
#endif
                        return nullptr;
                    }
                }
            }
        }
        // Insert after the predecessor so mAllocations stays sorted by Offset.
        if(newEntryOffset != 0)
            prior++;
        mAllocations.insert(std::begin(mAllocations) + prior, gpuMemoryAllocation{newEntryOffset, byteSize});
        mMemoryAvailable -= byteSize / sizeof(int);
        // NOTE(review): arithmetic on a void* is a GNU extension; a char* cast
        // would be portable.
        return (void *) (mGlobalGPUMemoryBank + newEntryOffset * sizeof(int));
    }
void printAllocators(){
int i = 0;
cout << format("Current Memory Capacity: %d entries, %d megabytes\n")
% mCurrentCapacity % (mCurrentCapacity * sizeof(int) / (float)(1024 * 1024));
cout << format("Memory Usage: %-3d/%-3d - %-4f percent\n")
% ((mCurrentCapacity - mMemoryAvailable) * sizeof(int)) % (mCurrentCapacity * sizeof(int))
% ((mCurrentCapacity - mMemoryAvailable)/(float)mCurrentCapacity * 100.0f);
if(mAllocations.size() == 0){
cout << "No allocations made yet\n";
}else {
for (i = 0; i < mAllocations.size(); i++) {
cout << format("Entry #%-4d - Offset: %d, Count: %d, Byte Size: %d\n")
% i % mAllocations[i].Offset % (mAllocations[i].ByteSize / sizeof(int))
% mAllocations[i].ByteSize;
}
}
cout << "\n";
}
void deallocate(void * memoryAddress)
{
for(int i = 0; i < mAllocations.size(); i++){
if(mGlobalGPUMemoryBank + mAllocations[i].Offset * sizeof(int) == memoryAddress){
mMemoryAvailable += mAllocations[i].ByteSize / sizeof(int);
mAllocations.erase(std::begin(mAllocations) + i);
}
}
}
    // Total bank capacity, measured in sizeof(int)-sized entries (not bytes).
    unsigned int getCurrentCapacity(){
        return mCurrentCapacity;
    }
    // Stub: always reports "no slot found" (-1). The actual first-fit search
    // lives inline in allocate(); presumably this was meant to host it —
    // TODO confirm before relying on this method.
    int findMemorySlot(unsigned int byteSize){
        return -1;
    }
unsigned int RoundTo(int number, int to){
return (unsigned int) (to * ceil(number / static_cast<float>(to)));
}
private:
    // One reserved region inside the bank. Offset is measured in
    // sizeof(int)-sized entries from the bank base; ByteSize is in bytes.
    struct gpuMemoryAllocation{
        unsigned int Offset;
        unsigned int ByteSize;
    }gpuMem; // NOTE(review): this instance appears unused — verify before removing.
    void * mGlobalGPUMemoryBank;              // base device pointer of the reserved bank
    unsigned int mCurrentCapacity;            // bank capacity, in sizeof(int) entries
    unsigned int mMemoryAvailable;            // remaining capacity, in sizeof(int) entries
    vector<gpuMemoryAllocation> mAllocations; // live allocations, kept sorted by Offset
    int mDeviceCount;                         // number of devices reported by the runtime
    cudaDeviceProp mDeviceProperties;         // properties of the last device queried
};
#endif //CUDA_MATRIX_LIBRARY_V2_GPUMEMORYMANAGER_H
|
b33dcba83a270a9b45a7e968679f0d63437b4948.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
* Modifications Copyright (c) 2019 Advanced Micro Devices, Inc. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include <hip/hip_runtime.h>
#include "enqueue.h"
#include "common_coll.h"
#include "param.h"
#include "collectives/collectives.h"
#define NCCL_FUNC4(coll, op, dtype) \
NCCL_KERN_NAME(coll, op, dtype), \
NCCL_KERN_NAME(coll##LL, op, dtype)
// Must be consistent with ncclDataType_t
#define NCCL_FUNCS3A(coll, op) \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, u8), \
NCCL_FUNC4(coll, op, i32), \
NCCL_FUNC4(coll, op, u32), \
NCCL_FUNC4(coll, op, i64), \
NCCL_FUNC4(coll, op, u64), \
NCCL_FUNC4(coll, op, f16), \
NCCL_FUNC4(coll, op, f32), \
NCCL_FUNC4(coll, op, f64)
#define NCCL_FUNCS3B(coll, op) \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8)
// Must be consistent with ncclRedOp_t
#define NCCL_FUNCS2A(coll) \
NCCL_FUNCS3A(coll, sum ), \
NCCL_FUNCS3A(coll, prod), \
NCCL_FUNCS3A(coll, max ), \
NCCL_FUNCS3A(coll, min )
#define NCCL_FUNCS2B(coll) \
NCCL_FUNCS3B(coll, copy), \
NCCL_FUNCS3B(coll, copy), \
NCCL_FUNCS3B(coll, copy), \
NCCL_FUNCS3B(coll, copy)
typedef void(*ncclKern_t)(struct ncclColl);
// Must be consistent with the ncclFuncSet enum
static ncclKern_t const ncclKerns[ncclCollCount*ncclNumOps*ncclNumTypes*2] = {
NCCL_FUNCS2B(ncclBroadcast),
NCCL_FUNCS2A(ncclReduce),
NCCL_FUNCS2B(ncclAllGather),
NCCL_FUNCS2A(ncclReduceScatter),
NCCL_FUNCS2A(ncclAllReduce)
};
// Launches the prepared per-device kernels for a group operation.
// When cooperative multi-device launch is requested (cgMode bit 0) and the
// toolkit supports it (CUDA >= 9.0), a single cooperative launch covers all
// devices. Otherwise each device gets an ordinary launch, and the caller's
// current device is restored afterwards.
ncclResult_t ncclLaunchCooperativeKernelMultiDevice(struct cudaLaunchParams *paramsList, int* cudaDevs, int numDevices, int cgMode) {
#if CUDART_VERSION >= 9000
  if (cgMode & 0x01) {
    CUDACHECK(cudaLaunchCooperativeKernelMultiDevice(paramsList, numDevices,
            // These flags are to reduce the latency of using this API
            cudaCooperativeLaunchMultiDeviceNoPreSync|cudaCooperativeLaunchMultiDeviceNoPostSync));
    return ncclSuccess;
  }
#endif
  int savedDev;
  CUDACHECK(hipGetDevice(&savedDev));
  for (int i = 0; i < numDevices; i++) {
    struct cudaLaunchParams* params = paramsList+i;
    CUDACHECK(hipSetDevice(cudaDevs[i]));
    hipLaunchKernelGGL(params->func, params->gridDim, params->blockDim, params->sharedMem, params->stream, **params->args);
  }
  CUDACHECK(hipSetDevice(savedDev));
  return ncclSuccess;
}
// Prepares the fused launch for all collectives queued on this communicator:
// clamps the grid to the number of rings, marks the last queued op on each
// ring as final (active = 2), and copies the first queued collective into
// comm->args so it can be passed directly as the kernel argument.
ncclResult_t setupLaunch(struct ncclComm* comm, struct cudaLaunchParams* params) {
  params->gridDim.x = ::min((int) params->gridDim.x, comm->nRings);
  // Set active = 2 for the last operation
  for (int r=0; r<params->gridDim.x; r++) {
    struct ncclRing* ring = comm->rings+r;
    STORE(&ring->collectives[(ring->collStart+ring->collCount-1)%NCCL_MAX_OPS].active, 2);
  }
  // Find the first operation, choose the kernel accordingly and pass it
  // as the first argument.
  struct ncclColl* coll = comm->rings[0].collectives+comm->rings[0].collStart;
  memcpy(&comm->args, coll, sizeof(struct ncclColl));
  // As we pass that coll directly, we can free it immediately.
  STORE(&coll->active, 0);
  params->func = ncclKerns[coll->funcIndex];
  return ncclSuccess;
}
// CPU-side barrier entry: atomically counts this rank into the current
// intra-node barrier phase. Sets *isLast = 1 for the final rank to arrive
// (which also resets the opposite phase's counter) and 0 otherwise.
// Returns ncclInvalidUsage when more ranks enter than comm->intraRanks.
ncclResult_t ncclCpuBarrierIn(struct ncclComm* comm, int* isLast) {
  volatile int* ptr = (volatile int*)(comm->intraBarrier+comm->intraPhase);
  int val = LOAD(ptr);
  bool done = false;
  while (done == false) {
    if (val >= comm->intraRanks) {
      WARN("Trying to launch too many collectives");
      return ncclInvalidUsage;
    }
    if (val+1 == comm->intraRanks) {
      // Reset the barrier.
      comm->intraBarrier[comm->intraPhase^1] = 0;
      *isLast = 1;
      return ncclSuccess;
    }
    // NOTE(review): on CAS failure this advances `val` by one instead of
    // re-reading *ptr; if several ranks increment between attempts the
    // guessed value could overshoot the real counter — confirm against
    // upstream NCCL before changing.
    done = __sync_bool_compare_and_swap(ptr, val, val+1);
    val++;
  }
  *isLast = 0;
  return ncclSuccess;
}
// Final increment of the barrier counter, performed by the rank that
// ncclCpuBarrierIn designated as last. No concurrent modification is
// expected here; a failed CAS therefore means the caller over-launched.
ncclResult_t ncclCpuBarrierLast(struct ncclComm* comm) {
  volatile int* ptr = (volatile int*)(comm->intraBarrier+comm->intraPhase);
  int val = LOAD(ptr);
  if (__sync_bool_compare_and_swap(ptr, val, val+1) != true) {
    WARN("Trying to launch too many collectives");
    return ncclInternalError;
  }
  return ncclSuccess;
}
// Barrier exit: spins (yielding the CPU) until every intra-node rank has
// checked in, then flips to the other phase for the next barrier.
ncclResult_t ncclCpuBarrierOut(struct ncclComm* comm) {
  volatile int* ptr = (volatile int*)(comm->intraBarrier+comm->intraPhase);
  while (LOAD(ptr) < comm->intraRanks) pthread_yield();
  comm->intraPhase ^= 1;
  return ncclSuccess;
}
// Enqueues this rank's part of the fused launch: picks the stream to launch
// on (internal group stream or the user's), wires the stream dependencies
// through comm->doneEvent, and enters the CPU barrier. In GROUP launch mode
// the last rank to arrive performs the multi-device launch for everyone.
ncclResult_t ncclBarrierEnqueue(struct ncclComm* comm) {
  if (comm->nRanks == 1) return ncclSuccess;
  struct cudaLaunchParams* params = comm->myParams;
  NCCLCHECK(setupLaunch(comm, params));
  // Use internal NCCL stream for CGMD/GROUP launch if required or if the user stream is NULL
  if (comm->launchMode == ncclComm::GROUP && (comm->groupCudaStream || comm->userStream == NULL)) {
    // Enqueue event in user stream
    CUDACHECK(hipEventRecord(comm->doneEvent, comm->userStream));
    // Create dependency between user stream and internal NCCL stream
    CUDACHECK(hipStreamWaitEvent(comm->groupStream, comm->doneEvent, 0));
    params->stream = comm->groupStream;
  } else {
    if (comm->userStream != params->stream) {
      // Stream changed from last call, create dependency against last NCCL kernel launch
      CUDACHECK(hipStreamWaitEvent(comm->userStream, comm->doneEvent, 0));
    }
    params->stream = comm->userStream;
  }
  int isLast = 0;
  NCCLCHECK(ncclCpuBarrierIn(comm, &isLast));
  if (isLast) {
    if (comm->launchMode == ncclComm::GROUP) {
      // I'm the last. Launch all operations.
      NCCLCHECK(ncclLaunchCooperativeKernelMultiDevice(comm->intraParams, comm->intraCudaDevs, comm->intraRanks, *comm->intraCGMode));
    }
    NCCLCHECK(ncclCpuBarrierLast(comm));
  }
  return ncclSuccess;
}
// Completes the fused launch after the CPU barrier: in PARALLEL mode each
// rank launches its own kernel here, then the network proxies are started
// and the per-ring collective queues are consumed. Resets the launch params
// so the next operation starts from a clean slate.
ncclResult_t ncclBarrierEnqueueWait(ncclComm_t comm) {
  if (comm->nRanks == 1) return ncclSuccess;
  // We can't print the CG mode before the first barrier happened.
  if (comm->rank == 0 && *comm->intraCGMode & 0x10) {
    *comm->intraCGMode ^= 0x10;
    INFO(NCCL_INIT,"Launch mode %s%s%s",
        comm->launchMode == ncclComm::GROUP ? "Group" : "Parallel",
        *comm->intraCGMode ? "/CGMD" : "",
        (comm->launchMode == ncclComm::GROUP && comm->groupCudaStream) ? "/Stream" : "");
  }
  NCCLCHECK(ncclCpuBarrierOut(comm));
  struct cudaLaunchParams *params = comm->myParams;
  if (comm->launchMode == ncclComm::PARALLEL) {
    hipLaunchKernelGGL(params->func, params->gridDim, params->blockDim, params->sharedMem, params->stream, **params->args);
  }
  // Start the network proxies as soon as the kernel has been launched. We can't
  // perform any CUDA call between the two or having a hipFree between the CUDA
  // launch and the transportStartProxies call could cause a deadlock.
  // Also, starting the proxies after the CUDA launch seems to be better for
  // performance (latency).
  for (int r=0; r<params->gridDim.x; r++) {
    struct ncclRing* ring = comm->rings+r;
    ring->collStart = ring->collFifoTail;
    ring->collCount = 0;
  }
  params->gridDim.x = params->blockDim.x = 0;
  NCCLCHECK(transportStartProxies(comm));
  return ncclSuccess;
}
// Records comm->doneEvent behind the just-launched NCCL kernel and, when the
// launch went to the internal group stream, makes the user stream wait on it
// so user work ordered after the collective sees its results.
ncclResult_t ncclEnqueueEvents(ncclComm_t comm) {
  struct cudaLaunchParams *params = comm->myParams;
  // Enqueue event after NCCL kernel
  CUDACHECK(hipEventRecord(comm->doneEvent, params->stream));
  // Use internal NCCL stream for CGMD/GROUP launch if required or if the user stream is NULL
  if (comm->launchMode == ncclComm::GROUP && (comm->groupCudaStream || comm->userStream == NULL)) {
    // Create dependency between NCCL internal stream and user stream
    CUDACHECK(hipStreamWaitEvent(comm->userStream, comm->doneEvent, 0));
  }
  comm->userStreamSet = false;
  return ncclSuccess;
}
// Common entry point for every collective (`func` implements the specific
// primitive named by `primName`). Validates arguments, then either registers
// the operation for a later grouped launch (async/group mode) or enqueues,
// launches, and records events immediately (blocking mode).
ncclResult_t ncclEnqueueCheck(ncclFunc_t func, const char* primName, const void* sendbuff,
    void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root,
    ncclComm_t comm, hipStream_t stream) {
  if (comm == NULL) return ncclInvalidArgument;
  // Launch asynchronously if needed
  if (ncclAsyncMode()) {
    ncclResult_t ret = ncclSuccess;
    int savedDev = -1;
    if (comm->checkPointers) {
      // Pointer checks must run with the communicator's device current.
      CUDACHECKGOTO(hipGetDevice(&savedDev), ret, end);
      CUDACHECKGOTO(hipSetDevice(comm->cudaDev), ret, end);
    }
    // Check arguments
    NCCLCHECKGOTO(ArgsCheck(sendbuff, recvbuff, count, type, op, root, comm, primName), ret, end);
    // Always register comm even in case of error to make sure ncclGroupEnd
    // cleans it up.
    NCCLCHECK(ncclAsyncColl(comm));
    NCCLCHECKGOTO(func(sendbuff, recvbuff, count, type, op, root, comm, stream), ret, end);
end:
    if (savedDev != -1) CUDACHECK(hipSetDevice(savedDev));
    ncclAsyncErrCheck(ret);
    return ret;
  } else {
    NCCLCHECK(ArgsCheck(sendbuff, recvbuff, count, type, op, root, comm, primName));
    NCCLCHECK(func(sendbuff, recvbuff, count, type, op, root, comm, stream));
    NCCLCHECK(ncclBarrierEnqueue(comm));
    NCCLCHECK(ncclBarrierEnqueueWait(comm));
    NCCLCHECK(ncclEnqueueEvents(comm));
    return ncclSuccess;
  }
}
| b33dcba83a270a9b45a7e968679f0d63437b4948.cu | /*************************************************************************
* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
* Modifications Copyright (c) 2019 Advanced Micro Devices, Inc. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include <hip/hip_runtime.h>
#include "enqueue.h"
#include "common_coll.h"
#include "param.h"
#include "collectives/collectives.h"
#define NCCL_FUNC4(coll, op, dtype) \
NCCL_KERN_NAME(coll, op, dtype), \
NCCL_KERN_NAME(coll##LL, op, dtype)
// Must be consistent with ncclDataType_t
#define NCCL_FUNCS3A(coll, op) \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, u8), \
NCCL_FUNC4(coll, op, i32), \
NCCL_FUNC4(coll, op, u32), \
NCCL_FUNC4(coll, op, i64), \
NCCL_FUNC4(coll, op, u64), \
NCCL_FUNC4(coll, op, f16), \
NCCL_FUNC4(coll, op, f32), \
NCCL_FUNC4(coll, op, f64)
#define NCCL_FUNCS3B(coll, op) \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8), \
NCCL_FUNC4(coll, op, i8)
// Must be consistent with ncclRedOp_t
#define NCCL_FUNCS2A(coll) \
NCCL_FUNCS3A(coll, sum ), \
NCCL_FUNCS3A(coll, prod), \
NCCL_FUNCS3A(coll, max ), \
NCCL_FUNCS3A(coll, min )
#define NCCL_FUNCS2B(coll) \
NCCL_FUNCS3B(coll, copy), \
NCCL_FUNCS3B(coll, copy), \
NCCL_FUNCS3B(coll, copy), \
NCCL_FUNCS3B(coll, copy)
typedef void(*ncclKern_t)(struct ncclColl);
// Must be consistent with the ncclFuncSet enum
static ncclKern_t const ncclKerns[ncclCollCount*ncclNumOps*ncclNumTypes*2] = {
NCCL_FUNCS2B(ncclBroadcast),
NCCL_FUNCS2A(ncclReduce),
NCCL_FUNCS2B(ncclAllGather),
NCCL_FUNCS2A(ncclReduceScatter),
NCCL_FUNCS2A(ncclAllReduce)
};
ncclResult_t ncclLaunchCooperativeKernelMultiDevice(struct cudaLaunchParams *paramsList, int* cudaDevs, int numDevices, int cgMode) {
#if CUDART_VERSION >= 9000
if (cgMode & 0x01) {
CUDACHECK(cudaLaunchCooperativeKernelMultiDevice(paramsList, numDevices,
// These flags are to reduce the latency of using this API
cudaCooperativeLaunchMultiDeviceNoPreSync|cudaCooperativeLaunchMultiDeviceNoPostSync));
return ncclSuccess;
}
#endif
int savedDev;
CUDACHECK(hipGetDevice(&savedDev));
for (int i = 0; i < numDevices; i++) {
struct cudaLaunchParams* params = paramsList+i;
CUDACHECK(hipSetDevice(cudaDevs[i]));
hipLaunchKernelGGL(params->func, params->gridDim, params->blockDim, params->sharedMem, params->stream, **params->args);
}
CUDACHECK(hipSetDevice(savedDev));
return ncclSuccess;
}
ncclResult_t setupLaunch(struct ncclComm* comm, struct cudaLaunchParams* params) {
params->gridDim.x = std::min((int) params->gridDim.x, comm->nRings);
// Set active = 2 for the last operation
for (int r=0; r<params->gridDim.x; r++) {
struct ncclRing* ring = comm->rings+r;
STORE(&ring->collectives[(ring->collStart+ring->collCount-1)%NCCL_MAX_OPS].active, 2);
}
// Find the first operation, choose the kernel accordingly and pass it
// as the first argument.
struct ncclColl* coll = comm->rings[0].collectives+comm->rings[0].collStart;
memcpy(&comm->args, coll, sizeof(struct ncclColl));
// As we pass that coll directly, we can free it immediately.
STORE(&coll->active, 0);
params->func = ncclKerns[coll->funcIndex];
return ncclSuccess;
}
ncclResult_t ncclCpuBarrierIn(struct ncclComm* comm, int* isLast) {
volatile int* ptr = (volatile int*)(comm->intraBarrier+comm->intraPhase);
int val = LOAD(ptr);
bool done = false;
while (done == false) {
if (val >= comm->intraRanks) {
WARN("Trying to launch too many collectives");
return ncclInvalidUsage;
}
if (val+1 == comm->intraRanks) {
// Reset the barrier.
comm->intraBarrier[comm->intraPhase^1] = 0;
*isLast = 1;
return ncclSuccess;
}
done = __sync_bool_compare_and_swap(ptr, val, val+1);
val++;
}
*isLast = 0;
return ncclSuccess;
}
ncclResult_t ncclCpuBarrierLast(struct ncclComm* comm) {
volatile int* ptr = (volatile int*)(comm->intraBarrier+comm->intraPhase);
int val = LOAD(ptr);
if (__sync_bool_compare_and_swap(ptr, val, val+1) != true) {
WARN("Trying to launch too many collectives");
return ncclInternalError;
}
return ncclSuccess;
}
ncclResult_t ncclCpuBarrierOut(struct ncclComm* comm) {
volatile int* ptr = (volatile int*)(comm->intraBarrier+comm->intraPhase);
while (LOAD(ptr) < comm->intraRanks) pthread_yield();
comm->intraPhase ^= 1;
return ncclSuccess;
}
ncclResult_t ncclBarrierEnqueue(struct ncclComm* comm) {
if (comm->nRanks == 1) return ncclSuccess;
struct cudaLaunchParams* params = comm->myParams;
NCCLCHECK(setupLaunch(comm, params));
// Use internal NCCL stream for CGMD/GROUP launch if required or if the user stream is NULL
if (comm->launchMode == ncclComm::GROUP && (comm->groupCudaStream || comm->userStream == NULL)) {
// Enqueue event in user stream
CUDACHECK(hipEventRecord(comm->doneEvent, comm->userStream));
// Create dependency between user stream and internal NCCL stream
CUDACHECK(hipStreamWaitEvent(comm->groupStream, comm->doneEvent, 0));
params->stream = comm->groupStream;
} else {
if (comm->userStream != params->stream) {
// Stream changed from last call, create dependency against last NCCL kernel launch
CUDACHECK(hipStreamWaitEvent(comm->userStream, comm->doneEvent, 0));
}
params->stream = comm->userStream;
}
int isLast = 0;
NCCLCHECK(ncclCpuBarrierIn(comm, &isLast));
if (isLast) {
if (comm->launchMode == ncclComm::GROUP) {
// I'm the last. Launch all operations.
NCCLCHECK(ncclLaunchCooperativeKernelMultiDevice(comm->intraParams, comm->intraCudaDevs, comm->intraRanks, *comm->intraCGMode));
}
NCCLCHECK(ncclCpuBarrierLast(comm));
}
return ncclSuccess;
}
ncclResult_t ncclBarrierEnqueueWait(ncclComm_t comm) {
if (comm->nRanks == 1) return ncclSuccess;
// We can't print the CG mode before the first barrier happened.
if (comm->rank == 0 && *comm->intraCGMode & 0x10) {
*comm->intraCGMode ^= 0x10;
INFO(NCCL_INIT,"Launch mode %s%s%s",
comm->launchMode == ncclComm::GROUP ? "Group" : "Parallel",
*comm->intraCGMode ? "/CGMD" : "",
(comm->launchMode == ncclComm::GROUP && comm->groupCudaStream) ? "/Stream" : "");
}
NCCLCHECK(ncclCpuBarrierOut(comm));
struct cudaLaunchParams *params = comm->myParams;
if (comm->launchMode == ncclComm::PARALLEL) {
hipLaunchKernelGGL(params->func, params->gridDim, params->blockDim, params->sharedMem, params->stream, **params->args);
}
// Start the network proxies as soon as the kernel has been launched. We can't
// perform any CUDA call between the two or having a hipFree between the CUDA
// launch and the transportStartProxies call could cause a deadlock.
// Also, starting the proxies after the CUDA launch seems to be better for
// performance (latency).
for (int r=0; r<params->gridDim.x; r++) {
struct ncclRing* ring = comm->rings+r;
ring->collStart = ring->collFifoTail;
ring->collCount = 0;
}
params->gridDim.x = params->blockDim.x = 0;
NCCLCHECK(transportStartProxies(comm));
return ncclSuccess;
}
ncclResult_t ncclEnqueueEvents(ncclComm_t comm) {
struct cudaLaunchParams *params = comm->myParams;
// Enqueue event after NCCL kernel
CUDACHECK(hipEventRecord(comm->doneEvent, params->stream));
// Use internal NCCL stream for CGMD/GROUP launch if required or if the user stream is NULL
if (comm->launchMode == ncclComm::GROUP && (comm->groupCudaStream || comm->userStream == NULL)) {
// Create dependency between NCCL internal stream and user stream
CUDACHECK(hipStreamWaitEvent(comm->userStream, comm->doneEvent, 0));
}
comm->userStreamSet = false;
return ncclSuccess;
}
ncclResult_t ncclEnqueueCheck(ncclFunc_t func, const char* primName, const void* sendbuff,
void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root,
ncclComm_t comm, hipStream_t stream) {
if (comm == NULL) return ncclInvalidArgument;
// Launch asynchronously if needed
if (ncclAsyncMode()) {
ncclResult_t ret = ncclSuccess;
int savedDev = -1;
if (comm->checkPointers) {
CUDACHECKGOTO(hipGetDevice(&savedDev), ret, end);
CUDACHECKGOTO(hipSetDevice(comm->cudaDev), ret, end);
}
// Check arguments
NCCLCHECKGOTO(ArgsCheck(sendbuff, recvbuff, count, type, op, root, comm, primName), ret, end);
// Always register comm even in case of error to make sure ncclGroupEnd
// cleans it up.
NCCLCHECK(ncclAsyncColl(comm));
NCCLCHECKGOTO(func(sendbuff, recvbuff, count, type, op, root, comm, stream), ret, end);
end:
if (savedDev != -1) CUDACHECK(hipSetDevice(savedDev));
ncclAsyncErrCheck(ret);
return ret;
} else {
NCCLCHECK(ArgsCheck(sendbuff, recvbuff, count, type, op, root, comm, primName));
NCCLCHECK(func(sendbuff, recvbuff, count, type, op, root, comm, stream));
NCCLCHECK(ncclBarrierEnqueue(comm));
NCCLCHECK(ncclBarrierEnqueueWait(comm));
NCCLCHECK(ncclEnqueueEvents(comm));
return ncclSuccess;
}
}
|
d9db9652fff6a52b00650176e5da9fd1f7f4f592.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "STREAM_Add_Optimized.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/**
 * Benchmark driver: sweeps the predefined matrix sizes and block shapes,
 * timing 1000 launches of STREAM_Add_Optimized per configuration.
 * argv[1] selects how many entries of matrices_ to sweep.
 * Fixes over the original: allocations now account for sizeof(float),
 * the device is synchronized before stopping the timer (launches are
 * asynchronous, so the old code timed only enqueue overhead), and the
 * per-configuration buffers are freed instead of leaked.
 */
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Allocate float buffers; the original passed XSIZE*YSIZE as the
            // byte count, under-allocating by a factor of sizeof(float).
            float *a = NULL;
            hipMalloc(&a, XSIZE * YSIZE * sizeof(float));
            float *b = NULL;
            hipMalloc(&b, XSIZE * YSIZE * sizeof(float));
            float *c = NULL;
            hipMalloc(&c, XSIZE * YSIZE * sizeof(float));
            size_t len = 1;
            // Round the launch shape up so the grid covers the matrix exactly.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0)
            {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0); // establish the context before timing
            hipLaunchKernelGGL(STREAM_Add_Optimized, gridBlock, threadBlock, 0, 0, a, b, c, len);
            hipDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(STREAM_Add_Optimized, gridBlock, threadBlock, 0, 0, a, b, c, len);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(STREAM_Add_Optimized, gridBlock, threadBlock, 0, 0, a, b, c, len);
            }
            // Kernel launches are asynchronous: wait for completion so the
            // measurement covers execution, not just enqueue time.
            hipDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release per-configuration buffers (the original leaked them).
            hipFree(a);
            hipFree(b);
            hipFree(c);
}
}} | d9db9652fff6a52b00650176e5da9fd1f7f4f592.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "STREAM_Add_Optimized.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
/**
 * Benchmark driver: sweeps the predefined matrix sizes and block shapes,
 * timing 1000 launches of STREAM_Add_Optimized per configuration.
 * argv[1] selects how many entries of matrices_ to sweep.
 * Fixes over the original: allocations now account for sizeof(float),
 * the device is synchronized before stopping the timer (launches are
 * asynchronous, so the old code timed only enqueue overhead), and the
 * per-configuration buffers are freed instead of leaked.
 */
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Allocate float buffers; the original passed XSIZE*YSIZE as the
            // byte count, under-allocating by a factor of sizeof(float).
            float *a = NULL;
            cudaMalloc(&a, XSIZE * YSIZE * sizeof(float));
            float *b = NULL;
            cudaMalloc(&b, XSIZE * YSIZE * sizeof(float));
            float *c = NULL;
            cudaMalloc(&c, XSIZE * YSIZE * sizeof(float));
            size_t len = 1;
            // Round the launch shape up so the grid covers the matrix exactly.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0)
            {
                iXSIZE++;
            }
            while (iYSIZE % BLOCKY != 0)
            {
                iYSIZE++;
            }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0); // establish the context before timing
            STREAM_Add_Optimized<<<gridBlock, threadBlock>>>(a, b, c, len);
            cudaDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                STREAM_Add_Optimized<<<gridBlock, threadBlock>>>(a, b, c, len);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                STREAM_Add_Optimized<<<gridBlock, threadBlock>>>(a, b, c, len);
            }
            // Kernel launches are asynchronous: wait for completion so the
            // measurement covers execution, not just enqueue time.
            cudaDeviceSynchronize();
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // Release per-configuration buffers (the original leaked them).
            cudaFree(a);
            cudaFree(b);
            cudaFree(c);
}
}} |
a3b1753d41a0eb30505f69592cb49405432d5cf9.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Superconducting (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
/**
 * Matrix multiplication (CUDA Kernel) on the device: C = A * B
 * wA is A's width and wB is B's width
 *
 * Preconditions (unchecked):
 *  - blockDim == (BLOCK_SIZE, BLOCK_SIZE); each thread computes one C element.
 *  - wA and wB are multiples of BLOCK_SIZE (the tile loop assumes full tiles).
 *  - the grid tiles C exactly: gridDim == (wB / BLOCK_SIZE, hA / BLOCK_SIZE).
 * Uses 2 * BLOCK_SIZE^2 * sizeof(float) bytes of static shared memory.
 */
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Index of the first sub-matrix of A processed by the block
    int aBegin = wA * BLOCK_SIZE * by;
    // Index of the last sub-matrix of A processed by the block
    int aEnd   = aBegin + wA - 1;
    // Step size used to iterate through the sub-matrices of A
    int aStep  = BLOCK_SIZE;
    // Index of the first sub-matrix of B processed by the block
    int bBegin = BLOCK_SIZE * bx;
    // Step size used to iterate through the sub-matrices of B
    int bStep  = BLOCK_SIZE * wB;
    // Csub is used to store the element of the block sub-matrix
    // that is computed by the thread
    float Csub = 0;
    // Loop over all the sub-matrices of A and B
    // required to compute the block sub-matrix
    for (int a = aBegin, b = bBegin;
         a <= aEnd;
         a += aStep, b += bStep)
    {
        // Declaration of the shared memory array As used to
        // store the sub-matrix of A
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        // Declaration of the shared memory array Bs used to
        // store the sub-matrix of B
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        // Load the matrices from device memory
        // to shared memory; each thread loads
        // one element of each matrix
        As[ty][tx] = A[a + wA * ty + tx];
        Bs[ty][tx] = B[b + wB * ty + tx];
        // Synchronize to make sure the matrices are loaded
        __syncthreads();
        // Multiply the two matrices together;
        // each thread computes one element
        // of the block sub-matrix
#pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k)
        {
            Csub += As[ty][k] * Bs[k][tx];
        }
        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }
    // Write the block sub-matrix to device memory;
    // each thread writes one element
    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
}
// Set every element of data[0..size) to the constant val.
void constantInit(float *data, int size, float val)
{
    int i = 0;
    while (i < size)
    {
        data[i++] = val;
    }
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
/**
 * Run a simple matrix-multiplication test on the device (HIP runtime).
 *
 * Allocates A (dimsA.x x dimsA.y, all 1.0f) and B (dimsB.x x dimsB.y, all
 * valB), times 300 launches of matrixMulCUDA with hip events, then checks
 * every element of C against the analytically known value dimsA.x * valB.
 *
 * Assumes dimsA.x == dimsB.y and dimensions divisible by block_size
 * (the kernel does no bounds checking). argc/argv are accepted but unused.
 * Returns EXIT_SUCCESS on a matching result, EXIT_FAILURE otherwise;
 * exits the process directly on any HIP API error.
 */
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
// Allocate host memory for matrices A and B
// NOTE(review): h_A/h_B malloc results are not checked (h_C is) -- a failed
// allocation here would crash inside constantInit.
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
float *h_A = (float *)malloc(mem_size_A);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
float *h_B = (float *)malloc(mem_size_B);
// Initialize host memory
const float valB = 0.01f;
constantInit(h_A, size_A, 1.0f);
constantInit(h_B, size_B, valB);
// Allocate device memory
float *d_A, *d_B, *d_C;
// Allocate host matrix C
dim3 dimsC(dimsB.x, dimsA.y, 1);
unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
float *h_C = (float *) malloc(mem_size_C);
if (h_C == NULL)
{
fprintf(stderr, "Failed to allocate host matrix C!\n");
exit(EXIT_FAILURE);
}
hipError_t error;
error = hipMalloc((void **) &d_A, mem_size_A);
if (error != hipSuccess)
{
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_B, mem_size_B);
if (error != hipSuccess)
{
printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMalloc((void **) &d_C, mem_size_C);
if (error != hipSuccess)
{
printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
// Create and start timer
printf("Computing result using CUDA Kernel...\n");
// Performs warmup operation using matrixMul CUDA kernel
// NOTE(review): no hipGetLastError() after these launches -- a bad launch
// configuration would go unnoticed until the next synchronizing call.
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
printf("done\n");
hipDeviceSynchronize();
// Allocate CUDA events that we'll use for timing
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Execute the kernel nIter times; the kernels are queued asynchronously
// between the start/stop events, so elapsed time covers all iterations.
int nIter = 300;
for (int j = 0; j < nIter; j++)
{
if (block_size == 16)
{
hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
else
{
hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
}
}
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Compute and print the performance (2*M*N*K flops per multiply)
float msecPerMatrixMul = msecTotal / nIter;
double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
printf("Checking computed result for correctness: ");
bool correct = true;
// NOTE(review): fixed absolute tolerance 1e-5 -- rounding error grows with
// the dot-product length dimsA.x, so large matrices can FAIL spuriously;
// a relative-error check would be more robust.
for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
{
if (fabs(h_C[i] - (dimsA.x * valB)) > 1e-5)
{
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-5\n", i, h_C[i], dimsA.x*valB);
correct = false;
}
}
printf("%s\n", correct ? "OK" : "FAIL");
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n");
hipDeviceReset();
if (correct)
{
return EXIT_SUCCESS;
}
else
{
return EXIT_FAILURE;
}
}
/**
* Program main
*/
/**
 * Program entry point: parses -device/-wA/-hA/-wB/-hB flags, validates that
 * the inner dimensions agree (wA == hB), then runs matrixMultiply and exits
 * with its result code.
 */
int main(int argc, char **argv)
{
printf("[Matrix Multiply Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf("Usage -device=n (n >= 0 for deviceID)\n");
printf("      -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
printf("      -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
printf("  Note: Outer matrix dimensions of A & B matrices must be equal.\n");
exit(EXIT_SUCCESS);
}
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **)argv, "device"))
{
devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
// NOTE(review): hipSetDevice's return value is ignored -- an invalid
// device id would only surface via later API calls.
hipSetDevice(devID);
}
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&devID);
if (error != hipSuccess)
{
printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__);
}
error = hipGetDeviceProperties(&deviceProp, devID);
// NOTE(review): deviceProp.computeMode is read before the error from
// hipGetDeviceProperties is checked; on failure this reads an
// uninitialized struct.
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Use a larger block size for Fermi and above
int block_size = (deviceProp.major < 2) ? 16 : 32;
// Default sizes: A is 10*block_size square-ish, B is 20*block_size wide;
// both are multiples of block_size as the kernel requires.
dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
{
dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
{
dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
}
// width of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
{
dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
}
// height of Matrix B
if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
{
dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
}
if (dimsA.x != dimsB.y)
{
printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
dimsA.x, dimsB.y);
exit(EXIT_FAILURE);
}
printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
exit(matrix_result);
}
| a3b1753d41a0eb30505f69592cb49405432d5cf9.cu | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Superconducting (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
/**
 * Matrix multiplication (CUDA Kernel) on the device: C = A * B.
 * wA is A's width and wB is B's width.
 *
 * One BLOCK_SIZE x BLOCK_SIZE thread block produces one tile of C; the
 * input matrices are walked tile-by-tile through shared memory. Requires
 * blockDim == (BLOCK_SIZE, BLOCK_SIZE) and dimensions divisible by
 * BLOCK_SIZE (the loads below are not bounds-checked).
 */
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
    // Shared-memory staging tiles for the current sub-blocks of A and B.
    __shared__ float tileA[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float tileB[BLOCK_SIZE][BLOCK_SIZE];

    const int col = threadIdx.x;
    const int row = threadIdx.y;

    // First and last sub-matrix of A handled by this block, the first
    // sub-matrix of B, and the per-tile stride through B.
    const int aFirst  = wA * BLOCK_SIZE * blockIdx.y;
    const int aLast   = aFirst + wA - 1;
    const int bFirst  = BLOCK_SIZE * blockIdx.x;
    const int bStride = BLOCK_SIZE * wB;

    // Running dot product for the single C element owned by this thread.
    float acc = 0;

    int a = aFirst;
    int b = bFirst;
    while (a <= aLast)
    {
        // Each thread stages exactly one element of each tile.
        tileA[row][col] = A[a + wA * row + col];
        tileB[row][col] = B[b + wB * row + col];
        // Make sure both tiles are fully loaded before anyone reads them.
        __syncthreads();

#pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k)
        {
            acc += tileA[row][k] * tileB[k][col];
        }
        // Finish all reads before the tiles are overwritten next iteration.
        __syncthreads();

        a += BLOCK_SIZE;
        b += bStride;
    }

    // Write the accumulated element of the block sub-matrix to device memory.
    const int cBase = wB * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x;
    C[cBase + wB * row + col] = acc;
}
// Fill the first `size` elements of `data` with the constant `val`.
void constantInit(float *data, int size, float val)
{
    for (float *p = data, *end = data + size; p != end; ++p)
    {
        *p = val;
    }
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
/**
 * Run a simple test of matrix multiplication using CUDA.
 *
 * Allocates A (dimsA.x x dimsA.y, all 1.0f) and B (dimsB.x x dimsB.y, all
 * valB) on host and device, times 300 launches of matrixMulCUDA with CUDA
 * events, and validates the result: because A is all ones, every element of
 * C must equal dimsA.x * valB.
 *
 * Preconditions: dimsA.x == dimsB.y and all dimensions are multiples of
 * block_size (the kernel performs no bounds checking). argc/argv are
 * accepted for interface compatibility but unused.
 *
 * Returns EXIT_SUCCESS when the device result matches the reference,
 * EXIT_FAILURE otherwise; exits the process directly on any CUDA error.
 */
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
    // Allocate host memory for matrices A and B
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = (float *)malloc(mem_size_A);
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = (float *)malloc(mem_size_B);
    // Fix: the original never checked these allocations before writing.
    if (h_A == NULL || h_B == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrix A or B!\n");
        exit(EXIT_FAILURE);
    }
    // Initialize host memory
    const float valB = 0.01f;
    constantInit(h_A, size_A, 1.0f);
    constantInit(h_B, size_B, valB);
    // Allocate device memory
    float *d_A, *d_B, *d_C;
    // Allocate host matrix C
    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
    float *h_C = (float *) malloc(mem_size_C);
    if (h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrix C!\n");
        exit(EXIT_FAILURE);
    }
    cudaError_t error;
    error = cudaMalloc((void **) &d_A, mem_size_A);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMalloc((void **) &d_B, mem_size_B);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMalloc((void **) &d_C, mem_size_C);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    // copy host memory to device
    error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    // Setup execution parameters
    dim3 threads(block_size, block_size);
    dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
    // Create and start timer
    printf("Computing result using CUDA Kernel...\n");
    // Performs warmup operation using matrixMul CUDA kernel
    if (block_size == 16)
    {
        matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
    }
    else
    {
        matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
    }
    // Fix: kernel launches do not report errors via a return value; surface
    // launch-configuration failures immediately.
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "matrixMulCUDA launch failed (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    printf("done\n");
    cudaDeviceSynchronize();
    // Allocate CUDA events that we'll use for timing
    cudaEvent_t start;
    error = cudaEventCreate(&start);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    cudaEvent_t stop;
    error = cudaEventCreate(&stop);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Record the start event
    error = cudaEventRecord(start, NULL);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Execute the kernel nIter times; launches queue asynchronously between
    // the start/stop events so the elapsed time covers all iterations.
    int nIter = 300;
    for (int j = 0; j < nIter; j++)
    {
        if (block_size == 16)
        {
            matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
        }
        else
        {
            matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
        }
    }
    // Fix: check for launch errors once after the timed loop.
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        fprintf(stderr, "matrixMulCUDA launch failed (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Record the stop event
    error = cudaEventRecord(stop, NULL);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Wait for the stop event to complete
    error = cudaEventSynchronize(stop);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    float msecTotal = 0.0f;
    error = cudaEventElapsedTime(&msecTotal, start, stop);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Compute and print the performance (2*M*N*K flops per multiply)
    float msecPerMatrixMul = msecTotal / nIter;
    double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
    printf(
        "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
        gigaFlops,
        msecPerMatrixMul,
        flopsPerMatrixMul,
        threads.x * threads.y);
    // Copy result from device to host
    error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    printf("Checking computed result for correctness: ");
    bool correct = true;
    // Fix: use a relative-error test instead of a fixed absolute 1e-5 bound.
    // Float rounding error grows with the dot-product length (dimsA.x), so
    // the old absolute check reports false FAILs for large matrices.
    double eps = 1.e-6; // machine zero
    for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
    {
        double abs_err = fabs(h_C[i] - (dimsA.x * valB));
        double dot_length = dimsA.x;
        double abs_val = fabs(h_C[i]);
        double rel_err = abs_err / abs_val / dot_length;
        if (rel_err > eps)
        {
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x * valB, eps);
            correct = false;
        }
    }
    printf("%s\n", correct ? "OK" : "FAIL");
    // Clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n");
    cudaDeviceReset();
    if (correct)
    {
        return EXIT_SUCCESS;
    }
    else
    {
        return EXIT_FAILURE;
    }
}
/**
* Program main
*/
/**
 * Program entry point: parses -device/-wA/-hA/-wB/-hB flags, validates that
 * the inner dimensions agree (wA == hB), then runs matrixMultiply and exits
 * with its result code.
 */
int main(int argc, char **argv)
{
    printf("[Matrix Multiply Using CUDA] - Starting...\n");
    if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
        checkCmdLineFlag(argc, (const char **)argv, "?"))
    {
        printf("Usage -device=n (n >= 0 for deviceID)\n");
        printf("      -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
        printf("      -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
        printf("  Note: Outer matrix dimensions of A & B matrices must be equal.\n");
        exit(EXIT_SUCCESS);
    }
    // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
    int devID = 0;
    if (checkCmdLineFlag(argc, (const char **)argv, "device"))
    {
        devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
        // NOTE(review): cudaSetDevice's return value is ignored -- an invalid
        // device id would only surface via later API calls.
        cudaSetDevice(devID);
    }
    cudaError_t error;
    cudaDeviceProp deviceProp;
    error = cudaGetDevice(&devID);
    if (error != cudaSuccess)
    {
        printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
    }
    error = cudaGetDeviceProperties(&deviceProp, devID);
    // NOTE(review): deviceProp.computeMode is read before the error from
    // cudaGetDeviceProperties is checked; on failure this reads an
    // uninitialized struct.
    if (deviceProp.computeMode == cudaComputeModeProhibited)
    {
        fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
        exit(EXIT_SUCCESS);
    }
    if (error != cudaSuccess)
    {
        printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
    }
    else
    {
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    }
    // Use a larger block size for Fermi and above
    int block_size = (deviceProp.major < 2) ? 16 : 32;
    // Default sizes: both are multiples of block_size as the kernel requires.
    dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
    dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
    // width of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
    {
        dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
    }
    // height of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
    {
        dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
    }
    // width of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
    {
        dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
    }
    // height of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
    {
        dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
    }
    if (dimsA.x != dimsB.y)
    {
        printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
               dimsA.x, dimsB.y);
        exit(EXIT_FAILURE);
    }
    printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
    int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
    exit(matrix_result);
}
|
03c0e3aa65231c98ad9b9f9ee36498404c995b50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
/* -*- mode: c++ -*- */
#include "gg.h"
#include "ggcuda.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic";
#include "gen_hip.cuh"
static const int __tb_NumShortestPaths = TB_SIZE;
static const int __tb_FirstIterationSSSP = TB_SIZE;
static const int __tb_SSSP = TB_SIZE;
static const int __tb_DependencyPropagation = TB_SIZE;
static const int __tb_PredAndSucc = TB_SIZE;
// Reset all betweenness-centrality node properties to their neutral starting
// values for nodes in [__begin, __end). Each thread handles nodes strided by
// the total thread count (TID_1D / TOTAL_THREADS_1D macros -- presumably the
// flat 1D thread id and grid size; defined in gen_hip.cuh).
// The "FP:" comments are compiler-generated pass markers; left as-is.
__global__ void InitializeGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_betweeness_centrality, float * p_dependency, uint32_t * p_num_predecessors, uint64_t * p_num_shortest_paths, uint32_t * p_num_successors, uint8_t * p_propagation_flag, uint64_t * p_to_add, float * p_to_add_float, uint32_t * p_trim)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
index_type src_end;
// FP: "1 -> 2;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop  = src < __end;
if (pop)
{
// Zero every per-node accumulator and clear the propagation flag.
p_betweeness_centrality[src] = 0;
p_num_shortest_paths[src]    = 0;
p_num_successors[src]        = 0;
p_num_predecessors[src]      = 0;
p_trim[src]                  = 0;
p_to_add[src]                = 0;
p_to_add_float[src]          = 0;
p_dependency[src]            = 0;
p_propagation_flag[src] = false;
}
}
// FP: "15 -> 16;
}
// Per-source-round (re)initialization for BC: the current source node gets
// distance 0 and one shortest path with its propagation flag set; every other
// node in [__begin, __end) gets distance local_infinity, zero shortest paths,
// and a cleared flag. Predecessor/successor counts and dependency are zeroed
// for all nodes. graph.node_data[src] holds the node's global id, compared
// against local_current_src_node to identify the source.
__global__ void InitializeIteration(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint64_t  local_current_src_node, const uint32_t local_infinity, uint32_t * p_current_length, float * p_dependency, uint32_t * p_num_predecessors, uint64_t * p_num_shortest_paths, uint32_t * p_num_successors, uint32_t * p_old_length, uint8_t * p_propagation_flag)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
bool is_source;
index_type src_end;
// FP: "1 -> 2;
// FP: "2 -> 3;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop  = src < __end;
if (pop)
{
is_source = graph.node_data[src] == local_current_src_node;
if (!is_source)
{
p_current_length[src]     = local_infinity;
p_old_length[src]         = local_infinity;
p_num_shortest_paths[src] = 0;
p_propagation_flag[src]   = false;
}
else
{
// The source of this round: distance 0, exactly one shortest path.
p_current_length[src]     = 0;
p_old_length[src]         = 0;
p_num_shortest_paths[src] = 1;
p_propagation_flag[src]   = true;
}
p_num_predecessors[src] = 0;
p_num_successors[src]   = 0;
p_dependency[src]       = 0;
}
}
// FP: "21 -> 22;
}
// One edge-relaxation sweep over every node in [__begin, __end) (SSSP round
// zero): for each out-edge (src, dst), atomically lower dst's distance to
// current_length[src] + weight (weight is 1 in BFS mode) and mark dst in the
// bitset so the host knows which values changed.
//
// The body is the IrGL-generated nested-parallelism scheduler: each node's
// edge list is routed by degree to one of three executors -- edge counts of
// at least __kernel_tb_size are processed by the whole thread block ('tb'
// phase), counts in [32, tb_size) by a single warp ('wp' phase), and the
// remainder through a block-scanned fine-grained worklist ('fg' phase).
// Statement order and the barrier placement below are load-bearing; do not
// reorder. The "FP:" comments are compiler-generated pass markers.
__global__ void FirstIterationSSSP(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_current_length, DynamicBitset& bitset_current_length)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_FirstIterationSSSP;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
// Shared scratch: union of the block-scan temp storage and the three
// executor queues (tb / warp / fg).
__shared__ npsTy nps ;
// FP: "5 -> 6;
// Round the iteration bound up so every thread executes the same number of
// loop trips (required: all threads must reach the collective scans/barriers).
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "6 -> 7;
bool pop  = src < __end;
// FP: "7 -> 8;
if (pop)
{
}
// FP: "9 -> 10;
// FP: "12 -> 13;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "13 -> 14;
__shared__ struct { index_type src; } _np_closure [TB_SIZE];
// FP: "14 -> 15;
_np_closure[threadIdx.x].src = src;
// FP: "15 -> 16;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "18 -> 19;
// FP: "19 -> 20;
// Classify this node's edges by degree: el[0] counts warp/tb-sized work,
// el[1] counts fine-grained work for the block scan.
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "20 -> 21;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "21 -> 22;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "24 -> 25;
__syncthreads();
// FP: "25 -> 26;
// --- 'tb' phase: whole block cooperates on one high-degree node at a time.
while (true)
{
// FP: "26 -> 27;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "29 -> 30;
__syncthreads();
// FP: "30 -> 31;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "31 -> 32;
__syncthreads();
// FP: "32 -> 33;
break;
}
// FP: "34 -> 35;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "37 -> 38;
__syncthreads();
// FP: "38 -> 39;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "39 -> 40;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "42 -> 43;
assert(nps.tb.src < __kernel_tb_size);
src = _np_closure[nps.tb.src].src;
// FP: "43 -> 44;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type current_edge;
current_edge = ns +_np_j;
{
index_type dst;
int edge_weight;
uint32_t new_dist;
dst = graph.getAbsDestination(current_edge);
edge_weight = 1;
#ifndef __USE_BFS__
edge_weight += graph.getAbsWeight(current_edge);
#endif
new_dist = edge_weight + p_current_length[src];
atomicTestMin(&p_current_length[dst], new_dist);
bitset_current_length.set(dst);
}
}
// FP: "56 -> 57;
__syncthreads();
}
// FP: "58 -> 59;
// FP: "59 -> 60;
// --- 'wp' phase: one warp cooperates on each medium-degree node.
{
const int warpid = threadIdx.x / 32;
// FP: "60 -> 61;
const int _np_laneid = cub::LaneId();
// FP: "61 -> 62;
while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
src = _np_closure[nps.warp.src[warpid]].src;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type current_edge;
current_edge = _np_w_start +_np_ii;
{
index_type dst;
int edge_weight;
uint32_t new_dist;
dst = graph.getAbsDestination(current_edge);
edge_weight = 1;
#ifndef __USE_BFS__
edge_weight += graph.getAbsWeight(current_edge);
#endif
new_dist = edge_weight + p_current_length[src];
atomicTestMin(&p_current_length[dst], new_dist);
bitset_current_length.set(dst);
}
}
}
// FP: "84 -> 85;
__syncthreads();
// FP: "85 -> 86;
}
// FP: "86 -> 87;
__syncthreads();
// FP: "87 -> 88;
// --- 'fg' phase: remaining low-degree edges via the scanned worklist.
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "88 -> 89;
while (_np.work())
{
// FP: "89 -> 90;
int _np_i =0;
// FP: "90 -> 91;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "91 -> 92;
__syncthreads();
// FP: "92 -> 93;
// FP: "93 -> 94;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type current_edge;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
src = _np_closure[nps.fg.src[_np_i]].src;
current_edge= nps.fg.itvalue[_np_i];
{
index_type dst;
int edge_weight;
uint32_t new_dist;
dst = graph.getAbsDestination(current_edge);
edge_weight = 1;
#ifndef __USE_BFS__
edge_weight += graph.getAbsWeight(current_edge);
#endif
new_dist = edge_weight + p_current_length[src];
atomicTestMin(&p_current_length[dst], new_dist);
bitset_current_length.set(dst);
}
}
// FP: "107 -> 108;
_np.execute_round_done(ITSIZE);
// FP: "108 -> 109;
__syncthreads();
}
// FP: "110 -> 111;
assert(threadIdx.x < __kernel_tb_size);
src = _np_closure[threadIdx.x].src;
}
// FP: "112 -> 113;
}
__global__ void SSSP(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_current_length, uint32_t * p_old_length, DynamicBitset& bitset_current_length, HGAccumulator<uint32_t> DGAccumulator_accum)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_SSSP;
__shared__ hipcub::BlockReduce<uint32_t, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
// FP: "6 -> 7;
DGAccumulator_accum.thread_entry();
// FP: "7 -> 8;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "8 -> 9;
bool pop = src < __end;
// FP: "9 -> 10;
if (pop)
{
if (p_old_length[src] > p_current_length[src])
{
p_old_length[src] = p_current_length[src];
}
else
{
pop = false;
}
}
// FP: "14 -> 15;
// FP: "17 -> 18;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "18 -> 19;
__shared__ struct { index_type src; } _np_closure [TB_SIZE];
// FP: "19 -> 20;
_np_closure[threadIdx.x].src = src;
// FP: "20 -> 21;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "23 -> 24;
// FP: "24 -> 25;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "25 -> 26;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "26 -> 27;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "29 -> 30;
__syncthreads();
// FP: "30 -> 31;
while (true)
{
// FP: "31 -> 32;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "34 -> 35;
__syncthreads();
// FP: "35 -> 36;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "36 -> 37;
__syncthreads();
// FP: "37 -> 38;
break;
}
// FP: "39 -> 40;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "42 -> 43;
__syncthreads();
// FP: "43 -> 44;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "44 -> 45;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "47 -> 48;
assert(nps.tb.src < __kernel_tb_size);
src = _np_closure[nps.tb.src].src;
// FP: "48 -> 49;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type current_edge;
current_edge = ns +_np_j;
{
index_type dst;
int edge_weight;
uint32_t new_dist;
uint32_t old;
dst = graph.getAbsDestination(current_edge);
edge_weight = 1;
#ifndef __USE_BFS__
edge_weight += graph.getAbsWeight(current_edge);
#endif
new_dist = edge_weight + p_current_length[src];
old = atomicTestMin(&p_current_length[dst], new_dist);
if (old > new_dist)
{
bitset_current_length.set(dst);
DGAccumulator_accum.reduce( 1);
}
}
}
// FP: "65 -> 66;
__syncthreads();
}
// FP: "67 -> 68;
// FP: "68 -> 69;
{
const int warpid = threadIdx.x / 32;
// FP: "69 -> 70;
const int _np_laneid = cub::LaneId();
// FP: "70 -> 71;
while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
src = _np_closure[nps.warp.src[warpid]].src;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type current_edge;
current_edge = _np_w_start +_np_ii;
{
index_type dst;
int edge_weight;
uint32_t new_dist;
uint32_t old;
dst = graph.getAbsDestination(current_edge);
edge_weight = 1;
#ifndef __USE_BFS__
edge_weight += graph.getAbsWeight(current_edge);
#endif
new_dist = edge_weight + p_current_length[src];
old = atomicTestMin(&p_current_length[dst], new_dist);
if (old > new_dist)
{
bitset_current_length.set(dst);
DGAccumulator_accum.reduce( 1);
}
}
}
}
// FP: "97 -> 98;
__syncthreads();
// FP: "98 -> 99;
}
// FP: "99 -> 100;
__syncthreads();
// FP: "100 -> 101;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "101 -> 102;
while (_np.work())
{
// FP: "102 -> 103;
int _np_i =0;
// FP: "103 -> 104;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "104 -> 105;
__syncthreads();
// FP: "105 -> 106;
// FP: "106 -> 107;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type current_edge;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
src = _np_closure[nps.fg.src[_np_i]].src;
current_edge= nps.fg.itvalue[_np_i];
{
index_type dst;
int edge_weight;
uint32_t new_dist;
uint32_t old;
dst = graph.getAbsDestination(current_edge);
edge_weight = 1;
#ifndef __USE_BFS__
edge_weight += graph.getAbsWeight(current_edge);
#endif
new_dist = edge_weight + p_current_length[src];
old = atomicTestMin(&p_current_length[dst], new_dist);
if (old > new_dist)
{
bitset_current_length.set(dst);
DGAccumulator_accum.reduce( 1);
}
}
}
// FP: "124 -> 125;
_np.execute_round_done(ITSIZE);
// FP: "125 -> 126;
__syncthreads();
}
// FP: "127 -> 128;
assert(threadIdx.x < __kernel_tb_size);
src = _np_closure[threadIdx.x].src;
}
// FP: "130 -> 131;
DGAccumulator_accum.thread_exit<hipcub::BlockReduce<uint32_t, TB_SIZE>>(DGAccumulator_accum_ts);
// FP: "131 -> 132;
}
// PredAndSucc: for every reachable source node (current_length != local_infinity),
// walk its outgoing edges; for each edge on a shortest path
// (current_length[src] + edge_weight == current_length[dst]) atomically bump
// num_successors[src] and num_predecessors[dst] and mark both dirty bitsets.
// Edge weight is 1 under __USE_BFS__, else 1 + the stored edge weight.
//
// Generated code (the "// FP" markers are compiler trace comments): each node's
// edge range is drained in three phases by degree — whole-thread-block
// (size >= _NP_CROSSOVER_TB), whole-warp (>= _NP_CROSSOVER_WP), then
// fine-grained per-thread chunks — with the per-thread closure (src) staged in
// shared memory.  Statement order and __syncthreads() placement are load-bearing.
__global__ void PredAndSucc(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, uint32_t * p_current_length, uint32_t * p_num_predecessors, uint32_t * p_num_successors, DynamicBitset& bitset_num_predecessors, DynamicBitset& bitset_num_successors)
{
  unsigned tid = TID_1D;
  unsigned nthreads = TOTAL_THREADS_1D;
  const unsigned __kernel_tb_size = __tb_PredAndSucc;
  index_type src_end;
  index_type src_rup;
  // FP: "1 -> 2;
  // Degree thresholds selecting the warp-level / block-level draining phases.
  const int _NP_CROSSOVER_WP = 32;
  const int _NP_CROSSOVER_TB = __kernel_tb_size;
  // FP: "2 -> 3;
  const int BLKSIZE = __kernel_tb_size;
  const int ITSIZE = BLKSIZE * 8;
  // FP: "3 -> 4;
  // Shared scratch for the scheduling scan plus the tb/warp/fine-grained queues.
  typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
  typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
  // FP: "4 -> 5;
  __shared__ npsTy nps ;
  // FP: "5 -> 6;
  src_end = __end;
  // Round the range up so every thread of a block executes the same number of
  // iterations (required by the block-wide scan/barriers below).
  src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
  for (index_type src = __begin + tid; src < src_rup; src += nthreads)
  {
    multiple_sum<2, index_type> _np_mps;
    multiple_sum<2, index_type> _np_mps_total;
    // FP: "6 -> 7;
    // pop == true iff this thread holds a real, reachable node this iteration.
    bool pop = src < __end;
    // FP: "7 -> 8;
    if (pop)
    {
      if (p_current_length[src] != local_infinity)
      {
      }
      else
      {
        pop = false;
      }
    }
    // FP: "12 -> 13;
    // FP: "15 -> 16;
    struct NPInspector1 _np = {0,0,0,0,0,0};
    // FP: "16 -> 17;
    __shared__ struct { index_type src; } _np_closure [TB_SIZE];
    // FP: "17 -> 18;
    _np_closure[threadIdx.x].src = src;
    // FP: "18 -> 19;
    if (pop)
    {
      _np.size = (graph).getOutDegree(src);
      _np.start = (graph).getFirstEdge(src);
    }
    // FP: "21 -> 22;
    // FP: "22 -> 23;
    // el[0]: work handled at >= warp granularity; el[1]: fine-grained work.
    _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
    _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
    // FP: "23 -> 24;
    BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
    // FP: "24 -> 25;
    if (threadIdx.x == 0)
    {
      nps.tb.owner = MAX_TB_SIZE + 1;
    }
    // FP: "27 -> 28;
    __syncthreads();
    // FP: "28 -> 29;
    // Phase 1: whole thread block drains each high-degree node, one owner at a time.
    while (true)
    {
      // FP: "29 -> 30;
      if (_np.size >= _NP_CROSSOVER_TB)
      {
        nps.tb.owner = threadIdx.x;
      }
      // FP: "32 -> 33;
      __syncthreads();
      // FP: "33 -> 34;
      if (nps.tb.owner == MAX_TB_SIZE + 1)
      {
        // FP: "34 -> 35;
        __syncthreads();
        // FP: "35 -> 36;
        break;
      }
      // FP: "37 -> 38;
      if (nps.tb.owner == threadIdx.x)
      {
        nps.tb.start = _np.start;
        nps.tb.size = _np.size;
        nps.tb.src = threadIdx.x;
        _np.start = 0;
        _np.size = 0;
      }
      // FP: "40 -> 41;
      __syncthreads();
      // FP: "41 -> 42;
      int ns = nps.tb.start;
      int ne = nps.tb.size;
      // FP: "42 -> 43;
      if (nps.tb.src == threadIdx.x)
      {
        nps.tb.owner = MAX_TB_SIZE + 1;
      }
      // FP: "45 -> 46;
      assert(nps.tb.src < __kernel_tb_size);
      src = _np_closure[nps.tb.src].src;
      // FP: "46 -> 47;
      for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
      {
        index_type current_edge;
        current_edge = ns +_np_j;
        {
          index_type dst;
          int edge_weight;
          dst = graph.getAbsDestination(current_edge);
          edge_weight = 1;
#ifndef __USE_BFS__
          edge_weight += graph.getAbsWeight(current_edge);
#endif
          // Shortest-path edge test: src precedes dst on some shortest path.
          if ((p_current_length[src] + edge_weight) == p_current_length[dst])
          {
            atomicTestAdd(&p_num_successors[src], (unsigned int)1);
            atomicTestAdd(&p_num_predecessors[dst], (unsigned int)1);
            bitset_num_successors.set(src);
            bitset_num_predecessors.set(dst);
          }
        }
      }
      // FP: "61 -> 62;
      __syncthreads();
    }
    // FP: "63 -> 64;
    // FP: "64 -> 65;
    // Phase 2: each warp drains its medium-degree nodes cooperatively.
    {
      const int warpid = threadIdx.x / 32;
      // FP: "65 -> 66;
      const int _np_laneid = cub::LaneId();
      // FP: "66 -> 67;
      while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
      {
        if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
        {
          nps.warp.owner[warpid] = _np_laneid;
        }
        if (nps.warp.owner[warpid] == _np_laneid)
        {
          nps.warp.start[warpid] = _np.start;
          nps.warp.size[warpid] = _np.size;
          nps.warp.src[warpid] = threadIdx.x;
          _np.start = 0;
          _np.size = 0;
        }
        index_type _np_w_start = nps.warp.start[warpid];
        index_type _np_w_size = nps.warp.size[warpid];
        assert(nps.warp.src[warpid] < __kernel_tb_size);
        src = _np_closure[nps.warp.src[warpid]].src;
        for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
        {
          index_type current_edge;
          current_edge = _np_w_start +_np_ii;
          {
            index_type dst;
            int edge_weight;
            dst = graph.getAbsDestination(current_edge);
            edge_weight = 1;
#ifndef __USE_BFS__
            edge_weight += graph.getAbsWeight(current_edge);
#endif
            if ((p_current_length[src] + edge_weight) == p_current_length[dst])
            {
              atomicTestAdd(&p_num_successors[src], (unsigned int)1);
              atomicTestAdd(&p_num_predecessors[dst], (unsigned int)1);
              bitset_num_successors.set(src);
              bitset_num_predecessors.set(dst);
            }
          }
        }
      }
      // FP: "91 -> 92;
      __syncthreads();
      // FP: "92 -> 93;
    }
    // FP: "93 -> 94;
    __syncthreads();
    // FP: "94 -> 95;
    // Phase 3: fine-grained — remaining (low-degree) edges are load-balanced
    // across the block in ITSIZE-sized batches via inspect2/execute rounds.
    _np.total = _np_mps_total.el[1];
    _np.offset = _np_mps.el[1];
    // FP: "95 -> 96;
    while (_np.work())
    {
      // FP: "96 -> 97;
      int _np_i =0;
      // FP: "97 -> 98;
      _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
      // FP: "98 -> 99;
      __syncthreads();
      // FP: "99 -> 100;
      // FP: "100 -> 101;
      for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
      {
        index_type current_edge;
        assert(nps.fg.src[_np_i] < __kernel_tb_size);
        src = _np_closure[nps.fg.src[_np_i]].src;
        current_edge= nps.fg.itvalue[_np_i];
        {
          index_type dst;
          int edge_weight;
          dst = graph.getAbsDestination(current_edge);
          edge_weight = 1;
#ifndef __USE_BFS__
          edge_weight += graph.getAbsWeight(current_edge);
#endif
          if ((p_current_length[src] + edge_weight) == p_current_length[dst])
          {
            atomicTestAdd(&p_num_successors[src], (unsigned int)1);
            atomicTestAdd(&p_num_predecessors[dst], (unsigned int)1);
            bitset_num_successors.set(src);
            bitset_num_predecessors.set(dst);
          }
        }
      }
      // FP: "116 -> 117;
      _np.execute_round_done(ITSIZE);
      // FP: "117 -> 118;
      __syncthreads();
    }
    // FP: "119 -> 120;
    assert(threadIdx.x < __kernel_tb_size);
    src = _np_closure[threadIdx.x].src;
  }
  // FP: "121 -> 122;
}
// NumShortestPathsChanges: per-node bookkeeping pass between NumShortestPaths
// rounds.  For each reachable node it (a) consumes the accumulated
// predecessor-trim counter, raising the propagation flag once no predecessors
// remain, and (b) folds any pending shortest-path count (to_add) into
// num_shortest_paths, marking the node's dirty bitset.
__global__ void NumShortestPathsChanges(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, uint32_t * p_current_length, uint32_t * p_num_predecessors, uint64_t * p_num_shortest_paths, uint8_t * p_propagation_flag, uint64_t * p_to_add, uint32_t * p_trim, DynamicBitset& bitset_num_shortest_paths)
{
  const unsigned gtid    = TID_1D;
  const unsigned gstride = TOTAL_THREADS_1D;
  for (index_type node = __begin + gtid; node < __end; node += gstride)
  {
    // Unreached nodes are left untouched.
    if (p_current_length[node] == local_infinity)
      continue;
    if (p_trim[node] > 0)
    {
      p_num_predecessors[node] = p_num_predecessors[node] - p_trim[node];
      p_trim[node] = 0;
      if (p_num_predecessors[node] == 0)
      {
        // All predecessors accounted for: node may now propagate.
        p_propagation_flag[node] = true;
      }
    }
    if (p_to_add[node] > 0)
    {
      p_num_shortest_paths[node] += p_to_add[node];
      p_to_add[node] = 0;
      bitset_num_shortest_paths.set(node);
    }
  }
}
// NumShortestPaths: for every reachable node whose propagation_flag is set,
// clear the flag and push num_shortest_paths[src] along each shortest-path
// edge (current_length[src] + edge_weight == current_length[dst]) into
// to_add[dst], while incrementing trim[dst]; both destination bitsets are
// marked and every push is counted in DGAccumulator_accum (so the host can
// detect quiescence).  Edge weight is 1 under __USE_BFS__, else 1 + the
// stored edge weight.
//
// Generated code (the "// FP" markers are compiler trace comments): edges are
// drained in three phases by degree — whole-thread-block, whole-warp, then
// fine-grained batches — with the per-thread closure (src) staged in shared
// memory.  Statement order and __syncthreads() placement are load-bearing.
__global__ void NumShortestPaths(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, uint32_t * p_current_length, uint64_t * p_num_shortest_paths, uint8_t * p_propagation_flag, uint64_t * p_to_add, uint32_t * p_trim, DynamicBitset& bitset_to_add, DynamicBitset& bitset_trim, HGAccumulator<uint32_t> DGAccumulator_accum)
{
  unsigned tid = TID_1D;
  unsigned nthreads = TOTAL_THREADS_1D;
  const unsigned __kernel_tb_size = __tb_NumShortestPaths;
  __shared__ hipcub::BlockReduce<uint32_t, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
  index_type src_end;
  index_type src_rup;
  // FP: "1 -> 2;
  // Degree thresholds selecting the warp-level / block-level draining phases.
  const int _NP_CROSSOVER_WP = 32;
  const int _NP_CROSSOVER_TB = __kernel_tb_size;
  // FP: "2 -> 3;
  const int BLKSIZE = __kernel_tb_size;
  const int ITSIZE = BLKSIZE * 8;
  // FP: "3 -> 4;
  typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
  typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
  // FP: "4 -> 5;
  __shared__ npsTy nps ;
  // FP: "5 -> 6;
  // FP: "6 -> 7;
  DGAccumulator_accum.thread_entry();
  // FP: "7 -> 8;
  src_end = __end;
  // Round up so every thread runs the same iteration count (block-wide barriers).
  src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
  for (index_type src = __begin + tid; src < src_rup; src += nthreads)
  {
    multiple_sum<2, index_type> _np_mps;
    multiple_sum<2, index_type> _np_mps_total;
    // FP: "8 -> 9;
    bool pop = src < __end;
    // FP: "9 -> 10;
    // Only reachable nodes with the propagation flag set participate; the
    // flag is cleared here (consumed) before their edges are pushed.
    if (pop)
    {
      if (p_current_length[src] != local_infinity)
      {
        if (p_propagation_flag[src])
        {
          p_propagation_flag[src] = false;
        }
        else
        {
          pop = false;
        }
      }
      else
      {
        pop = false;
      }
    }
    // FP: "16 -> 17;
    // FP: "19 -> 20;
    struct NPInspector1 _np = {0,0,0,0,0,0};
    // FP: "20 -> 21;
    __shared__ struct { index_type src; } _np_closure [TB_SIZE];
    // FP: "21 -> 22;
    _np_closure[threadIdx.x].src = src;
    // FP: "22 -> 23;
    if (pop)
    {
      _np.size = (graph).getOutDegree(src);
      _np.start = (graph).getFirstEdge(src);
    }
    // FP: "25 -> 26;
    // FP: "26 -> 27;
    _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
    _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
    // FP: "27 -> 28;
    BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
    // FP: "28 -> 29;
    if (threadIdx.x == 0)
    {
      nps.tb.owner = MAX_TB_SIZE + 1;
    }
    // FP: "31 -> 32;
    __syncthreads();
    // FP: "32 -> 33;
    // Phase 1: whole thread block drains each high-degree node.
    while (true)
    {
      // FP: "33 -> 34;
      if (_np.size >= _NP_CROSSOVER_TB)
      {
        nps.tb.owner = threadIdx.x;
      }
      // FP: "36 -> 37;
      __syncthreads();
      // FP: "37 -> 38;
      if (nps.tb.owner == MAX_TB_SIZE + 1)
      {
        // FP: "38 -> 39;
        __syncthreads();
        // FP: "39 -> 40;
        break;
      }
      // FP: "41 -> 42;
      if (nps.tb.owner == threadIdx.x)
      {
        nps.tb.start = _np.start;
        nps.tb.size = _np.size;
        nps.tb.src = threadIdx.x;
        _np.start = 0;
        _np.size = 0;
      }
      // FP: "44 -> 45;
      __syncthreads();
      // FP: "45 -> 46;
      int ns = nps.tb.start;
      int ne = nps.tb.size;
      // FP: "46 -> 47;
      if (nps.tb.src == threadIdx.x)
      {
        nps.tb.owner = MAX_TB_SIZE + 1;
      }
      // FP: "49 -> 50;
      assert(nps.tb.src < __kernel_tb_size);
      src = _np_closure[nps.tb.src].src;
      // FP: "50 -> 51;
      for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
      {
        index_type current_edge;
        current_edge = ns +_np_j;
        {
          index_type dst;
          int edge_weight;
          uint64_t paths_to_add;
          dst = graph.getAbsDestination(current_edge);
          edge_weight = 1;
#ifndef __USE_BFS__
          edge_weight += graph.getAbsWeight(current_edge);
#endif
          paths_to_add = p_num_shortest_paths[src];
          // Shortest-path edge: forward src's path count to dst.
          if ((p_current_length[src] + edge_weight) == p_current_length[dst])
          {
            atomicTestAdd(&p_to_add[dst], paths_to_add);
            atomicTestAdd(&p_trim[dst], (unsigned int)1);
            bitset_to_add.set(dst);
            bitset_trim.set(dst);
            DGAccumulator_accum.reduce( 1);
          }
        }
      }
      // FP: "68 -> 69;
      __syncthreads();
    }
    // FP: "70 -> 71;
    // FP: "71 -> 72;
    // Phase 2: each warp drains its medium-degree nodes cooperatively.
    {
      const int warpid = threadIdx.x / 32;
      // FP: "72 -> 73;
      const int _np_laneid = cub::LaneId();
      // FP: "73 -> 74;
      while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
      {
        if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
        {
          nps.warp.owner[warpid] = _np_laneid;
        }
        if (nps.warp.owner[warpid] == _np_laneid)
        {
          nps.warp.start[warpid] = _np.start;
          nps.warp.size[warpid] = _np.size;
          nps.warp.src[warpid] = threadIdx.x;
          _np.start = 0;
          _np.size = 0;
        }
        index_type _np_w_start = nps.warp.start[warpid];
        index_type _np_w_size = nps.warp.size[warpid];
        assert(nps.warp.src[warpid] < __kernel_tb_size);
        src = _np_closure[nps.warp.src[warpid]].src;
        for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
        {
          index_type current_edge;
          current_edge = _np_w_start +_np_ii;
          {
            index_type dst;
            int edge_weight;
            uint64_t paths_to_add;
            dst = graph.getAbsDestination(current_edge);
            edge_weight = 1;
#ifndef __USE_BFS__
            edge_weight += graph.getAbsWeight(current_edge);
#endif
            paths_to_add = p_num_shortest_paths[src];
            if ((p_current_length[src] + edge_weight) == p_current_length[dst])
            {
              atomicTestAdd(&p_to_add[dst], paths_to_add);
              atomicTestAdd(&p_trim[dst], (unsigned int)1);
              bitset_to_add.set(dst);
              bitset_trim.set(dst);
              DGAccumulator_accum.reduce( 1);
            }
          }
        }
      }
      // FP: "101 -> 102;
      __syncthreads();
      // FP: "102 -> 103;
    }
    // FP: "103 -> 104;
    __syncthreads();
    // FP: "104 -> 105;
    // Phase 3: fine-grained batches load-balanced across the block.
    _np.total = _np_mps_total.el[1];
    _np.offset = _np_mps.el[1];
    // FP: "105 -> 106;
    while (_np.work())
    {
      // FP: "106 -> 107;
      int _np_i =0;
      // FP: "107 -> 108;
      _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
      // FP: "108 -> 109;
      __syncthreads();
      // FP: "109 -> 110;
      // FP: "110 -> 111;
      for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
      {
        index_type current_edge;
        assert(nps.fg.src[_np_i] < __kernel_tb_size);
        src = _np_closure[nps.fg.src[_np_i]].src;
        current_edge= nps.fg.itvalue[_np_i];
        {
          index_type dst;
          int edge_weight;
          uint64_t paths_to_add;
          dst = graph.getAbsDestination(current_edge);
          edge_weight = 1;
#ifndef __USE_BFS__
          edge_weight += graph.getAbsWeight(current_edge);
#endif
          paths_to_add = p_num_shortest_paths[src];
          if ((p_current_length[src] + edge_weight) == p_current_length[dst])
          {
            atomicTestAdd(&p_to_add[dst], paths_to_add);
            atomicTestAdd(&p_trim[dst], (unsigned int)1);
            bitset_to_add.set(dst);
            bitset_trim.set(dst);
            DGAccumulator_accum.reduce( 1);
          }
        }
      }
      // FP: "129 -> 130;
      _np.execute_round_done(ITSIZE);
      // FP: "130 -> 131;
      __syncthreads();
    }
    // FP: "132 -> 133;
    assert(threadIdx.x < __kernel_tb_size);
    src = _np_closure[threadIdx.x].src;
  }
  // FP: "136 -> 137;
  // Block-level reduction of the per-thread update counts into the device counter.
  DGAccumulator_accum.thread_exit<hipcub::BlockReduce<uint32_t, TB_SIZE>>(DGAccumulator_accum_ts);
  // FP: "137 -> 138;
}
// PropagationFlagUpdate: raise the propagation flag (and its dirty bit) on
// every reachable node that has no remaining successors — such nodes are ready
// to start the backward dependency propagation.
__global__ void PropagationFlagUpdate(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, uint32_t * p_current_length, uint32_t * p_num_successors, uint8_t * p_propagation_flag, DynamicBitset& bitset_propagation_flag)
{
  const unsigned gtid    = TID_1D;
  const unsigned gstride = TOTAL_THREADS_1D;
  for (index_type node = __begin + gtid; node < __end; node += gstride)
  {
    const bool reachable = (p_current_length[node] != local_infinity);
    if (reachable && p_num_successors[node] == 0)
    {
      p_propagation_flag[node] = true;
      bitset_propagation_flag.set(node);
    }
  }
}
// DependencyPropChanges: per-node bookkeeping between DependencyPropagation
// rounds.  For each reachable node it (a) folds any pending float contribution
// (to_add_float) into dependency, (b) retires the propagation flag on nodes
// that already finished propagating (no successors left, flag still set), and
// (c) otherwise consumes the successor-trim counter, raising the flag once the
// successor count reaches zero.  Dirty bitsets are marked on every change.
__global__ void DependencyPropChanges(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, uint32_t * p_current_length, float * p_dependency, uint32_t * p_num_successors, uint8_t * p_propagation_flag, float * p_to_add_float, uint32_t * p_trim, DynamicBitset& bitset_dependency, DynamicBitset& bitset_propagation_flag)
{
  const unsigned gtid    = TID_1D;
  const unsigned gstride = TOTAL_THREADS_1D;
  for (index_type node = __begin + gtid; node < __end; node += gstride)
  {
    // Unreached nodes take no part in dependency propagation.
    if (p_current_length[node] == local_infinity)
      continue;
    if (p_to_add_float[node] > 0.0)
    {
      p_dependency[node] += p_to_add_float[node];
      p_to_add_float[node] = 0.0;
      bitset_dependency.set(node);
    }
    if (p_num_successors[node] == 0 && p_propagation_flag[node])
    {
      // Node already propagated its dependency: retire the flag.
      p_propagation_flag[node] = false;
      bitset_propagation_flag.set(node);
    }
    else if (p_trim[node] > 0)
    {
      p_num_successors[node] = p_num_successors[node] - p_trim[node];
      p_trim[node] = 0;
      if (p_num_successors[node] == 0)
      {
        // All successors drained: node becomes ready to propagate.
        p_propagation_flag[node] = true;
        bitset_propagation_flag.set(node);
      }
    }
  }
}
// DependencyPropagation: backward phase of Brandes-style BC.  For every
// reachable node that still has successors — excluding the current BC source
// node, whose successor count is zeroed and which is then skipped — pull a
// dependency contribution from each shortest-path successor dst whose
// propagation_flag is set:
//   trim[src]         += 1
//   to_add_float[src] += num_shortest_paths[src] / num_shortest_paths[dst]
//                        * (1 + dependency[dst])
// Both source-side dirty bitsets are marked and each pull is counted in
// DGAccumulator_accum (host-side quiescence check).  Edge weight is 1 under
// __USE_BFS__, else 1 + the stored edge weight.
//
// Generated code (the "// FP" markers are compiler trace comments): edges are
// drained in three phases by degree — whole-thread-block, whole-warp, then
// fine-grained batches — with the per-thread closure (src) staged in shared
// memory.  Statement order and __syncthreads() placement are load-bearing.
__global__ void DependencyPropagation(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint64_t local_current_src_node, const uint32_t local_infinity, uint32_t * p_current_length, float * p_dependency, uint64_t * p_num_shortest_paths, uint32_t * p_num_successors, uint8_t * p_propagation_flag, float * p_to_add_float, uint32_t * p_trim, DynamicBitset& bitset_to_add_float, DynamicBitset& bitset_trim, HGAccumulator<uint32_t> DGAccumulator_accum)
{
  unsigned tid = TID_1D;
  unsigned nthreads = TOTAL_THREADS_1D;
  const unsigned __kernel_tb_size = __tb_DependencyPropagation;
  __shared__ hipcub::BlockReduce<uint32_t, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
  index_type src_end;
  index_type src_rup;
  // FP: "1 -> 2;
  // Degree thresholds selecting the warp-level / block-level draining phases.
  const int _NP_CROSSOVER_WP = 32;
  const int _NP_CROSSOVER_TB = __kernel_tb_size;
  // FP: "2 -> 3;
  const int BLKSIZE = __kernel_tb_size;
  const int ITSIZE = BLKSIZE * 8;
  // FP: "3 -> 4;
  typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
  typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
  // FP: "4 -> 5;
  __shared__ npsTy nps ;
  // FP: "5 -> 6;
  // FP: "6 -> 7;
  DGAccumulator_accum.thread_entry();
  // FP: "7 -> 8;
  src_end = __end;
  // Round up so every thread runs the same iteration count (block-wide barriers).
  src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
  for (index_type src = __begin + tid; src < src_rup; src += nthreads)
  {
    multiple_sum<2, index_type> _np_mps;
    multiple_sum<2, index_type> _np_mps_total;
    // FP: "8 -> 9;
    bool pop = src < __end;
    // FP: "9 -> 10;
    // Participants: reachable nodes with successors remaining.  The current BC
    // source node is finalized here (successors zeroed) and excluded.
    if (pop)
    {
      if (p_current_length[src] != local_infinity)
      {
        if (p_num_successors[src] > 0)
        {
          if (graph.node_data[src] == local_current_src_node)
          {
            p_num_successors[src] = 0;
          }
          if (graph.node_data[src] != local_current_src_node)
          {
          }
          else
          {
            pop = false;
          }
        }
        else
        {
          pop = false;
        }
      }
      else
      {
        pop = false;
      }
    }
    // FP: "21 -> 22;
    // FP: "24 -> 25;
    struct NPInspector1 _np = {0,0,0,0,0,0};
    // FP: "25 -> 26;
    __shared__ struct { index_type src; } _np_closure [TB_SIZE];
    // FP: "26 -> 27;
    _np_closure[threadIdx.x].src = src;
    // FP: "27 -> 28;
    if (pop)
    {
      _np.size = (graph).getOutDegree(src);
      _np.start = (graph).getFirstEdge(src);
    }
    // FP: "30 -> 31;
    // FP: "31 -> 32;
    _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
    _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
    // FP: "32 -> 33;
    BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
    // FP: "33 -> 34;
    if (threadIdx.x == 0)
    {
      nps.tb.owner = MAX_TB_SIZE + 1;
    }
    // FP: "36 -> 37;
    __syncthreads();
    // FP: "37 -> 38;
    // Phase 1: whole thread block drains each high-degree node.
    while (true)
    {
      // FP: "38 -> 39;
      if (_np.size >= _NP_CROSSOVER_TB)
      {
        nps.tb.owner = threadIdx.x;
      }
      // FP: "41 -> 42;
      __syncthreads();
      // FP: "42 -> 43;
      if (nps.tb.owner == MAX_TB_SIZE + 1)
      {
        // FP: "43 -> 44;
        __syncthreads();
        // FP: "44 -> 45;
        break;
      }
      // FP: "46 -> 47;
      if (nps.tb.owner == threadIdx.x)
      {
        nps.tb.start = _np.start;
        nps.tb.size = _np.size;
        nps.tb.src = threadIdx.x;
        _np.start = 0;
        _np.size = 0;
      }
      // FP: "49 -> 50;
      __syncthreads();
      // FP: "50 -> 51;
      int ns = nps.tb.start;
      int ne = nps.tb.size;
      // FP: "51 -> 52;
      if (nps.tb.src == threadIdx.x)
      {
        nps.tb.owner = MAX_TB_SIZE + 1;
      }
      // FP: "54 -> 55;
      assert(nps.tb.src < __kernel_tb_size);
      src = _np_closure[nps.tb.src].src;
      // FP: "55 -> 56;
      for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
      {
        index_type current_edge;
        current_edge = ns +_np_j;
        {
          index_type dst;
          int edge_weight;
          dst = graph.getAbsDestination(current_edge);
          edge_weight = 1;
#ifndef __USE_BFS__
          edge_weight += graph.getAbsWeight(current_edge);
#endif
          if (p_propagation_flag[dst])
          {
            // Shortest-path successor: accumulate dst's dependency share.
            if ((p_current_length[src] + edge_weight) == p_current_length[dst])
            {
              float contrib;
              atomicTestAdd(&p_trim[src], (unsigned int)1);
              contrib = p_num_shortest_paths[src];
              contrib /= p_num_shortest_paths[dst];
              contrib *= (1.0 + p_dependency[dst]);
              atomicTestAdd(&p_to_add_float[src], contrib);
              bitset_trim.set(src);
              bitset_to_add_float.set(src);
              DGAccumulator_accum.reduce( 1);
            }
          }
        }
      }
      // FP: "77 -> 78;
      __syncthreads();
    }
    // FP: "79 -> 80;
    // FP: "80 -> 81;
    // Phase 2: each warp drains its medium-degree nodes cooperatively.
    {
      const int warpid = threadIdx.x / 32;
      // FP: "81 -> 82;
      const int _np_laneid = cub::LaneId();
      // FP: "82 -> 83;
      while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
      {
        if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
        {
          nps.warp.owner[warpid] = _np_laneid;
        }
        if (nps.warp.owner[warpid] == _np_laneid)
        {
          nps.warp.start[warpid] = _np.start;
          nps.warp.size[warpid] = _np.size;
          nps.warp.src[warpid] = threadIdx.x;
          _np.start = 0;
          _np.size = 0;
        }
        index_type _np_w_start = nps.warp.start[warpid];
        index_type _np_w_size = nps.warp.size[warpid];
        assert(nps.warp.src[warpid] < __kernel_tb_size);
        src = _np_closure[nps.warp.src[warpid]].src;
        for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
        {
          index_type current_edge;
          current_edge = _np_w_start +_np_ii;
          {
            index_type dst;
            int edge_weight;
            dst = graph.getAbsDestination(current_edge);
            edge_weight = 1;
#ifndef __USE_BFS__
            edge_weight += graph.getAbsWeight(current_edge);
#endif
            if (p_propagation_flag[dst])
            {
              if ((p_current_length[src] + edge_weight) == p_current_length[dst])
              {
                float contrib;
                atomicTestAdd(&p_trim[src], (unsigned int)1);
                contrib = p_num_shortest_paths[src];
                contrib /= p_num_shortest_paths[dst];
                contrib *= (1.0 + p_dependency[dst]);
                atomicTestAdd(&p_to_add_float[src], contrib);
                bitset_trim.set(src);
                bitset_to_add_float.set(src);
                DGAccumulator_accum.reduce( 1);
              }
            }
          }
        }
      }
      // FP: "114 -> 115;
      __syncthreads();
      // FP: "115 -> 116;
    }
    // FP: "116 -> 117;
    __syncthreads();
    // FP: "117 -> 118;
    // Phase 3: fine-grained batches load-balanced across the block.
    _np.total = _np_mps_total.el[1];
    _np.offset = _np_mps.el[1];
    // FP: "118 -> 119;
    while (_np.work())
    {
      // FP: "119 -> 120;
      int _np_i =0;
      // FP: "120 -> 121;
      _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
      // FP: "121 -> 122;
      __syncthreads();
      // FP: "122 -> 123;
      // FP: "123 -> 124;
      for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
      {
        index_type current_edge;
        assert(nps.fg.src[_np_i] < __kernel_tb_size);
        src = _np_closure[nps.fg.src[_np_i]].src;
        current_edge= nps.fg.itvalue[_np_i];
        {
          index_type dst;
          int edge_weight;
          dst = graph.getAbsDestination(current_edge);
          edge_weight = 1;
#ifndef __USE_BFS__
          edge_weight += graph.getAbsWeight(current_edge);
#endif
          if (p_propagation_flag[dst])
          {
            if ((p_current_length[src] + edge_weight) == p_current_length[dst])
            {
              float contrib;
              atomicTestAdd(&p_trim[src], (unsigned int)1);
              contrib = p_num_shortest_paths[src];
              contrib /= p_num_shortest_paths[dst];
              contrib *= (1.0 + p_dependency[dst]);
              atomicTestAdd(&p_to_add_float[src], contrib);
              bitset_trim.set(src);
              bitset_to_add_float.set(src);
              DGAccumulator_accum.reduce( 1);
            }
          }
        }
      }
      // FP: "146 -> 147;
      _np.execute_round_done(ITSIZE);
      // FP: "147 -> 148;
      __syncthreads();
    }
    // FP: "149 -> 150;
    assert(threadIdx.x < __kernel_tb_size);
    src = _np_closure[threadIdx.x].src;
  }
  // FP: "153 -> 154;
  // Block-level reduction of the per-thread pull counts into the device counter.
  DGAccumulator_accum.thread_exit<hipcub::BlockReduce<uint32_t, TB_SIZE>>(DGAccumulator_accum_ts);
  // FP: "154 -> 155;
}
// BC: fold each node's accumulated dependency for the current source into its
// running betweenness-centrality score.  Nodes with zero dependency are skipped.
__global__ void BC(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_betweeness_centrality, float * p_dependency)
{
  const unsigned gtid    = TID_1D;
  const unsigned gstride = TOTAL_THREADS_1D;
  for (index_type node = __begin + gtid; node < __end; node += gstride)
  {
    const float dep = p_dependency[node];
    if (dep > 0)
    {
      atomicTestAdd(&p_betweeness_centrality[node], dep);
    }
  }
}
// Sanity: reduce the betweenness-centrality array over [__begin, __end) into
// three distributed accumulators (sum, max, min) for host-side sanity checks.
// All threads must reach the thread_exit calls, which perform the block-level
// reductions using the shared temp storage.
__global__ void Sanity(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_betweeness_centrality, HGAccumulator<float> DGAccumulator_sum, HGReduceMax<float> DGAccumulator_max, HGReduceMin<float> DGAccumulator_min)
{
  const unsigned gtid    = TID_1D;
  const unsigned gstride = TOTAL_THREADS_1D;
  __shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_sum_ts;
  __shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_max_ts;
  __shared__ hipcub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_min_ts;

  // Per-thread accumulator initialization.
  DGAccumulator_sum.thread_entry();
  DGAccumulator_max.thread_entry();
  DGAccumulator_min.thread_entry();

  for (index_type node = __begin + gtid; node < __end; node += gstride)
  {
    const float bc = p_betweeness_centrality[node];
    DGAccumulator_max.reduce(bc);
    DGAccumulator_min.reduce(bc);
    DGAccumulator_sum.reduce( bc);
  }

  // Block-wide reductions and write-back to the device-resident results.
  DGAccumulator_sum.thread_exit<hipcub::BlockReduce<float, TB_SIZE>>(DGAccumulator_sum_ts);
  DGAccumulator_max.thread_exit<hipcub::BlockReduce<float, TB_SIZE>>(DGAccumulator_max_ts);
  DGAccumulator_min.thread_exit<hipcub::BlockReduce<float, TB_SIZE>>(DGAccumulator_min_ts);
}
void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
hipLaunchKernelGGL(( InitializeGraph) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, ctx->betweeness_centrality.data.gpu_wr_ptr(), ctx->dependency.data.gpu_wr_ptr(), ctx->num_predecessors.data.gpu_wr_ptr(), ctx->num_shortest_paths.data.gpu_wr_ptr(), ctx->num_successors.data.gpu_wr_ptr(), ctx->propagation_flag.data.gpu_wr_ptr(), ctx->to_add.data.gpu_wr_ptr(), ctx->to_add_float.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr());
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
// Run InitializeGraph over every node in the graph.
void InitializeGraph_allNodes_cuda(struct CUDA_Context* ctx)
{
  InitializeGraph_cuda(0, ctx->gg.nnodes, ctx);
}
// Run InitializeGraph over this host's master (owned) node range only.
void InitializeGraph_masterNodes_cuda(struct CUDA_Context* ctx)
{
  InitializeGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
}
// Run InitializeGraph over the prefix of nodes that have outgoing edges.
void InitializeGraph_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
  InitializeGraph_cuda(0, ctx->numNodesWithEdges, ctx);
}
void InitializeIteration_cuda(unsigned int __begin, unsigned int __end, const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
hipLaunchKernelGGL(( InitializeIteration) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, local_current_src_node, local_infinity, ctx->current_length.data.gpu_wr_ptr(), ctx->dependency.data.gpu_wr_ptr(), ctx->num_predecessors.data.gpu_wr_ptr(), ctx->num_shortest_paths.data.gpu_wr_ptr(), ctx->num_successors.data.gpu_wr_ptr(), ctx->old_length.data.gpu_wr_ptr(), ctx->propagation_flag.data.gpu_wr_ptr());
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
// Run InitializeIteration over every node in the graph.
void InitializeIteration_allNodes_cuda(const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
  InitializeIteration_cuda(0, ctx->gg.nnodes, local_infinity, local_current_src_node, ctx);
}
// Run InitializeIteration over this host's master (owned) node range only.
void InitializeIteration_masterNodes_cuda(const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
  InitializeIteration_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, local_current_src_node, ctx);
}
// Run InitializeIteration over the prefix of nodes that have outgoing edges.
void InitializeIteration_nodesWithEdges_cuda(const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
  InitializeIteration_cuda(0, ctx->numNodesWithEdges, local_infinity, local_current_src_node, ctx);
}
void FirstIterationSSSP_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
hipLaunchKernelGGL(( FirstIterationSSSP) , dim3(blocks), dim3(__tb_FirstIterationSSSP), 0, 0, ctx->gg, __begin, __end, ctx->current_length.data.gpu_wr_ptr(), *(ctx->current_length.is_updated.gpu_rd_ptr()));
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
// Run FirstIterationSSSP over every node in the graph.
void FirstIterationSSSP_allNodes_cuda(struct CUDA_Context* ctx)
{
  FirstIterationSSSP_cuda(0, ctx->gg.nnodes, ctx);
}
// Run FirstIterationSSSP over this host's master (owned) node range only.
void FirstIterationSSSP_masterNodes_cuda(struct CUDA_Context* ctx)
{
  FirstIterationSSSP_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
}
// Run FirstIterationSSSP over the prefix of nodes that have outgoing edges.
void FirstIterationSSSP_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
  FirstIterationSSSP_cuda(0, ctx->numNodesWithEdges, ctx);
}
void SSSP_cuda(unsigned int __begin, unsigned int __end, uint32_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<uint32_t> _DGAccumulator_accum;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<uint32_t> DGAccumulator_accumval = Shared<uint32_t>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_accumval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr();
// FP: "8 -> 9;
hipLaunchKernelGGL(( SSSP) , dim3(blocks), dim3(__tb_SSSP), 0, 0, ctx->gg, __begin, __end, ctx->current_length.data.gpu_wr_ptr(), ctx->old_length.data.gpu_wr_ptr(), *(ctx->current_length.is_updated.gpu_rd_ptr()), _DGAccumulator_accum);
// FP: "9 -> 10;
check_cuda_kernel;
// FP: "10 -> 11;
DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr());
// FP: "11 -> 12;
}
// Run one SSSP round over every node in the graph.
void SSSP_allNodes_cuda(uint32_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
  SSSP_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, ctx);
}
// Run one SSSP round over this host's master (owned) node range only.
void SSSP_masterNodes_cuda(uint32_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
  SSSP_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, ctx);
}
// Run one SSSP round over the prefix of nodes that have outgoing edges.
void SSSP_nodesWithEdges_cuda(uint32_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
  SSSP_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, ctx);
}
// Launch the PredAndSucc kernel over nodes [__begin, __end): for each reached
// node (current_length != local_infinity) it counts shortest-path predecessors
// and successors, marking both bitsets for the distributed sync layer.
void PredAndSucc_cuda(unsigned int __begin, unsigned int __end, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  dim3 grid;
  dim3 block;
  // Grid size comes from kernel_sizing; the launch uses the kernel's
  // compile-time block size __tb_PredAndSucc instead of `block`.
  kernel_sizing(grid, block);
  hipLaunchKernelGGL(( PredAndSucc) , dim3(grid), dim3(__tb_PredAndSucc), 0, 0, ctx->gg, __begin, __end, local_infinity, ctx->current_length.data.gpu_wr_ptr(), ctx->num_predecessors.data.gpu_wr_ptr(), ctx->num_successors.data.gpu_wr_ptr(), *(ctx->num_predecessors.is_updated.gpu_rd_ptr()), *(ctx->num_successors.is_updated.gpu_rd_ptr()));
  check_cuda_kernel;
}
// Count shortest-path predecessors/successors over [0, ctx->gg.nnodes).
void PredAndSucc_allNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  PredAndSucc_cuda(0, ctx->gg.nnodes, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Count shortest-path predecessors/successors over
// [ctx->beginMaster, ctx->beginMaster + ctx->numOwned).
void PredAndSucc_masterNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  PredAndSucc_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Count shortest-path predecessors/successors over [0, ctx->numNodesWithEdges).
void PredAndSucc_nodesWithEdges_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  PredAndSucc_cuda(0, ctx->numNodesWithEdges, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Launch the NumShortestPathsChanges kernel over nodes [__begin, __end),
// folding the staged to_add/trim values into num_shortest_paths and
// num_predecessors/propagation_flag state after a synchronization step.
void NumShortestPathsChanges_cuda(unsigned int __begin, unsigned int __end, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  dim3 grid;
  dim3 block;
  kernel_sizing(grid, block);
  hipLaunchKernelGGL(( NumShortestPathsChanges) , dim3(grid), dim3(block), 0, 0, ctx->gg, __begin, __end, local_infinity, ctx->current_length.data.gpu_wr_ptr(), ctx->num_predecessors.data.gpu_wr_ptr(), ctx->num_shortest_paths.data.gpu_wr_ptr(), ctx->propagation_flag.data.gpu_wr_ptr(), ctx->to_add.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr(), *(ctx->num_shortest_paths.is_updated.gpu_rd_ptr()));
  check_cuda_kernel;
}
// Apply staged shortest-path-count changes over [0, ctx->gg.nnodes).
void NumShortestPathsChanges_allNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  NumShortestPathsChanges_cuda(0, ctx->gg.nnodes, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Apply staged shortest-path-count changes over
// [ctx->beginMaster, ctx->beginMaster + ctx->numOwned).
void NumShortestPathsChanges_masterNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  NumShortestPathsChanges_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Apply staged shortest-path-count changes over [0, ctx->numNodesWithEdges).
void NumShortestPathsChanges_nodesWithEdges_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  NumShortestPathsChanges_cuda(0, ctx->numNodesWithEdges, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Host wrapper for one NumShortestPaths round over nodes in [__begin, __end).
// Propagates shortest-path counts along the SSSP DAG; the number of updates is
// reduced into DGAccumulator_accum for convergence detection.
// NOTE(review): local_current_src_node is accepted here but never forwarded to
// the kernel launch below — confirm against the kernel signature whether this
// parameter is intentionally unused (generated code often keeps it for
// interface symmetry with the CPU implementation).
void NumShortestPaths_cuda(unsigned int __begin, unsigned int __end, uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t local_current_src_node, struct CUDA_Context* ctx)
{
  dim3 blocks;
  dim3 threads;
  HGAccumulator<uint32_t> _DGAccumulator_accum;
  // FP: "1 -> 2;
  // FP: "2 -> 3;
  // FP: "3 -> 4;
  kernel_sizing(blocks, threads);
  // FP: "4 -> 5;
  // Device-visible reduction cell: zeroed on the host, written by the kernel
  // through the HGAccumulator, read back after the launch.
  Shared<uint32_t> DGAccumulator_accumval = Shared<uint32_t>(1);
  // FP: "5 -> 6;
  // FP: "6 -> 7;
  *(DGAccumulator_accumval.cpu_wr_ptr()) = 0;
  // FP: "7 -> 8;
  _DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr();
  // FP: "8 -> 9;
  // Block size is the kernel's compile-time __tb_NumShortestPaths.
  hipLaunchKernelGGL(( NumShortestPaths) , dim3(blocks), dim3(__tb_NumShortestPaths), 0, 0, ctx->gg, __begin, __end, local_infinity, ctx->current_length.data.gpu_wr_ptr(), ctx->num_shortest_paths.data.gpu_wr_ptr(), ctx->propagation_flag.data.gpu_wr_ptr(), ctx->to_add.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr(), *(ctx->to_add.is_updated.gpu_rd_ptr()), *(ctx->trim.is_updated.gpu_rd_ptr()), _DGAccumulator_accum);
  // FP: "9 -> 10;
  check_cuda_kernel;
  // FP: "10 -> 11;
  DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr());
  // FP: "11 -> 12;
}
// One NumShortestPaths round over [0, ctx->gg.nnodes).
void NumShortestPaths_allNodes_cuda(uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t local_current_src_node, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  NumShortestPaths_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, local_infinity, local_current_src_node, ctx);
  // FP: "2 -> 3;
}
// One NumShortestPaths round over
// [ctx->beginMaster, ctx->beginMaster + ctx->numOwned).
void NumShortestPaths_masterNodes_cuda(uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t local_current_src_node, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  NumShortestPaths_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, local_infinity, local_current_src_node, ctx);
  // FP: "2 -> 3;
}
// One NumShortestPaths round over [0, ctx->numNodesWithEdges).
void NumShortestPaths_nodesWithEdges_cuda(uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t local_current_src_node, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  NumShortestPaths_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, local_infinity, local_current_src_node, ctx);
  // FP: "2 -> 3;
}
// Launch the PropagationFlagUpdate kernel over nodes [__begin, __end),
// refreshing each node's propagation_flag from current_length/num_successors
// and marking the flag's is_updated bitset for the sync layer.
void PropagationFlagUpdate_cuda(unsigned int __begin, unsigned int __end, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  dim3 grid;
  dim3 block;
  kernel_sizing(grid, block);
  hipLaunchKernelGGL(( PropagationFlagUpdate) , dim3(grid), dim3(block), 0, 0, ctx->gg, __begin, __end, local_infinity, ctx->current_length.data.gpu_wr_ptr(), ctx->num_successors.data.gpu_wr_ptr(), ctx->propagation_flag.data.gpu_wr_ptr(), *(ctx->propagation_flag.is_updated.gpu_rd_ptr()));
  check_cuda_kernel;
}
// Update propagation flags over [0, ctx->gg.nnodes).
void PropagationFlagUpdate_allNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  PropagationFlagUpdate_cuda(0, ctx->gg.nnodes, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Update propagation flags over
// [ctx->beginMaster, ctx->beginMaster + ctx->numOwned).
void PropagationFlagUpdate_masterNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  PropagationFlagUpdate_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Update propagation flags over [0, ctx->numNodesWithEdges).
void PropagationFlagUpdate_nodesWithEdges_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  PropagationFlagUpdate_cuda(0, ctx->numNodesWithEdges, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Launch the DependencyPropChanges kernel over nodes [__begin, __end),
// folding staged to_add_float/trim values into the dependency and
// propagation_flag state; marks both is_updated bitsets for the sync layer.
void DependencyPropChanges_cuda(unsigned int __begin, unsigned int __end, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  dim3 grid;
  dim3 block;
  kernel_sizing(grid, block);
  hipLaunchKernelGGL(( DependencyPropChanges) , dim3(grid), dim3(block), 0, 0, ctx->gg, __begin, __end, local_infinity, ctx->current_length.data.gpu_wr_ptr(), ctx->dependency.data.gpu_wr_ptr(), ctx->num_successors.data.gpu_wr_ptr(), ctx->propagation_flag.data.gpu_wr_ptr(), ctx->to_add_float.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr(), *(ctx->dependency.is_updated.gpu_rd_ptr()), *(ctx->propagation_flag.is_updated.gpu_rd_ptr()));
  check_cuda_kernel;
}
// Apply staged dependency changes over [0, ctx->gg.nnodes).
void DependencyPropChanges_allNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  DependencyPropChanges_cuda(0, ctx->gg.nnodes, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Apply staged dependency changes over
// [ctx->beginMaster, ctx->beginMaster + ctx->numOwned).
void DependencyPropChanges_masterNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  DependencyPropChanges_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Apply staged dependency changes over [0, ctx->numNodesWithEdges).
void DependencyPropChanges_nodesWithEdges_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  DependencyPropChanges_cuda(0, ctx->numNodesWithEdges, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Host wrapper for one DependencyPropagation round over nodes in
// [__begin, __end). Propagates dependency values backward along the SSSP DAG
// (skipping local_current_src_node); the number of updates is reduced into
// DGAccumulator_accum so the caller can detect convergence.
void DependencyPropagation_cuda(unsigned int __begin, unsigned int __end, uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
  dim3 blocks;
  dim3 threads;
  HGAccumulator<uint32_t> _DGAccumulator_accum;
  // FP: "1 -> 2;
  // FP: "2 -> 3;
  // FP: "3 -> 4;
  kernel_sizing(blocks, threads);
  // FP: "4 -> 5;
  // Device-visible reduction cell: zeroed on the host, written by the kernel
  // through the HGAccumulator, read back after the launch.
  Shared<uint32_t> DGAccumulator_accumval = Shared<uint32_t>(1);
  // FP: "5 -> 6;
  // FP: "6 -> 7;
  *(DGAccumulator_accumval.cpu_wr_ptr()) = 0;
  // FP: "7 -> 8;
  _DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr();
  // FP: "8 -> 9;
  // Block size is the kernel's compile-time __tb_DependencyPropagation.
  hipLaunchKernelGGL(( DependencyPropagation) , dim3(blocks), dim3(__tb_DependencyPropagation), 0, 0, ctx->gg, __begin, __end, local_current_src_node, local_infinity, ctx->current_length.data.gpu_wr_ptr(), ctx->dependency.data.gpu_wr_ptr(), ctx->num_shortest_paths.data.gpu_wr_ptr(), ctx->num_successors.data.gpu_wr_ptr(), ctx->propagation_flag.data.gpu_wr_ptr(), ctx->to_add_float.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr(), *(ctx->to_add_float.is_updated.gpu_rd_ptr()), *(ctx->trim.is_updated.gpu_rd_ptr()), _DGAccumulator_accum);
  // FP: "9 -> 10;
  check_cuda_kernel;
  // FP: "10 -> 11;
  DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr());
  // FP: "11 -> 12;
}
// One DependencyPropagation round over [0, ctx->gg.nnodes).
void DependencyPropagation_allNodes_cuda(uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  DependencyPropagation_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, local_infinity, local_current_src_node, ctx);
  // FP: "2 -> 3;
}
// One DependencyPropagation round over
// [ctx->beginMaster, ctx->beginMaster + ctx->numOwned).
void DependencyPropagation_masterNodes_cuda(uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  DependencyPropagation_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, local_infinity, local_current_src_node, ctx);
  // FP: "2 -> 3;
}
// One DependencyPropagation round over [0, ctx->numNodesWithEdges).
void DependencyPropagation_nodesWithEdges_cuda(uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  DependencyPropagation_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, local_infinity, local_current_src_node, ctx);
  // FP: "2 -> 3;
}
// Launch the BC kernel over nodes [__begin, __end), passing the
// betweenness-centrality and dependency arrays so per-node dependency results
// can be folded into the centrality scores.
void BC_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
  dim3 grid;
  dim3 block;
  kernel_sizing(grid, block);
  hipLaunchKernelGGL(( BC) , dim3(grid), dim3(block), 0, 0, ctx->gg, __begin, __end, ctx->betweeness_centrality.data.gpu_wr_ptr(), ctx->dependency.data.gpu_wr_ptr());
  check_cuda_kernel;
}
// Run the BC accumulation over [0, ctx->gg.nnodes).
void BC_allNodes_cuda(struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  BC_cuda(0, ctx->gg.nnodes, ctx);
  // FP: "2 -> 3;
}
// Run the BC accumulation over
// [ctx->beginMaster, ctx->beginMaster + ctx->numOwned).
void BC_masterNodes_cuda(struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  BC_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
  // FP: "2 -> 3;
}
// Run the BC accumulation over [0, ctx->numNodesWithEdges).
void BC_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  BC_cuda(0, ctx->numNodesWithEdges, ctx);
  // FP: "2 -> 3;
}
// Host wrapper for the Sanity kernel over nodes [__begin, __end): reduces the
// sum, max, and min of the betweenness-centrality values into the three
// reference out-parameters. Each reduction uses one device-visible Shared<>
// cell zeroed on the host and read back after the launch.
void Sanity_cuda(unsigned int __begin, unsigned int __end, float & DGAccumulator_sum, float & DGAccumulator_max, float & DGAccumulator_min, struct CUDA_Context* ctx)
{
  dim3 blocks;
  dim3 threads;
  HGAccumulator<float> _DGAccumulator_sum;
  HGReduceMax<float> _DGAccumulator_max;
  HGReduceMin<float> _DGAccumulator_min;
  // FP: "1 -> 2;
  // FP: "2 -> 3;
  // FP: "3 -> 4;
  kernel_sizing(blocks, threads);
  // FP: "4 -> 5;
  Shared<float> DGAccumulator_sumval = Shared<float>(1);
  // FP: "5 -> 6;
  // FP: "6 -> 7;
  *(DGAccumulator_sumval.cpu_wr_ptr()) = 0;
  // FP: "7 -> 8;
  _DGAccumulator_sum.rv = DGAccumulator_sumval.gpu_wr_ptr();
  // FP: "8 -> 9;
  Shared<float> DGAccumulator_maxval = Shared<float>(1);
  // FP: "9 -> 10;
  // FP: "10 -> 11;
  *(DGAccumulator_maxval.cpu_wr_ptr()) = 0;
  // FP: "11 -> 12;
  _DGAccumulator_max.rv = DGAccumulator_maxval.gpu_wr_ptr();
  // FP: "12 -> 13;
  Shared<float> DGAccumulator_minval = Shared<float>(1);
  // FP: "13 -> 14;
  // FP: "14 -> 15;
  // NOTE(review): the min (and max) accumulators start at 0, so the reported
  // minimum can never exceed 0 and the maximum never falls below 0. This is
  // fine only if BC values are non-negative and/or the caller combines these
  // with its own identity elements — confirm against the CPU-side reduction.
  *(DGAccumulator_minval.cpu_wr_ptr()) = 0;
  // FP: "15 -> 16;
  _DGAccumulator_min.rv = DGAccumulator_minval.gpu_wr_ptr();
  // FP: "16 -> 17;
  hipLaunchKernelGGL(( Sanity) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, ctx->betweeness_centrality.data.gpu_wr_ptr(), _DGAccumulator_sum, _DGAccumulator_max, _DGAccumulator_min);
  // FP: "17 -> 18;
  check_cuda_kernel;
  // FP: "18 -> 19;
  // Read the three reduced values back on the host.
  DGAccumulator_sum = *(DGAccumulator_sumval.cpu_rd_ptr());
  // FP: "19 -> 20;
  DGAccumulator_max = *(DGAccumulator_maxval.cpu_rd_ptr());
  // FP: "20 -> 21;
  DGAccumulator_min = *(DGAccumulator_minval.cpu_rd_ptr());
  // FP: "21 -> 22;
}
// Sanity reductions over [0, ctx->gg.nnodes).
void Sanity_allNodes_cuda(float & DGAccumulator_sum, float & DGAccumulator_max, float & DGAccumulator_min, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  Sanity_cuda(0, ctx->gg.nnodes, DGAccumulator_sum, DGAccumulator_max, DGAccumulator_min, ctx);
  // FP: "2 -> 3;
}
// Sanity reductions over [ctx->beginMaster, ctx->beginMaster + ctx->numOwned).
void Sanity_masterNodes_cuda(float & DGAccumulator_sum, float & DGAccumulator_max, float & DGAccumulator_min, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  Sanity_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_sum, DGAccumulator_max, DGAccumulator_min, ctx);
  // FP: "2 -> 3;
}
// Sanity reductions over [0, ctx->numNodesWithEdges).
void Sanity_nodesWithEdges_cuda(float & DGAccumulator_sum, float & DGAccumulator_max, float & DGAccumulator_min, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  Sanity_cuda(0, ctx->numNodesWithEdges, DGAccumulator_sum, DGAccumulator_max, DGAccumulator_min, ctx);
  // FP: "2 -> 3;
}
| 03c0e3aa65231c98ad9b9f9ee36498404c995b50.cu | /*
* This file belongs to the Galois project, a C++ library for exploiting parallelism.
* The code is being released under the terms of the 3-Clause BSD License (a
* copy is located in LICENSE.txt at the top-level directory).
*
* Copyright (C) 2018, The University of Texas at Austin. All rights reserved.
* UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS
* SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF
* PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF
* DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH
* RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances
* shall University be liable for incidental, special, indirect, direct or
* consequential damages or loss of profits, interruption of business, or
* related expenses which may arise from use of Software or Documentation,
* including but not limited to those resulting from defects in Software and/or
* Documentation, or loss or inaccuracy of data of any kind.
*/
/* -*- mode: c++ -*- */
#include "gg.h"
#include "ggcuda.h"
void kernel_sizing(CSRGraph &, dim3 &, dim3 &);
#define TB_SIZE 256
const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic";
#include "gen_cuda.cuh"
static const int __tb_NumShortestPaths = TB_SIZE;
static const int __tb_FirstIterationSSSP = TB_SIZE;
static const int __tb_SSSP = TB_SIZE;
static const int __tb_DependencyPropagation = TB_SIZE;
static const int __tb_PredAndSucc = TB_SIZE;
__global__ void InitializeGraph(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_betweeness_centrality, float * p_dependency, uint32_t * p_num_predecessors, uint64_t * p_num_shortest_paths, uint32_t * p_num_successors, uint8_t * p_propagation_flag, uint64_t * p_to_add, float * p_to_add_float, uint32_t * p_trim)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
index_type src_end;
// FP: "1 -> 2;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
p_betweeness_centrality[src] = 0;
p_num_shortest_paths[src] = 0;
p_num_successors[src] = 0;
p_num_predecessors[src] = 0;
p_trim[src] = 0;
p_to_add[src] = 0;
p_to_add_float[src] = 0;
p_dependency[src] = 0;
p_propagation_flag[src] = false;
}
}
// FP: "15 -> 16;
}
__global__ void InitializeIteration(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint64_t local_current_src_node, const uint32_t local_infinity, uint32_t * p_current_length, float * p_dependency, uint32_t * p_num_predecessors, uint64_t * p_num_shortest_paths, uint32_t * p_num_successors, uint32_t * p_old_length, uint8_t * p_propagation_flag)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = TB_SIZE;
bool is_source;
index_type src_end;
// FP: "1 -> 2;
// FP: "2 -> 3;
src_end = __end;
for (index_type src = __begin + tid; src < src_end; src += nthreads)
{
bool pop = src < __end;
if (pop)
{
is_source = graph.node_data[src] == local_current_src_node;
if (!is_source)
{
p_current_length[src] = local_infinity;
p_old_length[src] = local_infinity;
p_num_shortest_paths[src] = 0;
p_propagation_flag[src] = false;
}
else
{
p_current_length[src] = 0;
p_old_length[src] = 0;
p_num_shortest_paths[src] = 1;
p_propagation_flag[src] = true;
}
p_num_predecessors[src] = 0;
p_num_successors[src] = 0;
p_dependency[src] = 0;
}
}
// FP: "21 -> 22;
}
__global__ void FirstIterationSSSP(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_current_length, DynamicBitset& bitset_current_length)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_FirstIterationSSSP;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "6 -> 7;
bool pop = src < __end;
// FP: "7 -> 8;
if (pop)
{
}
// FP: "9 -> 10;
// FP: "12 -> 13;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "13 -> 14;
__shared__ struct { index_type src; } _np_closure [TB_SIZE];
// FP: "14 -> 15;
_np_closure[threadIdx.x].src = src;
// FP: "15 -> 16;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "18 -> 19;
// FP: "19 -> 20;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "20 -> 21;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "21 -> 22;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "24 -> 25;
__syncthreads();
// FP: "25 -> 26;
while (true)
{
// FP: "26 -> 27;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "29 -> 30;
__syncthreads();
// FP: "30 -> 31;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "31 -> 32;
__syncthreads();
// FP: "32 -> 33;
break;
}
// FP: "34 -> 35;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "37 -> 38;
__syncthreads();
// FP: "38 -> 39;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "39 -> 40;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "42 -> 43;
assert(nps.tb.src < __kernel_tb_size);
src = _np_closure[nps.tb.src].src;
// FP: "43 -> 44;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type current_edge;
current_edge = ns +_np_j;
{
index_type dst;
int edge_weight;
uint32_t new_dist;
dst = graph.getAbsDestination(current_edge);
edge_weight = 1;
#ifndef __USE_BFS__
edge_weight += graph.getAbsWeight(current_edge);
#endif
new_dist = edge_weight + p_current_length[src];
atomicTestMin(&p_current_length[dst], new_dist);
bitset_current_length.set(dst);
}
}
// FP: "56 -> 57;
__syncthreads();
}
// FP: "58 -> 59;
// FP: "59 -> 60;
{
const int warpid = threadIdx.x / 32;
// FP: "60 -> 61;
const int _np_laneid = cub::LaneId();
// FP: "61 -> 62;
while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
src = _np_closure[nps.warp.src[warpid]].src;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type current_edge;
current_edge = _np_w_start +_np_ii;
{
index_type dst;
int edge_weight;
uint32_t new_dist;
dst = graph.getAbsDestination(current_edge);
edge_weight = 1;
#ifndef __USE_BFS__
edge_weight += graph.getAbsWeight(current_edge);
#endif
new_dist = edge_weight + p_current_length[src];
atomicTestMin(&p_current_length[dst], new_dist);
bitset_current_length.set(dst);
}
}
}
// FP: "84 -> 85;
__syncthreads();
// FP: "85 -> 86;
}
// FP: "86 -> 87;
__syncthreads();
// FP: "87 -> 88;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "88 -> 89;
while (_np.work())
{
// FP: "89 -> 90;
int _np_i =0;
// FP: "90 -> 91;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "91 -> 92;
__syncthreads();
// FP: "92 -> 93;
// FP: "93 -> 94;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type current_edge;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
src = _np_closure[nps.fg.src[_np_i]].src;
current_edge= nps.fg.itvalue[_np_i];
{
index_type dst;
int edge_weight;
uint32_t new_dist;
dst = graph.getAbsDestination(current_edge);
edge_weight = 1;
#ifndef __USE_BFS__
edge_weight += graph.getAbsWeight(current_edge);
#endif
new_dist = edge_weight + p_current_length[src];
atomicTestMin(&p_current_length[dst], new_dist);
bitset_current_length.set(dst);
}
}
// FP: "107 -> 108;
_np.execute_round_done(ITSIZE);
// FP: "108 -> 109;
__syncthreads();
}
// FP: "110 -> 111;
assert(threadIdx.x < __kernel_tb_size);
src = _np_closure[threadIdx.x].src;
}
// FP: "112 -> 113;
}
__global__ void SSSP(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_current_length, uint32_t * p_old_length, DynamicBitset& bitset_current_length, HGAccumulator<uint32_t> DGAccumulator_accum)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_SSSP;
__shared__ cub::BlockReduce<uint32_t, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
// FP: "6 -> 7;
DGAccumulator_accum.thread_entry();
// FP: "7 -> 8;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "8 -> 9;
bool pop = src < __end;
// FP: "9 -> 10;
if (pop)
{
if (p_old_length[src] > p_current_length[src])
{
p_old_length[src] = p_current_length[src];
}
else
{
pop = false;
}
}
// FP: "14 -> 15;
// FP: "17 -> 18;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "18 -> 19;
__shared__ struct { index_type src; } _np_closure [TB_SIZE];
// FP: "19 -> 20;
_np_closure[threadIdx.x].src = src;
// FP: "20 -> 21;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "23 -> 24;
// FP: "24 -> 25;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "25 -> 26;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "26 -> 27;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "29 -> 30;
__syncthreads();
// FP: "30 -> 31;
while (true)
{
// FP: "31 -> 32;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "34 -> 35;
__syncthreads();
// FP: "35 -> 36;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "36 -> 37;
__syncthreads();
// FP: "37 -> 38;
break;
}
// FP: "39 -> 40;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "42 -> 43;
__syncthreads();
// FP: "43 -> 44;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "44 -> 45;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "47 -> 48;
assert(nps.tb.src < __kernel_tb_size);
src = _np_closure[nps.tb.src].src;
// FP: "48 -> 49;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type current_edge;
current_edge = ns +_np_j;
{
index_type dst;
int edge_weight;
uint32_t new_dist;
uint32_t old;
dst = graph.getAbsDestination(current_edge);
edge_weight = 1;
#ifndef __USE_BFS__
edge_weight += graph.getAbsWeight(current_edge);
#endif
new_dist = edge_weight + p_current_length[src];
old = atomicTestMin(&p_current_length[dst], new_dist);
if (old > new_dist)
{
bitset_current_length.set(dst);
DGAccumulator_accum.reduce( 1);
}
}
}
// FP: "65 -> 66;
__syncthreads();
}
// FP: "67 -> 68;
// FP: "68 -> 69;
{
const int warpid = threadIdx.x / 32;
// FP: "69 -> 70;
const int _np_laneid = cub::LaneId();
// FP: "70 -> 71;
while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
src = _np_closure[nps.warp.src[warpid]].src;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type current_edge;
current_edge = _np_w_start +_np_ii;
{
index_type dst;
int edge_weight;
uint32_t new_dist;
uint32_t old;
dst = graph.getAbsDestination(current_edge);
edge_weight = 1;
#ifndef __USE_BFS__
edge_weight += graph.getAbsWeight(current_edge);
#endif
new_dist = edge_weight + p_current_length[src];
old = atomicTestMin(&p_current_length[dst], new_dist);
if (old > new_dist)
{
bitset_current_length.set(dst);
DGAccumulator_accum.reduce( 1);
}
}
}
}
// FP: "97 -> 98;
__syncthreads();
// FP: "98 -> 99;
}
// FP: "99 -> 100;
__syncthreads();
// FP: "100 -> 101;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "101 -> 102;
while (_np.work())
{
// FP: "102 -> 103;
int _np_i =0;
// FP: "103 -> 104;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "104 -> 105;
__syncthreads();
// FP: "105 -> 106;
// FP: "106 -> 107;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type current_edge;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
src = _np_closure[nps.fg.src[_np_i]].src;
current_edge= nps.fg.itvalue[_np_i];
{
index_type dst;
int edge_weight;
uint32_t new_dist;
uint32_t old;
dst = graph.getAbsDestination(current_edge);
edge_weight = 1;
#ifndef __USE_BFS__
edge_weight += graph.getAbsWeight(current_edge);
#endif
new_dist = edge_weight + p_current_length[src];
old = atomicTestMin(&p_current_length[dst], new_dist);
if (old > new_dist)
{
bitset_current_length.set(dst);
DGAccumulator_accum.reduce( 1);
}
}
}
// FP: "124 -> 125;
_np.execute_round_done(ITSIZE);
// FP: "125 -> 126;
__syncthreads();
}
// FP: "127 -> 128;
assert(threadIdx.x < __kernel_tb_size);
src = _np_closure[threadIdx.x].src;
}
// FP: "130 -> 131;
DGAccumulator_accum.thread_exit<cub::BlockReduce<uint32_t, TB_SIZE>>(DGAccumulator_accum_ts);
// FP: "131 -> 132;
}
__global__ void PredAndSucc(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, uint32_t * p_current_length, uint32_t * p_num_predecessors, uint32_t * p_num_successors, DynamicBitset& bitset_num_predecessors, DynamicBitset& bitset_num_successors)
{
unsigned tid = TID_1D;
unsigned nthreads = TOTAL_THREADS_1D;
const unsigned __kernel_tb_size = __tb_PredAndSucc;
index_type src_end;
index_type src_rup;
// FP: "1 -> 2;
const int _NP_CROSSOVER_WP = 32;
const int _NP_CROSSOVER_TB = __kernel_tb_size;
// FP: "2 -> 3;
const int BLKSIZE = __kernel_tb_size;
const int ITSIZE = BLKSIZE * 8;
// FP: "3 -> 4;
typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
// FP: "4 -> 5;
__shared__ npsTy nps ;
// FP: "5 -> 6;
src_end = __end;
src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
for (index_type src = __begin + tid; src < src_rup; src += nthreads)
{
multiple_sum<2, index_type> _np_mps;
multiple_sum<2, index_type> _np_mps_total;
// FP: "6 -> 7;
bool pop = src < __end;
// FP: "7 -> 8;
if (pop)
{
if (p_current_length[src] != local_infinity)
{
}
else
{
pop = false;
}
}
// FP: "12 -> 13;
// FP: "15 -> 16;
struct NPInspector1 _np = {0,0,0,0,0,0};
// FP: "16 -> 17;
__shared__ struct { index_type src; } _np_closure [TB_SIZE];
// FP: "17 -> 18;
_np_closure[threadIdx.x].src = src;
// FP: "18 -> 19;
if (pop)
{
_np.size = (graph).getOutDegree(src);
_np.start = (graph).getFirstEdge(src);
}
// FP: "21 -> 22;
// FP: "22 -> 23;
_np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
_np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
// FP: "23 -> 24;
BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
// FP: "24 -> 25;
if (threadIdx.x == 0)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "27 -> 28;
__syncthreads();
// FP: "28 -> 29;
while (true)
{
// FP: "29 -> 30;
if (_np.size >= _NP_CROSSOVER_TB)
{
nps.tb.owner = threadIdx.x;
}
// FP: "32 -> 33;
__syncthreads();
// FP: "33 -> 34;
if (nps.tb.owner == MAX_TB_SIZE + 1)
{
// FP: "34 -> 35;
__syncthreads();
// FP: "35 -> 36;
break;
}
// FP: "37 -> 38;
if (nps.tb.owner == threadIdx.x)
{
nps.tb.start = _np.start;
nps.tb.size = _np.size;
nps.tb.src = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
// FP: "40 -> 41;
__syncthreads();
// FP: "41 -> 42;
int ns = nps.tb.start;
int ne = nps.tb.size;
// FP: "42 -> 43;
if (nps.tb.src == threadIdx.x)
{
nps.tb.owner = MAX_TB_SIZE + 1;
}
// FP: "45 -> 46;
assert(nps.tb.src < __kernel_tb_size);
src = _np_closure[nps.tb.src].src;
// FP: "46 -> 47;
for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
{
index_type current_edge;
current_edge = ns +_np_j;
{
index_type dst;
int edge_weight;
dst = graph.getAbsDestination(current_edge);
edge_weight = 1;
#ifndef __USE_BFS__
edge_weight += graph.getAbsWeight(current_edge);
#endif
if ((p_current_length[src] + edge_weight) == p_current_length[dst])
{
atomicTestAdd(&p_num_successors[src], (unsigned int)1);
atomicTestAdd(&p_num_predecessors[dst], (unsigned int)1);
bitset_num_successors.set(src);
bitset_num_predecessors.set(dst);
}
}
}
// FP: "61 -> 62;
__syncthreads();
}
// FP: "63 -> 64;
// FP: "64 -> 65;
{
const int warpid = threadIdx.x / 32;
// FP: "65 -> 66;
const int _np_laneid = cub::LaneId();
// FP: "66 -> 67;
while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
{
if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
{
nps.warp.owner[warpid] = _np_laneid;
}
if (nps.warp.owner[warpid] == _np_laneid)
{
nps.warp.start[warpid] = _np.start;
nps.warp.size[warpid] = _np.size;
nps.warp.src[warpid] = threadIdx.x;
_np.start = 0;
_np.size = 0;
}
index_type _np_w_start = nps.warp.start[warpid];
index_type _np_w_size = nps.warp.size[warpid];
assert(nps.warp.src[warpid] < __kernel_tb_size);
src = _np_closure[nps.warp.src[warpid]].src;
for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
{
index_type current_edge;
current_edge = _np_w_start +_np_ii;
{
index_type dst;
int edge_weight;
dst = graph.getAbsDestination(current_edge);
edge_weight = 1;
#ifndef __USE_BFS__
edge_weight += graph.getAbsWeight(current_edge);
#endif
if ((p_current_length[src] + edge_weight) == p_current_length[dst])
{
atomicTestAdd(&p_num_successors[src], (unsigned int)1);
atomicTestAdd(&p_num_predecessors[dst], (unsigned int)1);
bitset_num_successors.set(src);
bitset_num_predecessors.set(dst);
}
}
}
}
// FP: "91 -> 92;
__syncthreads();
// FP: "92 -> 93;
}
// FP: "93 -> 94;
__syncthreads();
// FP: "94 -> 95;
_np.total = _np_mps_total.el[1];
_np.offset = _np_mps.el[1];
// FP: "95 -> 96;
while (_np.work())
{
// FP: "96 -> 97;
int _np_i =0;
// FP: "97 -> 98;
_np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
// FP: "98 -> 99;
__syncthreads();
// FP: "99 -> 100;
// FP: "100 -> 101;
for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
{
index_type current_edge;
assert(nps.fg.src[_np_i] < __kernel_tb_size);
src = _np_closure[nps.fg.src[_np_i]].src;
current_edge= nps.fg.itvalue[_np_i];
{
index_type dst;
int edge_weight;
dst = graph.getAbsDestination(current_edge);
edge_weight = 1;
#ifndef __USE_BFS__
edge_weight += graph.getAbsWeight(current_edge);
#endif
if ((p_current_length[src] + edge_weight) == p_current_length[dst])
{
atomicTestAdd(&p_num_successors[src], (unsigned int)1);
atomicTestAdd(&p_num_predecessors[dst], (unsigned int)1);
bitset_num_successors.set(src);
bitset_num_predecessors.set(dst);
}
}
}
// FP: "116 -> 117;
_np.execute_round_done(ITSIZE);
// FP: "117 -> 118;
__syncthreads();
}
// FP: "119 -> 120;
assert(threadIdx.x < __kernel_tb_size);
src = _np_closure[threadIdx.x].src;
}
// FP: "121 -> 122;
}
__global__ void NumShortestPathsChanges(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, uint32_t * p_current_length, uint32_t * p_num_predecessors, uint64_t * p_num_shortest_paths, uint8_t * p_propagation_flag, uint64_t * p_to_add, uint32_t * p_trim, DynamicBitset& bitset_num_shortest_paths)
{
  // Folds the per-node deltas produced by the NumShortestPaths round into the
  // node state: consumes pending predecessor trims and pending shortest-path
  // additions.  Grid-stride loop over [__begin, __end).
  const unsigned gtid    = TID_1D;
  const unsigned gstride = TOTAL_THREADS_1D;
  for (index_type node = __begin + gtid; node < __end; node += gstride)
  {
    // Unreached nodes (distance still "infinity") carry no pending work.
    if (p_current_length[node] == local_infinity) continue;
    const uint32_t pending_trim = p_trim[node];
    if (pending_trim > 0)
    {
      // Consume the trim; once all predecessors are accounted for, the node
      // becomes ready to propagate its shortest-path count.
      p_num_predecessors[node] = p_num_predecessors[node] - pending_trim;
      p_trim[node] = 0;
      if (p_num_predecessors[node] == 0)
      {
        p_propagation_flag[node] = true;
      }
    }
    const uint64_t pending_paths = p_to_add[node];
    if (pending_paths > 0)
    {
      // Fold the accumulated path count in and mark the field dirty for sync.
      p_num_shortest_paths[node] += pending_paths;
      p_to_add[node] = 0;
      bitset_num_shortest_paths.set(node);
    }
  }
}
// Forwards each flagged node's shortest-path count to neighbors whose distance
// equals this node's distance plus the edge weight, accumulating into
// p_to_add/p_trim on the destination via atomics.  Edges are distributed with
// the IrGL nested-parallelism scheme: whole-thread-block cooperation for very
// high-degree nodes, warp cooperation for medium degrees, and a fine-grained
// shared-memory worklist for the rest.  DGAccumulator_accum counts the edge
// updates performed (non-zero means another round is needed).
__global__ void NumShortestPaths(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, uint32_t * p_current_length, uint64_t * p_num_shortest_paths, uint8_t * p_propagation_flag, uint64_t * p_to_add, uint32_t * p_trim, DynamicBitset& bitset_to_add, DynamicBitset& bitset_trim, HGAccumulator<uint32_t> DGAccumulator_accum)
{
  unsigned tid = TID_1D;
  unsigned nthreads = TOTAL_THREADS_1D;
  const unsigned __kernel_tb_size = __tb_NumShortestPaths;
  __shared__ cub::BlockReduce<uint32_t, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
  index_type src_end;
  index_type src_rup;
  // FP: "1 -> 2;
  const int _NP_CROSSOVER_WP = 32;
  const int _NP_CROSSOVER_TB = __kernel_tb_size;
  // FP: "2 -> 3;
  const int BLKSIZE = __kernel_tb_size;
  const int ITSIZE = BLKSIZE * 8;
  // FP: "3 -> 4;
  typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
  typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
  // FP: "4 -> 5;
  __shared__ npsTy nps ;
  // FP: "5 -> 6;
  // FP: "6 -> 7;
  DGAccumulator_accum.thread_entry();
  // FP: "7 -> 8;
  src_end = __end;
  // Round the range up to a multiple of blockDim.x so all threads of a block
  // iterate together; the barriers below require full-block participation.
  src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
  for (index_type src = __begin + tid; src < src_rup; src += nthreads)
  {
    multiple_sum<2, index_type> _np_mps;
    multiple_sum<2, index_type> _np_mps_total;
    // FP: "8 -> 9;
    bool pop = src < __end;
    // FP: "9 -> 10;
    // Only reached nodes with the propagation flag set do work this round;
    // the flag is consumed (cleared) on pickup.
    if (pop)
    {
      if (p_current_length[src] != local_infinity)
      {
        if (p_propagation_flag[src])
        {
          p_propagation_flag[src] = false;
        }
        else
        {
          pop = false;
        }
      }
      else
      {
        pop = false;
      }
    }
    // FP: "16 -> 17;
    // FP: "19 -> 20;
    struct NPInspector1 _np = {0,0,0,0,0,0};
    // FP: "20 -> 21;
    // Closure: each thread publishes its src so whichever thread later
    // executes its edges can recover it.
    __shared__ struct { index_type src; } _np_closure [TB_SIZE];
    // FP: "21 -> 22;
    _np_closure[threadIdx.x].src = src;
    // FP: "22 -> 23;
    if (pop)
    {
      _np.size = (graph).getOutDegree(src);
      _np.start = (graph).getFirstEdge(src);
    }
    // FP: "25 -> 26;
    // FP: "26 -> 27;
    _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
    _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
    // FP: "27 -> 28;
    BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
    // FP: "28 -> 29;
    if (threadIdx.x == 0)
    {
      nps.tb.owner = MAX_TB_SIZE + 1;
    }
    // FP: "31 -> 32;
    __syncthreads();
    // FP: "32 -> 33;
    // Phase 1: the whole thread block processes, one at a time, the edge
    // lists of nodes with degree >= _NP_CROSSOVER_TB.
    while (true)
    {
      // FP: "33 -> 34;
      if (_np.size >= _NP_CROSSOVER_TB)
      {
        nps.tb.owner = threadIdx.x;
      }
      // FP: "36 -> 37;
      __syncthreads();
      // FP: "37 -> 38;
      if (nps.tb.owner == MAX_TB_SIZE + 1)
      {
        // FP: "38 -> 39;
        __syncthreads();
        // FP: "39 -> 40;
        break;
      }
      // FP: "41 -> 42;
      if (nps.tb.owner == threadIdx.x)
      {
        nps.tb.start = _np.start;
        nps.tb.size = _np.size;
        nps.tb.src = threadIdx.x;
        _np.start = 0;
        _np.size = 0;
      }
      // FP: "44 -> 45;
      __syncthreads();
      // FP: "45 -> 46;
      int ns = nps.tb.start;
      int ne = nps.tb.size;
      // FP: "46 -> 47;
      if (nps.tb.src == threadIdx.x)
      {
        nps.tb.owner = MAX_TB_SIZE + 1;
      }
      // FP: "49 -> 50;
      assert(nps.tb.src < __kernel_tb_size);
      src = _np_closure[nps.tb.src].src;
      // FP: "50 -> 51;
      for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
      {
        index_type current_edge;
        current_edge = ns +_np_j;
        {
          index_type dst;
          int edge_weight;
          uint64_t paths_to_add;
          dst = graph.getAbsDestination(current_edge);
          edge_weight = 1;
          #ifndef __USE_BFS__
          edge_weight += graph.getAbsWeight(current_edge);
          #endif
          paths_to_add = p_num_shortest_paths[src];
          // dst lies on a shortest path through src: forward the path count.
          if ((p_current_length[src] + edge_weight) == p_current_length[dst])
          {
            atomicTestAdd(&p_to_add[dst], paths_to_add);
            atomicTestAdd(&p_trim[dst], (unsigned int)1);
            bitset_to_add.set(dst);
            bitset_trim.set(dst);
            DGAccumulator_accum.reduce( 1);
          }
        }
      }
      // FP: "68 -> 69;
      __syncthreads();
    }
    // FP: "70 -> 71;
    // FP: "71 -> 72;
    // Phase 2: each warp processes the edge lists of nodes with degree in
    // [_NP_CROSSOVER_WP, _NP_CROSSOVER_TB).
    {
      const int warpid = threadIdx.x / 32;
      // FP: "72 -> 73;
      const int _np_laneid = cub::LaneId();
      // FP: "73 -> 74;
      while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
      {
        if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
        {
          nps.warp.owner[warpid] = _np_laneid;
        }
        if (nps.warp.owner[warpid] == _np_laneid)
        {
          nps.warp.start[warpid] = _np.start;
          nps.warp.size[warpid] = _np.size;
          nps.warp.src[warpid] = threadIdx.x;
          _np.start = 0;
          _np.size = 0;
        }
        index_type _np_w_start = nps.warp.start[warpid];
        index_type _np_w_size = nps.warp.size[warpid];
        assert(nps.warp.src[warpid] < __kernel_tb_size);
        src = _np_closure[nps.warp.src[warpid]].src;
        for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
        {
          index_type current_edge;
          current_edge = _np_w_start +_np_ii;
          {
            index_type dst;
            int edge_weight;
            uint64_t paths_to_add;
            dst = graph.getAbsDestination(current_edge);
            edge_weight = 1;
            #ifndef __USE_BFS__
            edge_weight += graph.getAbsWeight(current_edge);
            #endif
            paths_to_add = p_num_shortest_paths[src];
            if ((p_current_length[src] + edge_weight) == p_current_length[dst])
            {
              atomicTestAdd(&p_to_add[dst], paths_to_add);
              atomicTestAdd(&p_trim[dst], (unsigned int)1);
              bitset_to_add.set(dst);
              bitset_trim.set(dst);
              DGAccumulator_accum.reduce( 1);
            }
          }
        }
      }
      // FP: "101 -> 102;
      __syncthreads();
      // FP: "102 -> 103;
    }
    // FP: "103 -> 104;
    __syncthreads();
    // FP: "104 -> 105;
    // Phase 3: remaining low-degree edges are scattered into a shared-memory
    // worklist (fine-grained scheduling) and drained cooperatively.
    _np.total = _np_mps_total.el[1];
    _np.offset = _np_mps.el[1];
    // FP: "105 -> 106;
    while (_np.work())
    {
      // FP: "106 -> 107;
      int _np_i =0;
      // FP: "107 -> 108;
      _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
      // FP: "108 -> 109;
      __syncthreads();
      // FP: "109 -> 110;
      // FP: "110 -> 111;
      for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
      {
        index_type current_edge;
        assert(nps.fg.src[_np_i] < __kernel_tb_size);
        src = _np_closure[nps.fg.src[_np_i]].src;
        current_edge= nps.fg.itvalue[_np_i];
        {
          index_type dst;
          int edge_weight;
          uint64_t paths_to_add;
          dst = graph.getAbsDestination(current_edge);
          edge_weight = 1;
          #ifndef __USE_BFS__
          edge_weight += graph.getAbsWeight(current_edge);
          #endif
          paths_to_add = p_num_shortest_paths[src];
          if ((p_current_length[src] + edge_weight) == p_current_length[dst])
          {
            atomicTestAdd(&p_to_add[dst], paths_to_add);
            atomicTestAdd(&p_trim[dst], (unsigned int)1);
            bitset_to_add.set(dst);
            bitset_trim.set(dst);
            DGAccumulator_accum.reduce( 1);
          }
        }
      }
      // FP: "129 -> 130;
      _np.execute_round_done(ITSIZE);
      // FP: "130 -> 131;
      __syncthreads();
    }
    // FP: "132 -> 133;
    assert(threadIdx.x < __kernel_tb_size);
    src = _np_closure[threadIdx.x].src;
  }
  // FP: "136 -> 137;
  // Block-level reduction of the per-thread accumulator partials.
  DGAccumulator_accum.thread_exit<cub::BlockReduce<uint32_t, TB_SIZE>>(DGAccumulator_accum_ts);
  // FP: "137 -> 138;
}
__global__ void PropagationFlagUpdate(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, uint32_t * p_current_length, uint32_t * p_num_successors, uint8_t * p_propagation_flag, DynamicBitset& bitset_propagation_flag)
{
  // Flags every reached node that has no remaining successors so the
  // dependency-propagation phase can pick it up.  Grid-stride loop.
  const unsigned gtid    = TID_1D;
  const unsigned gstride = TOTAL_THREADS_1D;
  for (index_type node = __begin + gtid; node < __end; node += gstride)
  {
    if (p_current_length[node] != local_infinity && p_num_successors[node] == 0)
    {
      p_propagation_flag[node] = true;
      bitset_propagation_flag.set(node);  // mark dirty for synchronization
    }
  }
}
__global__ void DependencyPropChanges(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint32_t local_infinity, uint32_t * p_current_length, float * p_dependency, uint32_t * p_num_successors, uint8_t * p_propagation_flag, float * p_to_add_float, uint32_t * p_trim, DynamicBitset& bitset_dependency, DynamicBitset& bitset_propagation_flag)
{
  // Folds the per-node deltas produced by DependencyPropagation into node
  // state: pending dependency contributions and successor trims.
  const unsigned gtid    = TID_1D;
  const unsigned gstride = TOTAL_THREADS_1D;
  for (index_type node = __begin + gtid; node < __end; node += gstride)
  {
    // Unreached nodes carry no pending work.
    if (p_current_length[node] == local_infinity) continue;
    if (p_to_add_float[node] > 0.0)
    {
      // Fold the accumulated dependency contribution in; mark dirty.
      p_dependency[node] += p_to_add_float[node];
      p_to_add_float[node] = 0.0;
      bitset_dependency.set(node);
    }
    if (p_num_successors[node] == 0 && p_propagation_flag[node])
    {
      // Node has already propagated: lower its flag so it is not re-sent.
      p_propagation_flag[node] = false;
      bitset_propagation_flag.set(node);
    }
    else if (p_trim[node] > 0)
    {
      // Consume successor trims; when all successors are done, raise the
      // flag so this node propagates next round.
      p_num_successors[node] = p_num_successors[node] - p_trim[node];
      p_trim[node] = 0;
      if (p_num_successors[node] == 0)
      {
        p_propagation_flag[node] = true;
        bitset_propagation_flag.set(node);
      }
    }
  }
}
// For every reached node that still has successors, pulls dependency
// contributions from flagged out-neighbors on the next shortest-path level:
// contrib = sigma(src)/sigma(dst) * (1 + delta(dst)), accumulated atomically
// into p_to_add_float[src] together with a trim per contributing edge.  The
// BC source node itself has its successor count zeroed and is excluded.
// Uses the same three-phase IrGL edge-scheduling scheme as NumShortestPaths.
// DGAccumulator_accum counts performed edge updates (non-zero => not done).
__global__ void DependencyPropagation(CSRGraph graph, unsigned int __begin, unsigned int __end, const uint64_t local_current_src_node, const uint32_t local_infinity, uint32_t * p_current_length, float * p_dependency, uint64_t * p_num_shortest_paths, uint32_t * p_num_successors, uint8_t * p_propagation_flag, float * p_to_add_float, uint32_t * p_trim, DynamicBitset& bitset_to_add_float, DynamicBitset& bitset_trim, HGAccumulator<uint32_t> DGAccumulator_accum)
{
  unsigned tid = TID_1D;
  unsigned nthreads = TOTAL_THREADS_1D;
  const unsigned __kernel_tb_size = __tb_DependencyPropagation;
  __shared__ cub::BlockReduce<uint32_t, TB_SIZE>::TempStorage DGAccumulator_accum_ts;
  index_type src_end;
  index_type src_rup;
  // FP: "1 -> 2;
  const int _NP_CROSSOVER_WP = 32;
  const int _NP_CROSSOVER_TB = __kernel_tb_size;
  // FP: "2 -> 3;
  const int BLKSIZE = __kernel_tb_size;
  const int ITSIZE = BLKSIZE * 8;
  // FP: "3 -> 4;
  typedef cub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan;
  typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy;
  // FP: "4 -> 5;
  __shared__ npsTy nps ;
  // FP: "5 -> 6;
  // FP: "6 -> 7;
  DGAccumulator_accum.thread_entry();
  // FP: "7 -> 8;
  src_end = __end;
  // Round the range up to a multiple of blockDim.x so all threads of a block
  // iterate together; the barriers below require full-block participation.
  src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x)));
  for (index_type src = __begin + tid; src < src_rup; src += nthreads)
  {
    multiple_sum<2, index_type> _np_mps;
    multiple_sum<2, index_type> _np_mps_total;
    // FP: "8 -> 9;
    bool pop = src < __end;
    // FP: "9 -> 10;
    // Only reached nodes with outstanding successors participate; the BC
    // source node zeroes its successor count and is then excluded.
    if (pop)
    {
      if (p_current_length[src] != local_infinity)
      {
        if (p_num_successors[src] > 0)
        {
          if (graph.node_data[src] == local_current_src_node)
          {
            p_num_successors[src] = 0;
          }
          if (graph.node_data[src] != local_current_src_node)
          {
          }
          else
          {
            pop = false;
          }
        }
        else
        {
          pop = false;
        }
      }
      else
      {
        pop = false;
      }
    }
    // FP: "21 -> 22;
    // FP: "24 -> 25;
    struct NPInspector1 _np = {0,0,0,0,0,0};
    // FP: "25 -> 26;
    // Closure: each thread publishes its src so whichever thread later
    // executes its edges can recover it.
    __shared__ struct { index_type src; } _np_closure [TB_SIZE];
    // FP: "26 -> 27;
    _np_closure[threadIdx.x].src = src;
    // FP: "27 -> 28;
    if (pop)
    {
      _np.size = (graph).getOutDegree(src);
      _np.start = (graph).getFirstEdge(src);
    }
    // FP: "30 -> 31;
    // FP: "31 -> 32;
    _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0;
    _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? _np.size : 0;
    // FP: "32 -> 33;
    BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total);
    // FP: "33 -> 34;
    if (threadIdx.x == 0)
    {
      nps.tb.owner = MAX_TB_SIZE + 1;
    }
    // FP: "36 -> 37;
    __syncthreads();
    // FP: "37 -> 38;
    // Phase 1: the whole thread block processes, one at a time, the edge
    // lists of nodes with degree >= _NP_CROSSOVER_TB.
    while (true)
    {
      // FP: "38 -> 39;
      if (_np.size >= _NP_CROSSOVER_TB)
      {
        nps.tb.owner = threadIdx.x;
      }
      // FP: "41 -> 42;
      __syncthreads();
      // FP: "42 -> 43;
      if (nps.tb.owner == MAX_TB_SIZE + 1)
      {
        // FP: "43 -> 44;
        __syncthreads();
        // FP: "44 -> 45;
        break;
      }
      // FP: "46 -> 47;
      if (nps.tb.owner == threadIdx.x)
      {
        nps.tb.start = _np.start;
        nps.tb.size = _np.size;
        nps.tb.src = threadIdx.x;
        _np.start = 0;
        _np.size = 0;
      }
      // FP: "49 -> 50;
      __syncthreads();
      // FP: "50 -> 51;
      int ns = nps.tb.start;
      int ne = nps.tb.size;
      // FP: "51 -> 52;
      if (nps.tb.src == threadIdx.x)
      {
        nps.tb.owner = MAX_TB_SIZE + 1;
      }
      // FP: "54 -> 55;
      assert(nps.tb.src < __kernel_tb_size);
      src = _np_closure[nps.tb.src].src;
      // FP: "55 -> 56;
      for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE)
      {
        index_type current_edge;
        current_edge = ns +_np_j;
        {
          index_type dst;
          int edge_weight;
          dst = graph.getAbsDestination(current_edge);
          edge_weight = 1;
          #ifndef __USE_BFS__
          edge_weight += graph.getAbsWeight(current_edge);
          #endif
          // Pull from flagged successors that sit exactly one edge further
          // along a shortest path through src.
          if (p_propagation_flag[dst])
          {
            if ((p_current_length[src] + edge_weight) == p_current_length[dst])
            {
              float contrib;
              atomicTestAdd(&p_trim[src], (unsigned int)1);
              contrib = p_num_shortest_paths[src];
              contrib /= p_num_shortest_paths[dst];
              contrib *= (1.0 + p_dependency[dst]);
              atomicTestAdd(&p_to_add_float[src], contrib);
              bitset_trim.set(src);
              bitset_to_add_float.set(src);
              DGAccumulator_accum.reduce( 1);
            }
          }
        }
      }
      // FP: "77 -> 78;
      __syncthreads();
    }
    // FP: "79 -> 80;
    // FP: "80 -> 81;
    // Phase 2: each warp processes the edge lists of nodes with degree in
    // [_NP_CROSSOVER_WP, _NP_CROSSOVER_TB).
    {
      const int warpid = threadIdx.x / 32;
      // FP: "81 -> 82;
      const int _np_laneid = cub::LaneId();
      // FP: "82 -> 83;
      while (__any_sync(0xffffffff, _np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB))
      {
        if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)
        {
          nps.warp.owner[warpid] = _np_laneid;
        }
        if (nps.warp.owner[warpid] == _np_laneid)
        {
          nps.warp.start[warpid] = _np.start;
          nps.warp.size[warpid] = _np.size;
          nps.warp.src[warpid] = threadIdx.x;
          _np.start = 0;
          _np.size = 0;
        }
        index_type _np_w_start = nps.warp.start[warpid];
        index_type _np_w_size = nps.warp.size[warpid];
        assert(nps.warp.src[warpid] < __kernel_tb_size);
        src = _np_closure[nps.warp.src[warpid]].src;
        for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32)
        {
          index_type current_edge;
          current_edge = _np_w_start +_np_ii;
          {
            index_type dst;
            int edge_weight;
            dst = graph.getAbsDestination(current_edge);
            edge_weight = 1;
            #ifndef __USE_BFS__
            edge_weight += graph.getAbsWeight(current_edge);
            #endif
            if (p_propagation_flag[dst])
            {
              if ((p_current_length[src] + edge_weight) == p_current_length[dst])
              {
                float contrib;
                atomicTestAdd(&p_trim[src], (unsigned int)1);
                contrib = p_num_shortest_paths[src];
                contrib /= p_num_shortest_paths[dst];
                contrib *= (1.0 + p_dependency[dst]);
                atomicTestAdd(&p_to_add_float[src], contrib);
                bitset_trim.set(src);
                bitset_to_add_float.set(src);
                DGAccumulator_accum.reduce( 1);
              }
            }
          }
        }
      }
      // FP: "114 -> 115;
      __syncthreads();
      // FP: "115 -> 116;
    }
    // FP: "116 -> 117;
    __syncthreads();
    // FP: "117 -> 118;
    // Phase 3: remaining low-degree edges are scattered into a shared-memory
    // worklist (fine-grained scheduling) and drained cooperatively.
    _np.total = _np_mps_total.el[1];
    _np.offset = _np_mps.el[1];
    // FP: "118 -> 119;
    while (_np.work())
    {
      // FP: "119 -> 120;
      int _np_i =0;
      // FP: "120 -> 121;
      _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x);
      // FP: "121 -> 122;
      __syncthreads();
      // FP: "122 -> 123;
      // FP: "123 -> 124;
      for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE)
      {
        index_type current_edge;
        assert(nps.fg.src[_np_i] < __kernel_tb_size);
        src = _np_closure[nps.fg.src[_np_i]].src;
        current_edge= nps.fg.itvalue[_np_i];
        {
          index_type dst;
          int edge_weight;
          dst = graph.getAbsDestination(current_edge);
          edge_weight = 1;
          #ifndef __USE_BFS__
          edge_weight += graph.getAbsWeight(current_edge);
          #endif
          if (p_propagation_flag[dst])
          {
            if ((p_current_length[src] + edge_weight) == p_current_length[dst])
            {
              float contrib;
              atomicTestAdd(&p_trim[src], (unsigned int)1);
              contrib = p_num_shortest_paths[src];
              contrib /= p_num_shortest_paths[dst];
              contrib *= (1.0 + p_dependency[dst]);
              atomicTestAdd(&p_to_add_float[src], contrib);
              bitset_trim.set(src);
              bitset_to_add_float.set(src);
              DGAccumulator_accum.reduce( 1);
            }
          }
        }
      }
      // FP: "146 -> 147;
      _np.execute_round_done(ITSIZE);
      // FP: "147 -> 148;
      __syncthreads();
    }
    // FP: "149 -> 150;
    assert(threadIdx.x < __kernel_tb_size);
    src = _np_closure[threadIdx.x].src;
  }
  // FP: "153 -> 154;
  // Block-level reduction of the per-thread accumulator partials.
  DGAccumulator_accum.thread_exit<cub::BlockReduce<uint32_t, TB_SIZE>>(DGAccumulator_accum_ts);
  // FP: "154 -> 155;
}
__global__ void BC(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_betweeness_centrality, float * p_dependency)
{
  // Accumulates each node's dependency value for the current source into its
  // running betweenness-centrality score.  Grid-stride loop.
  const unsigned gtid    = TID_1D;
  const unsigned gstride = TOTAL_THREADS_1D;
  for (index_type node = __begin + gtid; node < __end; node += gstride)
  {
    if (p_dependency[node] > 0)
    {
      atomicTestAdd(&p_betweeness_centrality[node], p_dependency[node]);
    }
  }
}
__global__ void Sanity(CSRGraph graph, unsigned int __begin, unsigned int __end, float * p_betweeness_centrality, HGAccumulator<float> DGAccumulator_sum, HGReduceMax<float> DGAccumulator_max, HGReduceMin<float> DGAccumulator_min)
{
  // Sanity-check reduction: computes sum, max, and min of the BC scores over
  // [__begin, __end) via the distributed accumulator helpers.
  const unsigned gtid    = TID_1D;
  const unsigned gstride = TOTAL_THREADS_1D;
  __shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_sum_ts;
  __shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_max_ts;
  __shared__ cub::BlockReduce<float, TB_SIZE>::TempStorage DGAccumulator_min_ts;
  // Initialize per-thread partials for each reduction.
  DGAccumulator_sum.thread_entry();
  DGAccumulator_max.thread_entry();
  DGAccumulator_min.thread_entry();
  for (index_type node = __begin + gtid; node < __end; node += gstride)
  {
    const float bc = p_betweeness_centrality[node];
    DGAccumulator_max.reduce(bc);
    DGAccumulator_min.reduce(bc);
    DGAccumulator_sum.reduce(bc);
  }
  // Combine the per-thread partials block-wide (same exit order as entry).
  DGAccumulator_sum.thread_exit<cub::BlockReduce<float, TB_SIZE>>(DGAccumulator_sum_ts);
  DGAccumulator_max.thread_exit<cub::BlockReduce<float, TB_SIZE>>(DGAccumulator_max_ts);
  DGAccumulator_min.thread_exit<cub::BlockReduce<float, TB_SIZE>>(DGAccumulator_min_ts);
}
void InitializeGraph_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
  // Host launcher: runs InitializeGraph over [__begin, __end) with the
  // standard grid/block sizing, then checks for launch errors.
  dim3 grid;
  dim3 block;
  kernel_sizing(grid, block);
  InitializeGraph <<<grid, block>>>(ctx->gg, __begin, __end, ctx->betweeness_centrality.data.gpu_wr_ptr(), ctx->dependency.data.gpu_wr_ptr(), ctx->num_predecessors.data.gpu_wr_ptr(), ctx->num_shortest_paths.data.gpu_wr_ptr(), ctx->num_successors.data.gpu_wr_ptr(), ctx->propagation_flag.data.gpu_wr_ptr(), ctx->to_add.data.gpu_wr_ptr(), ctx->to_add_float.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr());
  check_cuda_kernel;
}
// Convenience wrapper: InitializeGraph over every node of the local graph.
void InitializeGraph_allNodes_cuda(struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  InitializeGraph_cuda(0, ctx->gg.nnodes, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: InitializeGraph over this host's master (owned) nodes.
void InitializeGraph_masterNodes_cuda(struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  InitializeGraph_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: InitializeGraph over the prefix of nodes that have edges.
void InitializeGraph_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  InitializeGraph_cuda(0, ctx->numNodesWithEdges, ctx);
  // FP: "2 -> 3;
}
void InitializeIteration_cuda(unsigned int __begin, unsigned int __end, const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
  // Host launcher: resets per-source-iteration node state for the BC round
  // rooted at local_current_src_node.
  dim3 grid;
  dim3 block;
  kernel_sizing(grid, block);
  InitializeIteration <<<grid, block>>>(ctx->gg, __begin, __end, local_current_src_node, local_infinity, ctx->current_length.data.gpu_wr_ptr(), ctx->dependency.data.gpu_wr_ptr(), ctx->num_predecessors.data.gpu_wr_ptr(), ctx->num_shortest_paths.data.gpu_wr_ptr(), ctx->num_successors.data.gpu_wr_ptr(), ctx->old_length.data.gpu_wr_ptr(), ctx->propagation_flag.data.gpu_wr_ptr());
  check_cuda_kernel;
}
// Convenience wrapper: InitializeIteration over every node of the local graph.
void InitializeIteration_allNodes_cuda(const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  InitializeIteration_cuda(0, ctx->gg.nnodes, local_infinity, local_current_src_node, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: InitializeIteration over this host's master (owned) nodes.
void InitializeIteration_masterNodes_cuda(const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  InitializeIteration_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, local_current_src_node, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: InitializeIteration over the prefix of nodes with edges.
void InitializeIteration_nodesWithEdges_cuda(const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  InitializeIteration_cuda(0, ctx->numNodesWithEdges, local_infinity, local_current_src_node, ctx);
  // FP: "2 -> 3;
}
void FirstIterationSSSP_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
  // Host launcher: seeds the SSSP computation.  The kernel uses its dedicated
  // compile-time block size (__tb_FirstIterationSSSP), not the sized one.
  dim3 grid;
  dim3 block;
  kernel_sizing(grid, block);
  FirstIterationSSSP <<<grid, __tb_FirstIterationSSSP>>>(ctx->gg, __begin, __end, ctx->current_length.data.gpu_wr_ptr(), *(ctx->current_length.is_updated.gpu_rd_ptr()));
  check_cuda_kernel;
}
// Convenience wrapper: FirstIterationSSSP over every node of the local graph.
void FirstIterationSSSP_allNodes_cuda(struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  FirstIterationSSSP_cuda(0, ctx->gg.nnodes, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: FirstIterationSSSP over this host's master (owned) nodes.
void FirstIterationSSSP_masterNodes_cuda(struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  FirstIterationSSSP_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: FirstIterationSSSP over the prefix of nodes with edges.
void FirstIterationSSSP_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  FirstIterationSSSP_cuda(0, ctx->numNodesWithEdges, ctx);
  // FP: "2 -> 3;
}
void SSSP_cuda(unsigned int __begin, unsigned int __end, uint32_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
  // Host launcher: runs one SSSP relaxation round and reports, through
  // DGAccumulator_accum, the count accumulated by the kernel (non-zero
  // means more rounds are needed).
  dim3 grid;
  dim3 block;
  kernel_sizing(grid, block);
  // Device-visible accumulator cell, zeroed before launch, read back after.
  Shared<uint32_t> accum_val = Shared<uint32_t>(1);
  *(accum_val.cpu_wr_ptr()) = 0;
  HGAccumulator<uint32_t> accum;
  accum.rv = accum_val.gpu_wr_ptr();
  SSSP <<<grid, __tb_SSSP>>>(ctx->gg, __begin, __end, ctx->current_length.data.gpu_wr_ptr(), ctx->old_length.data.gpu_wr_ptr(), *(ctx->current_length.is_updated.gpu_rd_ptr()), accum);
  check_cuda_kernel;
  DGAccumulator_accum = *(accum_val.cpu_rd_ptr());
}
// Convenience wrapper: SSSP round over every node of the local graph.
void SSSP_allNodes_cuda(uint32_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  SSSP_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: SSSP round over this host's master (owned) nodes.
void SSSP_masterNodes_cuda(uint32_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  SSSP_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: SSSP round over the prefix of nodes with edges.
void SSSP_nodesWithEdges_cuda(uint32_t & DGAccumulator_accum, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  SSSP_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, ctx);
  // FP: "2 -> 3;
}
void PredAndSucc_cuda(unsigned int __begin, unsigned int __end, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // Host launcher: counts shortest-path predecessors/successors per node.
  // The kernel uses its dedicated compile-time block size (__tb_PredAndSucc).
  dim3 grid;
  dim3 block;
  kernel_sizing(grid, block);
  PredAndSucc <<<grid, __tb_PredAndSucc>>>(ctx->gg, __begin, __end, local_infinity, ctx->current_length.data.gpu_wr_ptr(), ctx->num_predecessors.data.gpu_wr_ptr(), ctx->num_successors.data.gpu_wr_ptr(), *(ctx->num_predecessors.is_updated.gpu_rd_ptr()), *(ctx->num_successors.is_updated.gpu_rd_ptr()));
  check_cuda_kernel;
}
// Convenience wrapper: PredAndSucc over every node of the local graph.
void PredAndSucc_allNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  PredAndSucc_cuda(0, ctx->gg.nnodes, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: PredAndSucc over this host's master (owned) nodes.
void PredAndSucc_masterNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  PredAndSucc_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: PredAndSucc over the prefix of nodes with edges.
void PredAndSucc_nodesWithEdges_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  PredAndSucc_cuda(0, ctx->numNodesWithEdges, local_infinity, ctx);
  // FP: "2 -> 3;
}
void NumShortestPathsChanges_cuda(unsigned int __begin, unsigned int __end, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // Host launcher: applies the pending trim/to_add deltas to node state with
  // the standard grid/block sizing.
  dim3 grid;
  dim3 block;
  kernel_sizing(grid, block);
  NumShortestPathsChanges <<<grid, block>>>(ctx->gg, __begin, __end, local_infinity, ctx->current_length.data.gpu_wr_ptr(), ctx->num_predecessors.data.gpu_wr_ptr(), ctx->num_shortest_paths.data.gpu_wr_ptr(), ctx->propagation_flag.data.gpu_wr_ptr(), ctx->to_add.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr(), *(ctx->num_shortest_paths.is_updated.gpu_rd_ptr()));
  check_cuda_kernel;
}
// Convenience wrapper: NumShortestPathsChanges over every node of the local graph.
void NumShortestPathsChanges_allNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  NumShortestPathsChanges_cuda(0, ctx->gg.nnodes, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: NumShortestPathsChanges over this host's master nodes.
void NumShortestPathsChanges_masterNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  NumShortestPathsChanges_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: NumShortestPathsChanges over the prefix of nodes with edges.
void NumShortestPathsChanges_nodesWithEdges_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  NumShortestPathsChanges_cuda(0, ctx->numNodesWithEdges, local_infinity, ctx);
  // FP: "2 -> 3;
}
void NumShortestPaths_cuda(unsigned int __begin, unsigned int __end, uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t local_current_src_node, struct CUDA_Context* ctx)
{
  // Host launcher: one NumShortestPaths round; reports the number of edge
  // updates through DGAccumulator_accum.  NOTE(review): local_current_src_node
  // is accepted for signature parity but is not forwarded to the kernel.
  dim3 grid;
  dim3 block;
  kernel_sizing(grid, block);
  // Device-visible accumulator cell, zeroed before launch, read back after.
  Shared<uint32_t> accum_val = Shared<uint32_t>(1);
  *(accum_val.cpu_wr_ptr()) = 0;
  HGAccumulator<uint32_t> accum;
  accum.rv = accum_val.gpu_wr_ptr();
  NumShortestPaths <<<grid, __tb_NumShortestPaths>>>(ctx->gg, __begin, __end, local_infinity, ctx->current_length.data.gpu_wr_ptr(), ctx->num_shortest_paths.data.gpu_wr_ptr(), ctx->propagation_flag.data.gpu_wr_ptr(), ctx->to_add.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr(), *(ctx->to_add.is_updated.gpu_rd_ptr()), *(ctx->trim.is_updated.gpu_rd_ptr()), accum);
  check_cuda_kernel;
  DGAccumulator_accum = *(accum_val.cpu_rd_ptr());
}
// Convenience wrapper: NumShortestPaths round over every node of the local graph.
void NumShortestPaths_allNodes_cuda(uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t local_current_src_node, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  NumShortestPaths_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, local_infinity, local_current_src_node, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: NumShortestPaths round over this host's master nodes.
void NumShortestPaths_masterNodes_cuda(uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t local_current_src_node, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  NumShortestPaths_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, local_infinity, local_current_src_node, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: NumShortestPaths round over the prefix of nodes with edges.
void NumShortestPaths_nodesWithEdges_cuda(uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t local_current_src_node, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  NumShortestPaths_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, local_infinity, local_current_src_node, ctx);
  // FP: "2 -> 3;
}
void PropagationFlagUpdate_cuda(unsigned int __begin, unsigned int __end, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // Host launcher: flags nodes with no remaining successors, using the
  // standard grid/block sizing.
  dim3 grid;
  dim3 block;
  kernel_sizing(grid, block);
  PropagationFlagUpdate <<<grid, block>>>(ctx->gg, __begin, __end, local_infinity, ctx->current_length.data.gpu_wr_ptr(), ctx->num_successors.data.gpu_wr_ptr(), ctx->propagation_flag.data.gpu_wr_ptr(), *(ctx->propagation_flag.is_updated.gpu_rd_ptr()));
  check_cuda_kernel;
}
// Convenience wrapper: PropagationFlagUpdate over every node of the local graph.
void PropagationFlagUpdate_allNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  PropagationFlagUpdate_cuda(0, ctx->gg.nnodes, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: PropagationFlagUpdate over this host's master nodes.
void PropagationFlagUpdate_masterNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  PropagationFlagUpdate_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, ctx);
  // FP: "2 -> 3;
}
// Convenience wrapper: PropagationFlagUpdate over the prefix of nodes with edges.
void PropagationFlagUpdate_nodesWithEdges_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // FP: "1 -> 2;
  PropagationFlagUpdate_cuda(0, ctx->numNodesWithEdges, local_infinity, ctx);
  // FP: "2 -> 3;
}
void DependencyPropChanges_cuda(unsigned int __begin, unsigned int __end, const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // Host launcher: applies pending dependency deltas and successor trims,
  // using the standard grid/block sizing.
  dim3 grid;
  dim3 block;
  kernel_sizing(grid, block);
  DependencyPropChanges <<<grid, block>>>(ctx->gg, __begin, __end, local_infinity, ctx->current_length.data.gpu_wr_ptr(), ctx->dependency.data.gpu_wr_ptr(), ctx->num_successors.data.gpu_wr_ptr(), ctx->propagation_flag.data.gpu_wr_ptr(), ctx->to_add_float.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr(), *(ctx->dependency.is_updated.gpu_rd_ptr()), *(ctx->propagation_flag.is_updated.gpu_rd_ptr()));
  check_cuda_kernel;
}
void DependencyPropChanges_allNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // Run DependencyPropChanges over every node of the local graph.
  DependencyPropChanges_cuda(0, ctx->gg.nnodes, local_infinity, ctx);
}
void DependencyPropChanges_masterNodes_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // Run DependencyPropChanges only over this host's owned (master) range.
  DependencyPropChanges_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, local_infinity, ctx);
}
void DependencyPropChanges_nodesWithEdges_cuda(const uint32_t & local_infinity, struct CUDA_Context* ctx)
{
  // Run DependencyPropChanges over the prefix of nodes that have edges.
  DependencyPropChanges_cuda(0, ctx->numNodesWithEdges, local_infinity, ctx);
}
// Host wrapper for the DependencyPropagation kernel over [__begin, __end).
// A one-element device accumulator is zeroed on the host, handed to the
// kernel via HGAccumulator, and read back into DGAccumulator_accum after the
// launch. The launch uses the generated per-kernel block size constant
// __tb_DependencyPropagation instead of the generic `threads` sizing.
void DependencyPropagation_cuda(unsigned int __begin, unsigned int __end, uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<uint32_t> _DGAccumulator_accum;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
// One-element device-visible cell backing the accumulator.
Shared<uint32_t> DGAccumulator_accumval = Shared<uint32_t>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_accumval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr();
// FP: "8 -> 9;
DependencyPropagation <<<blocks, __tb_DependencyPropagation>>>(ctx->gg, __begin, __end, local_current_src_node, local_infinity, ctx->current_length.data.gpu_wr_ptr(), ctx->dependency.data.gpu_wr_ptr(), ctx->num_shortest_paths.data.gpu_wr_ptr(), ctx->num_successors.data.gpu_wr_ptr(), ctx->propagation_flag.data.gpu_wr_ptr(), ctx->to_add_float.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr(), *(ctx->to_add_float.is_updated.gpu_rd_ptr()), *(ctx->trim.is_updated.gpu_rd_ptr()), _DGAccumulator_accum);
// FP: "9 -> 10;
check_cuda_kernel;
// FP: "10 -> 11;
// Copy the accumulated count back to the caller (implicit D2H sync).
DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr());
// FP: "11 -> 12;
}
void DependencyPropagation_allNodes_cuda(uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
  // Run DependencyPropagation over every node of the local graph.
  DependencyPropagation_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, local_infinity, local_current_src_node, ctx);
}
void DependencyPropagation_masterNodes_cuda(uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
  // Run DependencyPropagation only over this host's owned (master) range.
  DependencyPropagation_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, local_infinity, local_current_src_node, ctx);
}
void DependencyPropagation_nodesWithEdges_cuda(uint32_t & DGAccumulator_accum, const uint32_t & local_infinity, const uint64_t & local_current_src_node, struct CUDA_Context* ctx)
{
  // Run DependencyPropagation over the prefix of nodes that have edges.
  DependencyPropagation_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, local_infinity, local_current_src_node, ctx);
}
// Host wrapper for the BC kernel over [__begin, __end): folds each node's
// accumulated dependency into its betweenness centrality value.
void BC_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
BC <<<blocks, threads>>>(ctx->gg, __begin, __end, ctx->betweeness_centrality.data.gpu_wr_ptr(), ctx->dependency.data.gpu_wr_ptr());
// FP: "5 -> 6;
check_cuda_kernel;
// FP: "6 -> 7;
}
void BC_allNodes_cuda(struct CUDA_Context* ctx)
{
  // Run BC over every node of the local graph.
  BC_cuda(0, ctx->gg.nnodes, ctx);
}
void BC_masterNodes_cuda(struct CUDA_Context* ctx)
{
  // Run BC only over this host's owned (master) range.
  BC_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx);
}
void BC_nodesWithEdges_cuda(struct CUDA_Context* ctx)
{
  // Run BC over the prefix of nodes that have edges.
  BC_cuda(0, ctx->numNodesWithEdges, ctx);
}
// Host wrapper for the Sanity kernel over [__begin, __end): reduces the
// betweenness-centrality array into sum / max / min via three one-element
// device accumulators, which are seeded on the host and read back after the
// launch.
void Sanity_cuda(unsigned int __begin, unsigned int __end, float & DGAccumulator_sum, float & DGAccumulator_max, float & DGAccumulator_min, struct CUDA_Context* ctx)
{
dim3 blocks;
dim3 threads;
HGAccumulator<float> _DGAccumulator_sum;
HGReduceMax<float> _DGAccumulator_max;
HGReduceMin<float> _DGAccumulator_min;
// FP: "1 -> 2;
// FP: "2 -> 3;
// FP: "3 -> 4;
kernel_sizing(blocks, threads);
// FP: "4 -> 5;
Shared<float> DGAccumulator_sumval = Shared<float>(1);
// FP: "5 -> 6;
// FP: "6 -> 7;
*(DGAccumulator_sumval.cpu_wr_ptr()) = 0;
// FP: "7 -> 8;
_DGAccumulator_sum.rv = DGAccumulator_sumval.gpu_wr_ptr();
// FP: "8 -> 9;
Shared<float> DGAccumulator_maxval = Shared<float>(1);
// FP: "9 -> 10;
// FP: "10 -> 11;
*(DGAccumulator_maxval.cpu_wr_ptr()) = 0;
// FP: "11 -> 12;
_DGAccumulator_max.rv = DGAccumulator_maxval.gpu_wr_ptr();
// FP: "12 -> 13;
Shared<float> DGAccumulator_minval = Shared<float>(1);
// FP: "13 -> 14;
// FP: "14 -> 15;
// NOTE(review): the min accumulator is seeded with 0, not +inf/FLT_MAX.
// If betweenness values are non-negative the reported min can never exceed
// 0 — confirm this seeding is intended by the code generator.
*(DGAccumulator_minval.cpu_wr_ptr()) = 0;
// FP: "15 -> 16;
_DGAccumulator_min.rv = DGAccumulator_minval.gpu_wr_ptr();
// FP: "16 -> 17;
Sanity <<<blocks, threads>>>(ctx->gg, __begin, __end, ctx->betweeness_centrality.data.gpu_wr_ptr(), _DGAccumulator_sum, _DGAccumulator_max, _DGAccumulator_min);
// FP: "17 -> 18;
check_cuda_kernel;
// FP: "18 -> 19;
// Copy the three reduction results back to the caller (implicit D2H sync).
DGAccumulator_sum = *(DGAccumulator_sumval.cpu_rd_ptr());
// FP: "19 -> 20;
DGAccumulator_max = *(DGAccumulator_maxval.cpu_rd_ptr());
// FP: "20 -> 21;
DGAccumulator_min = *(DGAccumulator_minval.cpu_rd_ptr());
// FP: "21 -> 22;
}
void Sanity_allNodes_cuda(float & DGAccumulator_sum, float & DGAccumulator_max, float & DGAccumulator_min, struct CUDA_Context* ctx)
{
  // Run the sanity reduction over every node of the local graph.
  Sanity_cuda(0, ctx->gg.nnodes, DGAccumulator_sum, DGAccumulator_max, DGAccumulator_min, ctx);
}
void Sanity_masterNodes_cuda(float & DGAccumulator_sum, float & DGAccumulator_max, float & DGAccumulator_min, struct CUDA_Context* ctx)
{
  // Run the sanity reduction only over this host's owned (master) range.
  Sanity_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_sum, DGAccumulator_max, DGAccumulator_min, ctx);
}
void Sanity_nodesWithEdges_cuda(float & DGAccumulator_sum, float & DGAccumulator_max, float & DGAccumulator_min, struct CUDA_Context* ctx)
{
  // Run the sanity reduction over the prefix of nodes that have edges.
  Sanity_cuda(0, ctx->numNodesWithEdges, DGAccumulator_sum, DGAccumulator_max, DGAccumulator_min, ctx);
}
|
42a56e27659b7f81a8f8a8cc285fa4f1ca94f2fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "math.h"
#include "hipcub/hipcub.hpp"
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include "LayerNorm.h"
namespace cuBERT {
    // Row-wise mean/variance of (in + inout) over `channel` elements.
    // Launch contract: one block per batch row (grid-stride over rows),
    // blockDim.x == 128 to match the BlockReduce<float, 128> instantiation.
    template <typename T>
    __global__ void kernel_momentum_cub(const T *__restrict__ in,
                                        const T *__restrict__ inout,
                                        const int batch_size,
                                        const int channel,
                                        T *mean_out,
                                        T *var_out) {
        __shared__ typename hipcub::BlockReduce<float, 128>::TempStorage m_storage;
        __shared__ typename hipcub::BlockReduce<float, 128>::TempStorage v_storage;
        const float scale = 1.f / channel;
        for (int i = blockIdx.x; i < batch_size; i += gridDim.x) {
            float m_val = 0;
            float v_val = 0;
            for (int j = threadIdx.x; j < channel; j += blockDim.x) {
                const int X_index = i * channel + j;
                // Layer norm here operates on the residual sum in + inout.
                const float t = (float) __ldg(in + X_index) + (float) __ldg(inout + X_index);
                m_val += t;
                v_val += t * t;
            }
            m_val = hipcub::BlockReduce<float, 128>(m_storage).Sum(m_val);
            v_val = hipcub::BlockReduce<float, 128>(v_storage).Sum(v_val);
            if (threadIdx.x == 0) {
                // Reduce results are only valid in thread 0.
                const float mu = m_val * scale;
                mean_out[i] = mu;
                // Var = E[x^2] - (E[x])^2 (biased estimator).
                var_out[i] = v_val * scale - mu * mu;
            }
            // Barrier before the temp storage is reused by the next row.
            __syncthreads();
        }
    }

    // Normalize inout[i] := beta + gamma * (in + inout - mean) / sqrt(var + eps),
    // elementwise over a batch_size x channel matrix; one thread per element.
    template <typename T>
    __global__ void kernel_batchnorm_(const T *__restrict__ in,
                                      T *inout,
                                      const int batch_size,
                                      const int channel,
                                      const T *__restrict__ mean_in,
                                      const T *__restrict__ var_in,
                                      const T *__restrict__ beta,
                                      const T *__restrict__ gamma) {
        int idx = blockIdx.x * blockDim.x + threadIdx.x;
        if (idx >= batch_size * channel) {
            return;
        }
        int batch_idx = idx / channel;
        int channel_idx = idx % channel;
        float mean = (float) __ldg(mean_in + batch_idx);
        float var = (float) __ldg(var_in + batch_idx);
        // 1 / sqrt(var + eps); float literal avoids a double-precision
        // add + narrowing conversion in this float kernel (was 1e-12).
        var = rsqrtf(var + 1e-12f);
        float _beta = (float) __ldg(beta + channel_idx);
        float _gamma = (float) __ldg(gamma + channel_idx);
        float _inout = (float) __ldg(inout + idx);
        float _in = (float) __ldg(in + idx);
        inout[idx] = _beta + _gamma * var * (_inout + _in - mean);
    }

    // Host launcher: layer-normalizes inout (+ residual in) in place on the
    // given stream. mean_gpu/var_gpu are caller-provided device scratch of
    // batch_size elements each.
    template <typename T>
    __host__ void layer_norm_(const T *in,
                              T *inout,
                              const int batch_size,
                              const int channel,
                              T *mean_gpu,
                              T *var_gpu,
                              const T *beta,
                              const T *gamma,
                              void *stream) {
        hipLaunchKernelGGL(( kernel_momentum_cub<T>) , dim3(batch_size), dim3(128), 0, (hipStream_t) stream, in, inout, batch_size, channel, mean_gpu, var_gpu);
        const int all_blocks = (batch_size * channel + 127) / 128;
        hipLaunchKernelGGL(( kernel_batchnorm_<T>) , dim3(all_blocks), dim3(128), 0, (hipStream_t) stream, in, inout, batch_size, channel, mean_gpu, var_gpu, beta, gamma);
    }

    // Explicit instantiations for the two element types cuBERT supports.
    template
    __host__ void layer_norm_<float>(const float *in,
                                     float *inout,
                                     const int batch_size,
                                     const int channel,
                                     float *mean_gpu,
                                     float *var_gpu,
                                     const float *beta,
                                     const float *gamma,
                                     void *stream);
    template
    __host__ void layer_norm_<half>(const half *in,
                                    half *inout,
                                    const int batch_size,
                                    const int channel,
                                    half *mean_gpu,
                                    half *var_gpu,
                                    const half *beta,
                                    const half *gamma,
                                    void *stream);
}
| 42a56e27659b7f81a8f8a8cc285fa4f1ca94f2fd.cu | #include "math.h"
#include "cub/cub.cuh"
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include "LayerNorm.h"
namespace cuBERT {
    // Row-wise mean/variance of (in + inout) over `channel` elements.
    // Launch contract: one block per batch row (grid-stride over rows),
    // blockDim.x == 128 to match the BlockReduce<float, 128> instantiation.
    template <typename T>
    __global__ void kernel_momentum_cub(const T *__restrict__ in,
                                        const T *__restrict__ inout,
                                        const int batch_size,
                                        const int channel,
                                        T *mean_out,
                                        T *var_out) {
        __shared__ typename cub::BlockReduce<float, 128>::TempStorage m_storage;
        __shared__ typename cub::BlockReduce<float, 128>::TempStorage v_storage;
        const float scale = 1.f / channel;
        for (int i = blockIdx.x; i < batch_size; i += gridDim.x) {
            float m_val = 0;
            float v_val = 0;
            for (int j = threadIdx.x; j < channel; j += blockDim.x) {
                const int X_index = i * channel + j;
                // Layer norm here operates on the residual sum in + inout.
                const float t = (float) __ldg(in + X_index) + (float) __ldg(inout + X_index);
                m_val += t;
                v_val += t * t;
            }
            m_val = cub::BlockReduce<float, 128>(m_storage).Sum(m_val);
            v_val = cub::BlockReduce<float, 128>(v_storage).Sum(v_val);
            if (threadIdx.x == 0) {
                // Reduce results are only valid in thread 0.
                const float mu = m_val * scale;
                mean_out[i] = mu;
                // Var = E[x^2] - (E[x])^2 (biased estimator).
                var_out[i] = v_val * scale - mu * mu;
            }
            // Barrier before the temp storage is reused by the next row.
            __syncthreads();
        }
    }

    // Normalize inout[i] := beta + gamma * (in + inout - mean) / sqrt(var + eps),
    // elementwise over a batch_size x channel matrix; one thread per element.
    template <typename T>
    __global__ void kernel_batchnorm_(const T *__restrict__ in,
                                      T *inout,
                                      const int batch_size,
                                      const int channel,
                                      const T *__restrict__ mean_in,
                                      const T *__restrict__ var_in,
                                      const T *__restrict__ beta,
                                      const T *__restrict__ gamma) {
        int idx = blockIdx.x * blockDim.x + threadIdx.x;
        if (idx >= batch_size * channel) {
            return;
        }
        int batch_idx = idx / channel;
        int channel_idx = idx % channel;
        float mean = (float) __ldg(mean_in + batch_idx);
        float var = (float) __ldg(var_in + batch_idx);
        // 1 / sqrt(var + eps); float literal avoids a double-precision
        // add + narrowing conversion in this float kernel (was 1e-12).
        var = rsqrtf(var + 1e-12f);
        float _beta = (float) __ldg(beta + channel_idx);
        float _gamma = (float) __ldg(gamma + channel_idx);
        float _inout = (float) __ldg(inout + idx);
        float _in = (float) __ldg(in + idx);
        inout[idx] = _beta + _gamma * var * (_inout + _in - mean);
    }

    // Host launcher: layer-normalizes inout (+ residual in) in place on the
    // given stream. mean_gpu/var_gpu are caller-provided device scratch of
    // batch_size elements each.
    template <typename T>
    __host__ void layer_norm_(const T *in,
                              T *inout,
                              const int batch_size,
                              const int channel,
                              T *mean_gpu,
                              T *var_gpu,
                              const T *beta,
                              const T *gamma,
                              void *stream) {
        kernel_momentum_cub<T> <<<batch_size, 128, 0, (cudaStream_t) stream>>> (in, inout, batch_size, channel, mean_gpu, var_gpu);
        const int all_blocks = (batch_size * channel + 127) / 128;
        kernel_batchnorm_<T> <<<all_blocks, 128, 0, (cudaStream_t) stream>>> (in, inout, batch_size, channel, mean_gpu, var_gpu, beta, gamma);
    }

    // Explicit instantiations for the two element types cuBERT supports.
    template
    __host__ void layer_norm_<float>(const float *in,
                                     float *inout,
                                     const int batch_size,
                                     const int channel,
                                     float *mean_gpu,
                                     float *var_gpu,
                                     const float *beta,
                                     const float *gamma,
                                     void *stream);
    template
    __host__ void layer_norm_<half>(const half *in,
                                    half *inout,
                                    const int batch_size,
                                    const int channel,
                                    half *mean_gpu,
                                    half *var_gpu,
                                    const half *beta,
                                    const half *gamma,
                                    void *stream);
}
|
61c937365e4aaf76886be4b0787dae5f90ab717b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <ctime>
#include <hiprand/hiprand_kernel.h>
#include <fstream>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#define newConst 100100
using namespace std;
// Mark which colours are in use: sets propagationArray[c-1] = 1 for every
// 1-based colour c appearing in `colouring` (one thread per vertex).
// Concurrent stores of the same value 1 are benign.
__global__ void colourCountFunc (int *colouring, int n, int *propagationArray){
    int v = blockDim.x * blockIdx.x + threadIdx.x;
    if (v < n) {
        propagationArray[colouring[v] - 1] = 1;
    }
}
// Seed one hiprand state per thread (sequence number = global thread id).
// NOTE(review): there is no `i < n` guard, but the caller allocates exactly
// n states and rounds the grid up to whole blocks — the tail threads write
// hiprand state out of bounds. Fixing it requires adding an `n` parameter
// (interface change); flagging only.
__global__ void setup_kernel (hiprandState_t * state, unsigned long seed )
{
int i= blockDim.x * blockIdx.x + threadIdx.x;
hiprand_init (seed, i, 0, &state[i]);
}
// Assign each vertex i < n a uniformly random colour in [1, limit], written
// into degreeCount[i] (the degree array is reused as the colour array).
// Fix: added the missing i >= n guard — the grid is rounded up to whole
// blocks, so tail threads used to read/write globalState[i] and
// degreeCount[i] out of bounds (both arrays hold exactly n elements; the n
// parameter was previously unused).
__global__ void randomColouring (hiprandState_t* globalState, int *degreeCount, int n, int limit){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) {
        return;
    }
    hiprandState_t localState = globalState[i];
    float RANDOM = hiprand_uniform( &localState );
    globalState[i] = localState;
    // hiprand_uniform yields (0, 1]; scale onto the integers 1..limit.
    RANDOM *= (limit - 1 + 0.999999);
    RANDOM += 1;
    degreeCount[i] = (int) RANDOM;
}
// One thread per vertex i: scan i's CSR neighbour list and, wherever a
// neighbour carries the same colour, flag the endpoint with the SMALLER
// vertex id in detectConflict (so only one side of each conflicting edge is
// recoloured later). degreeCount holds the current 1-based colouring here.
// Multiple threads may store 1 into the same slot concurrently; all writers
// store the same value, so the race is benign.
__global__ void conflictDetection (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m, int *detectConflict){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
int myColour = degreeCount[i];
int start = -1, stop = -1;
start = vertexArray[i];
stop = vertexArray[i+1];
for (int j=start; j<stop; j++){
// neighbourArray holds 1-based vertex ids, hence the -1.
if (degreeCount[neighbourArray[j]-1] == myColour){
// detectConflict[i]=1;
// break;
if (i < neighbourArray[j]-1){
if (detectConflict[i]!=1){
detectConflict[i]=1;
}
}
else if (detectConflict[neighbourArray[j]-1]!=1){
detectConflict[neighbourArray[j]-1]=1;
}
// if (detectConflict[i]!=1){
// detectConflict[i]=1;
// }
//
// if (detectConflict[neighbourArray[j]-1]!=1){
// detectConflict[neighbourArray[j]-1]=1;
// }
}
}
}
// Compute each vertex's degree as the length of its CSR row:
// degreeCount[v] = vertexArray[v+1] - vertexArray[v]. One thread per vertex.
// m (edge count) is unused but kept for signature parity with the callers.
__global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){
    int v = blockDim.x * blockIdx.x + threadIdx.x;
    if (v >= n) {
        return;
    }
    degreeCount[v] = vertexArray[v + 1] - vertexArray[v];
}
// Dump the graph as "e <src> <dst>" lines (1-based vertex ids).
// Rows 0..n-2 are delimited by consecutive CSR offsets; the last row runs
// from its offset to m.
void edgesPrint (int vertexArray[], int neighbourArray[], int n, int m){
    for (int u = 0; u < n - 1; ++u) {
        for (int e = vertexArray[u]; e < vertexArray[u + 1]; ++e) {
            cout << "e " << u + 1 << " " << neighbourArray[e] << endl;
        }
    }
    for (int e = vertexArray[n - 1]; e < m; ++e) {
        cout << "e " << n << " " << neighbourArray[e] << endl;
    }
}
// Graph colouring driver (HIP). Input on stdin: "n m" followed by 2*m
// "src dst" pairs (each undirected edge listed both ways, 1-based ids,
// sorted by source). Appends one tab-separated row of timings/stats to
// output5.txt.
// Fixes: devStates and d_propagationArray1 were never freed and the timing
// events were never destroyed; dead commented-out debug code removed.
int main(int argc, char const *argv[])
{
    int n, m;
    cin>>n>>m;

    ofstream fout;
    fout.open("output5.txt",ios::app);

    int *h_vertexArray = new int [n+1];      // CSR row offsets
    int *h_neighbourArray = new int [2*m];   // CSR adjacency (1-based ids)
    int *h_degreeCount = new int [n];        // degrees, later reused as colours
    int *h_detectConflict = new int [n];     // 1 => vertex must be recoloured

    int *d_vertexArray = NULL;
    hipMalloc((void **)&d_vertexArray, (n+1)*sizeof(int));
    int *d_neighbourArray = NULL;
    hipMalloc((void **)&d_neighbourArray, 2*m*sizeof(int));
    int *d_detectConflict = NULL;
    hipMalloc((void **)&d_detectConflict, (n)*sizeof(int));
    hipMemset((void *)d_detectConflict, 0, (n)*sizeof(int));
    int *d_degreeCount = NULL;
    hipMalloc((void **)&d_degreeCount, (n)*sizeof(int));
    hipMemset((void *)d_degreeCount, 0, (n)*sizeof(int));
    int *d_propagationArray1 = NULL;
    hipMalloc((void **)&d_propagationArray1, (newConst)*sizeof(int));
    hipMemset((void *)d_propagationArray1, 0, (newConst)*sizeof(int));

    hiprandState_t* devStates;
    hipMalloc ( &devStates, n*sizeof( hiprandState_t ) );

    // Vertices that never appear as a source keep the sentinel offset 2*m.
    for (int i = 0; i < n+1; ++i)
    {
        h_vertexArray[i]=2*m;
    }

    // Build CSR offsets while streaming the (source-sorted) edge list.
    int NSlast = 0;
    int NSoffset = 0;
    int NSprev=0;

    for (int i=0; i<2*m; i++){
        int start, end;
        cin>>start>>end;
        // Sources skipped in the input get the current offset (empty rows).
        for (int j=NSlast+1; j<start; j++){
            h_vertexArray[j-1]=NSoffset;
        }
        if (NSprev!=start){
            NSlast=start;
            h_vertexArray[start-1]=NSoffset;
            NSprev=start;
        }
        h_neighbourArray[NSoffset]=end;
        NSoffset++;
    }

    hipEvent_t start, stop;
    float timeNew;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    // --- H2D transfer of the graph (timed) ---
    hipEventRecord(start, 0);
    hipMemcpy(d_vertexArray, h_vertexArray, (n+1)*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_neighbourArray, h_neighbourArray, 2*m*sizeof(int), hipMemcpyHostToDevice);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&timeNew, start, stop);
    fout<<timeNew<<"\t";

    int threadsPerBlock = 512;
    int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock;

    // --- degree computation + max degree (timed) ---
    hipEventRecord(start, 0);
    hipLaunchKernelGGL(( degreeCalc), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_vertexArray, d_neighbourArray, d_degreeCount, n, m);
    hipMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), hipMemcpyDeviceToHost);
    thrust::device_ptr<int> d_ptr = thrust::device_pointer_cast(d_degreeCount);
    int max = *(thrust::max_element(d_ptr, d_ptr + n));
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&timeNew, start, stop);
    fout<<timeNew<<"\t";

    cout<<"Max = "<<max<<endl;

    // --- random initial colouring with max+1 colours (timed) ---
    hipEventRecord(start, 0);
    hipLaunchKernelGGL(( setup_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devStates, time(NULL));
    // Except for Cliques and Odd Cycles, Brook's theorem states that only Max Degree colours are enough at most
    hipLaunchKernelGGL(( randomColouring), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devStates, d_degreeCount, n, max+1);
    hipMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), hipMemcpyDeviceToHost);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&timeNew, start, stop);
    fout<<timeNew<<"\t";

    // --- conflict detection on the random colouring (timed) ---
    hipEventRecord(start, 0);
    hipLaunchKernelGGL(( conflictDetection), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_detectConflict);
    hipMemcpy(h_detectConflict, d_detectConflict, n*sizeof(int), hipMemcpyDeviceToHost);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&timeNew, start, stop);
    fout<<timeNew<<"\t";

    thrust::device_ptr<int> d_detectConflict_ptr = thrust::device_pointer_cast(d_detectConflict);
    int count1 = thrust::reduce(d_detectConflict_ptr, d_detectConflict_ptr + n);
    cout<<"Count: "<<count1<<endl;

    // --- sequential greedy repair of the flagged vertices (timed) ---
    int countnew=0;
    hipEventRecord(start, 0);
    for (int i=0; i<n; i++){
        if (h_detectConflict[i]==0){
            continue;
        }
        countnew++;
        bool usedColours[max+1];            // VLA: colours taken by neighbours
        fill(usedColours, usedColours+max+1, false);
        int start = -1, stop = -1;          // shadows the timing events in this scope
        start = h_vertexArray[i];
        stop = h_vertexArray[i+1];
        for (int j=start; j<stop; j++){
            usedColours[h_degreeCount[h_neighbourArray[j]-1]-1] = true;
        }
        // Pick the first colour no neighbour uses.
        for (int j=0; j<max+1; j++){
            if (usedColours[j]==false){
                h_degreeCount[i]=j+1;
                break;
            }
        }
    }
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&timeNew, start, stop);
    fout<<timeNew<<"\t"<<max<<"\t"<<count1<<"\t";

    cout<<"Shamil"<<endl;

    // --- verify: re-run conflict detection on the repaired colouring ---
    hipMemset((void *)d_detectConflict, 0, (n)*sizeof(int));
    hipMemcpy(d_degreeCount, h_degreeCount, n*sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL(( conflictDetection), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_detectConflict);
    count1 = thrust::reduce(d_detectConflict_ptr, d_detectConflict_ptr + n);
    cout<<"Count: "<<count1<<" "<<countnew<<endl;

    thrust::device_ptr<int> c_ptr = thrust::device_pointer_cast(d_degreeCount);
    int maxColour = *(thrust::max_element(c_ptr, c_ptr + n));
    cout<<"Max Colour = "<<maxColour<<endl;
    fout<<maxColour<<"\t";

    // --- count distinct colours actually used ---
    int maxColourNew;
    thrust::device_ptr<int> d_propagationArray_ptr = thrust::device_pointer_cast(d_propagationArray1);
    maxColourNew = 0;
    hipLaunchKernelGGL(( colourCountFunc), dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_degreeCount, n, d_propagationArray1);
    maxColourNew = thrust::reduce(d_propagationArray_ptr, d_propagationArray_ptr + newConst);
    hipMemset((void *)d_propagationArray1, 0, (newConst)*sizeof(int));
    fout<<maxColourNew<<"\n";

    delete[] h_vertexArray;
    delete[] h_neighbourArray;
    delete[] h_degreeCount;
    delete[] h_detectConflict;
    hipFree(d_neighbourArray);
    hipFree(d_vertexArray);
    hipFree(d_degreeCount);
    hipFree(d_detectConflict);
    hipFree(d_propagationArray1);   // fix: was leaked
    hipFree(devStates);             // fix: was leaked
    hipEventDestroy(start);         // fix: events were leaked
    hipEventDestroy(stop);
    fout.close();
    hipDeviceReset();
    return 0;
}
| 61c937365e4aaf76886be4b0787dae5f90ab717b.cu | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <ctime>
#include <curand_kernel.h>
#include <fstream>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>
#include <thrust/device_ptr.h>
#define newConst 100100
using namespace std;
// Mark which colours are in use: sets propagationArray[c-1] = 1 for every
// 1-based colour c appearing in `colouring` (one thread per vertex).
// Concurrent stores of the same value 1 are benign.
__global__ void colourCountFunc (int *colouring, int n, int *propagationArray){
    int v = blockDim.x * blockIdx.x + threadIdx.x;
    if (v < n) {
        propagationArray[colouring[v] - 1] = 1;
    }
}
// Seed one curand state per thread (sequence number = global thread id).
// NOTE(review): there is no `i < n` guard, but the caller allocates exactly
// n states and rounds the grid up to whole blocks — the tail threads write
// curand state out of bounds. Fixing it requires adding an `n` parameter
// (interface change); flagging only.
__global__ void setup_kernel (curandState * state, unsigned long seed )
{
int i= blockDim.x * blockIdx.x + threadIdx.x;
curand_init (seed, i, 0, &state[i]);
}
// Assign each vertex i < n a uniformly random colour in [1, limit], written
// into degreeCount[i] (the degree array is reused as the colour array).
// Fix: added the missing i >= n guard — the grid is rounded up to whole
// blocks, so tail threads used to read/write globalState[i] and
// degreeCount[i] out of bounds (both arrays hold exactly n elements; the n
// parameter was previously unused).
__global__ void randomColouring (curandState* globalState, int *degreeCount, int n, int limit){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= n) {
        return;
    }
    curandState localState = globalState[i];
    float RANDOM = curand_uniform( &localState );
    globalState[i] = localState;
    // curand_uniform yields (0, 1]; scale onto the integers 1..limit.
    RANDOM *= (limit - 1 + 0.999999);
    RANDOM += 1;
    degreeCount[i] = (int) RANDOM;
}
// One thread per vertex i: scan i's CSR neighbour list and, wherever a
// neighbour carries the same colour, flag the endpoint with the SMALLER
// vertex id in detectConflict (so only one side of each conflicting edge is
// recoloured later). degreeCount holds the current 1-based colouring here.
// Multiple threads may store 1 into the same slot concurrently; all writers
// store the same value, so the race is benign.
__global__ void conflictDetection (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m, int *detectConflict){
int i= blockDim.x * blockIdx.x + threadIdx.x;
if (i>=n){
return;
}
int myColour = degreeCount[i];
int start = -1, stop = -1;
start = vertexArray[i];
stop = vertexArray[i+1];
for (int j=start; j<stop; j++){
// neighbourArray holds 1-based vertex ids, hence the -1.
if (degreeCount[neighbourArray[j]-1] == myColour){
// detectConflict[i]=1;
// break;
if (i < neighbourArray[j]-1){
if (detectConflict[i]!=1){
detectConflict[i]=1;
}
}
else if (detectConflict[neighbourArray[j]-1]!=1){
detectConflict[neighbourArray[j]-1]=1;
}
// if (detectConflict[i]!=1){
// detectConflict[i]=1;
// }
//
// if (detectConflict[neighbourArray[j]-1]!=1){
// detectConflict[neighbourArray[j]-1]=1;
// }
}
}
}
// Compute each vertex's degree as the length of its CSR row:
// degreeCount[v] = vertexArray[v+1] - vertexArray[v]. One thread per vertex.
// m (edge count) is unused but kept for signature parity with the callers.
__global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){
    int v = blockDim.x * blockIdx.x + threadIdx.x;
    if (v >= n) {
        return;
    }
    degreeCount[v] = vertexArray[v + 1] - vertexArray[v];
}
// Dump the graph as "e <src> <dst>" lines (1-based vertex ids).
// Rows 0..n-2 are delimited by consecutive CSR offsets; the last row runs
// from its offset to m.
void edgesPrint (int vertexArray[], int neighbourArray[], int n, int m){
    for (int u = 0; u < n - 1; ++u) {
        for (int e = vertexArray[u]; e < vertexArray[u + 1]; ++e) {
            cout << "e " << u + 1 << " " << neighbourArray[e] << endl;
        }
    }
    for (int e = vertexArray[n - 1]; e < m; ++e) {
        cout << "e " << n << " " << neighbourArray[e] << endl;
    }
}
// Graph colouring driver (CUDA). Input on stdin: "n m" followed by 2*m
// "src dst" pairs (each undirected edge listed both ways, 1-based ids,
// sorted by source). Appends one tab-separated row of timings/stats to
// output5.txt.
// Fixes: devStates and d_propagationArray1 were never freed and the timing
// events were never destroyed; dead commented-out debug code removed.
int main(int argc, char const *argv[])
{
    int n, m;
    cin>>n>>m;

    ofstream fout;
    fout.open("output5.txt",ios::app);

    int *h_vertexArray = new int [n+1];      // CSR row offsets
    int *h_neighbourArray = new int [2*m];   // CSR adjacency (1-based ids)
    int *h_degreeCount = new int [n];        // degrees, later reused as colours
    int *h_detectConflict = new int [n];     // 1 => vertex must be recoloured

    int *d_vertexArray = NULL;
    cudaMalloc((void **)&d_vertexArray, (n+1)*sizeof(int));
    int *d_neighbourArray = NULL;
    cudaMalloc((void **)&d_neighbourArray, 2*m*sizeof(int));
    int *d_detectConflict = NULL;
    cudaMalloc((void **)&d_detectConflict, (n)*sizeof(int));
    cudaMemset((void *)d_detectConflict, 0, (n)*sizeof(int));
    int *d_degreeCount = NULL;
    cudaMalloc((void **)&d_degreeCount, (n)*sizeof(int));
    cudaMemset((void *)d_degreeCount, 0, (n)*sizeof(int));
    int *d_propagationArray1 = NULL;
    cudaMalloc((void **)&d_propagationArray1, (newConst)*sizeof(int));
    cudaMemset((void *)d_propagationArray1, 0, (newConst)*sizeof(int));

    curandState* devStates;
    cudaMalloc ( &devStates, n*sizeof( curandState ) );

    // Vertices that never appear as a source keep the sentinel offset 2*m.
    for (int i = 0; i < n+1; ++i)
    {
        h_vertexArray[i]=2*m;
    }

    // Build CSR offsets while streaming the (source-sorted) edge list.
    int NSlast = 0;
    int NSoffset = 0;
    int NSprev=0;

    for (int i=0; i<2*m; i++){
        int start, end;
        cin>>start>>end;
        // Sources skipped in the input get the current offset (empty rows).
        for (int j=NSlast+1; j<start; j++){
            h_vertexArray[j-1]=NSoffset;
        }
        if (NSprev!=start){
            NSlast=start;
            h_vertexArray[start-1]=NSoffset;
            NSprev=start;
        }
        h_neighbourArray[NSoffset]=end;
        NSoffset++;
    }

    cudaEvent_t start, stop;
    float timeNew;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // --- H2D transfer of the graph (timed) ---
    cudaEventRecord(start, 0);
    cudaMemcpy(d_vertexArray, h_vertexArray, (n+1)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_neighbourArray, h_neighbourArray, 2*m*sizeof(int), cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeNew, start, stop);
    fout<<timeNew<<"\t";

    int threadsPerBlock = 512;
    int blocksPerGrid = (n + threadsPerBlock -1)/threadsPerBlock;

    // --- degree computation + max degree (timed) ---
    cudaEventRecord(start, 0);
    degreeCalc<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m);
    cudaMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), cudaMemcpyDeviceToHost);
    thrust::device_ptr<int> d_ptr = thrust::device_pointer_cast(d_degreeCount);
    int max = *(thrust::max_element(d_ptr, d_ptr + n));
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeNew, start, stop);
    fout<<timeNew<<"\t";

    cout<<"Max = "<<max<<endl;

    // --- random initial colouring with max+1 colours (timed) ---
    cudaEventRecord(start, 0);
    setup_kernel<<<blocksPerGrid, threadsPerBlock>>>(devStates, time(NULL));
    // Except for Cliques and Odd Cycles, Brook's theorem states that only Max Degree colours are enough at most
    randomColouring<<<blocksPerGrid, threadsPerBlock>>>(devStates, d_degreeCount, n, max+1);
    cudaMemcpy(h_degreeCount, d_degreeCount, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeNew, start, stop);
    fout<<timeNew<<"\t";

    // --- conflict detection on the random colouring (timed) ---
    cudaEventRecord(start, 0);
    conflictDetection<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_detectConflict);
    cudaMemcpy(h_detectConflict, d_detectConflict, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeNew, start, stop);
    fout<<timeNew<<"\t";

    thrust::device_ptr<int> d_detectConflict_ptr = thrust::device_pointer_cast(d_detectConflict);
    int count1 = thrust::reduce(d_detectConflict_ptr, d_detectConflict_ptr + n);
    cout<<"Count: "<<count1<<endl;

    // --- sequential greedy repair of the flagged vertices (timed) ---
    int countnew=0;
    cudaEventRecord(start, 0);
    for (int i=0; i<n; i++){
        if (h_detectConflict[i]==0){
            continue;
        }
        countnew++;
        bool usedColours[max+1];            // VLA: colours taken by neighbours
        fill(usedColours, usedColours+max+1, false);
        int start = -1, stop = -1;          // shadows the timing events in this scope
        start = h_vertexArray[i];
        stop = h_vertexArray[i+1];
        for (int j=start; j<stop; j++){
            usedColours[h_degreeCount[h_neighbourArray[j]-1]-1] = true;
        }
        // Pick the first colour no neighbour uses.
        for (int j=0; j<max+1; j++){
            if (usedColours[j]==false){
                h_degreeCount[i]=j+1;
                break;
            }
        }
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timeNew, start, stop);
    fout<<timeNew<<"\t"<<max<<"\t"<<count1<<"\t";

    cout<<"Shamil"<<endl;

    // --- verify: re-run conflict detection on the repaired colouring ---
    cudaMemset((void *)d_detectConflict, 0, (n)*sizeof(int));
    cudaMemcpy(d_degreeCount, h_degreeCount, n*sizeof(int), cudaMemcpyHostToDevice);
    conflictDetection<<<blocksPerGrid, threadsPerBlock>>>(d_vertexArray, d_neighbourArray, d_degreeCount, n, m, d_detectConflict);
    count1 = thrust::reduce(d_detectConflict_ptr, d_detectConflict_ptr + n);
    cout<<"Count: "<<count1<<" "<<countnew<<endl;

    thrust::device_ptr<int> c_ptr = thrust::device_pointer_cast(d_degreeCount);
    int maxColour = *(thrust::max_element(c_ptr, c_ptr + n));
    cout<<"Max Colour = "<<maxColour<<endl;
    fout<<maxColour<<"\t";

    // --- count distinct colours actually used ---
    int maxColourNew;
    thrust::device_ptr<int> d_propagationArray_ptr = thrust::device_pointer_cast(d_propagationArray1);
    maxColourNew = 0;
    colourCountFunc<<< blocksPerGrid, threadsPerBlock >>>(d_degreeCount, n, d_propagationArray1);
    maxColourNew = thrust::reduce(d_propagationArray_ptr, d_propagationArray_ptr + newConst);
    cudaMemset((void *)d_propagationArray1, 0, (newConst)*sizeof(int));
    fout<<maxColourNew<<"\n";

    delete[] h_vertexArray;
    delete[] h_neighbourArray;
    delete[] h_degreeCount;
    delete[] h_detectConflict;
    cudaFree(d_neighbourArray);
    cudaFree(d_vertexArray);
    cudaFree(d_degreeCount);
    cudaFree(d_detectConflict);
    cudaFree(d_propagationArray1);   // fix: was leaked
    cudaFree(devStates);             // fix: was leaked
    cudaEventDestroy(start);         // fix: events were leaked
    cudaEventDestroy(stop);
    fout.close();
    cudaDeviceReset();
    return 0;
}
|
8062fbbe94d2548c722f25dfda496eb81ad67d9b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <sys/time.h>
#define FILTER_RADIUS 16
#define FILTER_LENGTH (2 * FILTER_RADIUS + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 5e-4
#define TILE_WIDTH 32
#define TILE_HEIGHT 32
__constant__ float Filter[FILTER_LENGTH]; // gia na apothikeusw tous suntelestes tou filtrou
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
/*
 * CPU reference: 1-D convolution of each image row with h_Filter.
 * Only interior pixels are computed — a border of width filterR on every
 * side is skipped (callers pre-zero h_Dst so the border stays 0).
 *
 *  h_Dst    - output image, imageW x imageH, row-major
 *  h_Src    - input image, same layout
 *  h_Filter - filter taps, length 2*filterR + 1
 */
void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter,
                       int imageW, int imageH, int filterR) {
    int x, y, k;

    for (y = filterR; y < imageH - filterR; y++) {
        for (x = filterR; x < imageW - filterR; x++) {
            float sum = 0;

            for (k = -filterR; k <= filterR; k++) {
                int d = x + k;
                sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
            }
            // Store once per pixel.  The original wrote the running partial
            // sum on every tap of the inner loop; the final value is the
            // same, but this avoids 2*filterR redundant stores per pixel.
            h_Dst[y * imageW + x] = sum;
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
/*
 * CPU reference: 1-D convolution of each image column with h_Filter.
 * Only interior pixels are computed — a border of width filterR on every
 * side is skipped (callers pre-zero h_Dst so the border stays 0).
 *
 *  h_Dst    - output image, imageW x imageH, row-major
 *  h_Src    - input image, same layout
 *  h_Filter - filter taps, length 2*filterR + 1
 */
void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter,
                          int imageW, int imageH, int filterR) {
    int x, y, k;

    for (y = filterR; y < imageH - filterR; y++) {
        for (x = filterR; x < imageW - filterR; x++) {
            float sum = 0;

            for (k = -filterR; k <= filterR; k++) {
                int d = y + k;
                sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
            }
            // Store once per pixel instead of once per tap (the original
            // re-wrote the partial sum inside the inner loop).
            h_Dst[y * imageW + x] = sum;
        }
    }
}
///// Reference row convolution filter in GPU /////
///// Row convolution on the GPU: one thread per pixel of the padded image /////
// Grid must cover imageW x imageH exactly (callers launch newW/32 x newH/32
// blocks of 32x32 threads).  Pixels inside the horizontal filterR border get 0.
__global__ void row_Kernel(float *Dst, float *Src, float *filter, int imageW, int imageH, int filterR) {
    int k;
    float sum = 0;  // output value computed by this thread

    const int gx = blockDim.x * blockIdx.x + threadIdx.x;  // global column
    const int gy = blockDim.y * blockIdx.y + threadIdx.y;  // global row

    // Skip the filterR-wide left/right border.  The x index must be compared
    // against the image *width*: the original tested `gx >= imageH - filterR`,
    // which is only correct when the image is square.
    if (!(gx < filterR || gx >= imageW - filterR)) {
        for (k = -filterR; k <= filterR; k++) {
            int d = gx + k;  // source column for this tap
            sum += Src[gy * imageW + d] * filter[filterR - k];
        }
    }
    Dst[gy * imageW + gx] = sum;
}
///// Reference column convolution filter in GPU /////
///// Column convolution on the GPU: one thread per pixel of the padded image /////
// Pixels inside the vertical filterR border are left at 0.
__global__ void column_Kernel(float *Dst, float *Src, float *filter, int imageW, int imageH, int filterR) {
    const int col = blockDim.x * blockIdx.x + threadIdx.x;  // global x
    const int row = blockDim.y * blockIdx.y + threadIdx.y;  // global y
    float acc = 0;  // output value computed by this thread

    if (row >= filterR && row < imageH - filterR) {
        for (int k = -filterR; k <= filterR; k++) {
            // walk filterR pixels up and down the same column
            acc += Src[(row + k) * imageW + col] * filter[filterR - k];
        }
    }
    Dst[row * imageW + col] = acc;
}
///// Reference tiled row convolution filter in GPU /////
///// Tiled row convolution in GPU /////
// Each block stages its 32x32 tile of Src in shared memory; filter taps that
// fall inside the tile are read from shared memory, taps that spill past the
// tile edge fall back to global memory.  Filter lives in __constant__ memory.
// Requires blockDim == (TILE_WIDTH, TILE_HEIGHT).
__global__ void tiled_row_Kernel(float *Dst, float *Src, int imageW, int imageH, int filterR) {
    __shared__ float data[TILE_WIDTH][TILE_HEIGHT];

    const int x = blockIdx.x * blockDim.x + threadIdx.x;  // global column
    const int y = blockIdx.y * blockDim.y + threadIdx.y;  // global row
    const int rowBase = y * imageW;                       // start of this thread's row
    const int tileX0  = blockIdx.x * blockDim.x;          // first column held in the tile
    float sum = 0;

    // Stage the tile.  Every thread reaches the barrier before any divergent
    // branch below.
    data[threadIdx.y][threadIdx.x] = Src[rowBase + x];
    __syncthreads();

    // Skip the filterR-wide border; compare x against the image *width*
    // (the original used imageH, correct only because images here are square).
    if (!(x < filterR || x >= imageW - filterR)) {
        for (int k = -filterR; k <= filterR; k++) {
            int d = x + k;
            // Fixed tile test: the original condition was
            // `d >= tileX0 && d < tileX0`, which is always false, so the
            // shared-memory tile was loaded but never actually read.
            if (d >= tileX0 && d < tileX0 + (int)blockDim.x) {
                sum += data[threadIdx.y][threadIdx.x + k] * Filter[filterR - k];
            } else {
                sum += Src[rowBase + d] * Filter[filterR - k];
            }
        }
    }
    Dst[rowBase + x] = sum;
}
///// Reference tiled_column convolution filter in GPU /////
///// Tiled column convolution in GPU /////
// Mirror of tiled_row_Kernel along y: the block's 32x32 tile is staged in
// shared memory; taps inside the tile hit shared memory, spills go to global.
// Filter lives in __constant__ memory.  Requires blockDim == (TILE_WIDTH, TILE_HEIGHT).
__global__ void tiled_column_Kernel(float *Dst, float *Src, int imageW, int imageH, int filterR) {
    __shared__ float data[TILE_WIDTH][TILE_HEIGHT];

    const int x = blockIdx.x * blockDim.x + threadIdx.x;  // global column
    const int y = blockIdx.y * blockDim.y + threadIdx.y;  // global row
    const int rowBase = y * imageW;                       // start of this thread's row
    const int tileY0  = blockIdx.y * blockDim.y;          // first row held in the tile
    float sum = 0;

    // Stage the tile before any divergent branch so all threads hit the barrier.
    data[threadIdx.y][threadIdx.x] = Src[y * imageW + x];
    __syncthreads();

    if (!(y < filterR || y >= imageH - filterR)) {
        for (int k = -filterR; k <= filterR; k++) {
            int d = y + k;
            // Fixed tile test: the original compared against
            // `blockIdx.y * blockDim.x` (x/y typo) and used
            // `d < blockIdx.y * blockDim.y` as the upper bound — an empty
            // range, so the shared-memory tile was never read.
            if (d >= tileY0 && d < tileY0 + (int)blockDim.y) {
                sum += data[threadIdx.y + k][threadIdx.x] * Filter[filterR - k];
            } else {
                sum += Src[d * imageW + x] * Filter[filterR - k];
            }
        }
    }
    Dst[rowBase + x] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
/*
 * Driver: builds a random zero-padded (newW x newH) image and a random filter,
 * runs the separable convolution on the CPU and on the GPU (plain and tiled
 * kernels), times all three, and verifies the tiled GPU result against the CPU
 * reference within `accuracy`.  Returns 0 on success, 1 on any failure.
 */
int main(int argc, char **argv) {
    float
    *h_Filter,
    *h_Input,
    *h_Buffer,
    *h_OutputCPU,
    *d_Input,                      // input image on the device
    *d_OutputGPU1, *d_OutputGPU2,  // device results: plain / tiled kernels
    *d_Filter,                     // filter coefficients on the device
    *h_OutputGPU1, *h_OutputGPU2,  // GPU results copied back to the host
    *d_Buffer;                     // device scratch between row and column pass
    int imageW, newW;
    int imageH, newH;
    unsigned int i, j;
    hipEvent_t start_GPU1, start_GPU2;  // GPU timing events
    hipEvent_t stop_GPU1, stop_GPU2;
    float elapsed_GPU1, elapsed_GPU2;   // GPU elapsed times (ms)
    timeval t1;                         // CPU timing
    timeval t2;
    double elapsed_CPU;                 // CPU elapsed time (s)
    hipError_t err;                     // status of device allocations/copies

    // Image is square: imageW = imageH = N from the command line; the working
    // buffers are padded by FILTER_LENGTH-1 in each dimension.
    if (argc <= 1) {
        printf("I have to terminate because you didnt enter image size.Pls try again \n");
        return 1;
    }
    imageW = atoi(argv[1]);
    printf("You entered image size. Should be a power of two and greater than %d\n", FILTER_LENGTH);
    if (imageW <= FILTER_LENGTH) {
        printf("I have to terminate because you enter image smaller than filter\n");
        return 1;
    }
    imageH = imageW;
    newH = imageH + FILTER_LENGTH - 1;
    newW = imageW + FILTER_LENGTH - 1;
    printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
    printf("Allocating and initializing host arrays...\n");
    h_Filter     = (float *)malloc(FILTER_LENGTH * sizeof(float));
    h_Input      = (float *)malloc(newW * newH * sizeof(float));
    h_Buffer     = (float *)malloc(newW * newH * sizeof(float));
    h_OutputCPU  = (float *)malloc(newW * newH * sizeof(float));
    h_OutputGPU1 = (float *)malloc(newW * newH * sizeof(float));
    h_OutputGPU2 = (float *)malloc(newW * newH * sizeof(float));
    // if either memory allocation failed, report an error message
    if (h_Filter == 0 || h_Input == 0 || h_Buffer == 0 || h_OutputCPU == 0 || h_OutputGPU1 == 0 || h_OutputGPU2 == 0) {
        printf("couldn't allocate memory\n");
        return 1;
    }
    hipSetDevice(0/*cutGetMaxGflopsDevice()*/);
    // Allocate memory on the device.  Keep the *first* failing status instead
    // of overwriting `err` on every call — the original only ever checked the
    // result of the last hipMalloc, hiding earlier failures.
    err = hipMalloc((void**)&d_Input, newW * newH * sizeof(float));
    if (err == hipSuccess) err = hipMalloc((void**)&d_OutputGPU1, newW * newH * sizeof(float));
    if (err == hipSuccess) err = hipMalloc((void**)&d_OutputGPU2, newW * newH * sizeof(float));
    if (err == hipSuccess) err = hipMalloc((void**)&d_Filter, ((2*FILTER_RADIUS)+1)*sizeof(float));
    if (err == hipSuccess) err = hipMalloc((void**)&d_Buffer, newW * newH * sizeof(float));
    if (err != hipSuccess) {
        printf("CUDA Error in allocation memory on device: %s\n", hipGetErrorString(err));
        return 1;
    }
    // Random filter and image; the padding border stays 0 so the kernels may
    // read past the interior without special-casing the edges.
    srand(time(NULL));
    for (i = 0; i < FILTER_LENGTH; i++) {
        h_Filter[i] = (float)(rand() / (float)RAND_MAX);
    }
    for (i = 0; i < newW; i++) {
        for (j = 0; j < newH; j++) {
            // Row-major index is j*newW + i everywhere.  The original mixed
            // newW and newH between branches, which only worked because the
            // padded image is square (newW == newH).
            if (i < FILTER_RADIUS || j < FILTER_RADIUS || i >= (imageW + FILTER_RADIUS) || j >= (imageH + FILTER_RADIUS)) {
                h_Input[j*newW+i] = 0;
            }
            else {
                h_Input[j*newW+i] = (float)(rand()/(float)RAND_MAX);
            }
            h_Buffer[j*newW+i] = 0;
            h_OutputCPU[j*newW+i] = 0;
        }
    }
    // CPU reference: row pass into h_Buffer, then column pass into h_OutputCPU.
    printf("CPU computation...\n");
    gettimeofday(&t1, NULL);
    convolutionRowCPU(h_Buffer, h_Input, h_Filter, newW, newH, FILTER_RADIUS);     // convolve rows
    convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, newW, newH, FILTER_RADIUS); // convolve columns
    gettimeofday(&t2, NULL);
    elapsed_CPU = (t2.tv_sec - t1.tv_sec) + ((t2.tv_usec - t1.tv_usec)/1000000.0);
    printf("CPU elapsed time:%f sec\n", elapsed_CPU);
    printf("GPU computation...\n");
    // Upload the image and the filter — both to a global buffer (plain
    // kernels) and to the __constant__ symbol (tiled kernels) — checking
    // every copy, again keeping the first failing status.
    err = hipMemcpy(d_Input, h_Input, newW * newH * sizeof(float), hipMemcpyHostToDevice);
    if (err == hipSuccess) err = hipMemcpy(d_Filter, h_Filter, ((2*FILTER_RADIUS)+1)*sizeof(float), hipMemcpyHostToDevice);
    if (err == hipSuccess) err = hipMemcpyToSymbol(Filter, h_Filter, ((2*FILTER_RADIUS)+1)*sizeof(float), 0, hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        printf("CUDA Error in loading memory on device: %s\n", hipGetErrorString(err));
        return 1;
    }
    // One thread per pixel of the padded image; newW/newH are assumed to be
    // multiples of the tile size.
    dim3 dimBlock;
    dimBlock.x = TILE_WIDTH;
    dimBlock.y = TILE_HEIGHT;
    dim3 dimGrid(newW/dimBlock.x, newH/dimBlock.y);
    ///// cuda events for gpu time calculation /////
    hipEventCreate(&start_GPU1);
    hipEventCreate(&stop_GPU1);
    hipEventRecord(start_GPU1, 0);
    // Launch the plain (global-memory) kernels.
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( row_Kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_Buffer,d_Input,d_Filter,newW,newH,FILTER_RADIUS);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( column_Kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_OutputGPU1,d_Buffer,d_Filter,newW,newH,FILTER_RADIUS);
    hipDeviceSynchronize();
    hipEventRecord(stop_GPU1, 0);
    hipEventSynchronize(stop_GPU1);
    hipEventElapsedTime(&elapsed_GPU1, start_GPU1, stop_GPU1);
    hipEventDestroy(start_GPU1);
    hipEventDestroy(stop_GPU1);
    ///// cuda events for tiled gpu time calculation /////
    hipEventCreate(&start_GPU2);
    hipEventCreate(&stop_GPU2);
    hipEventRecord(start_GPU2, 0);
    // Launch the tiled (shared-memory) kernels.
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( tiled_row_Kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_Buffer,d_Input,newW,newH,FILTER_RADIUS);
    hipDeviceSynchronize();
    hipLaunchKernelGGL(( tiled_column_Kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_OutputGPU2,d_Buffer,newW,newH,FILTER_RADIUS);
    hipDeviceSynchronize();
    hipEventRecord(stop_GPU2, 0);
    hipEventSynchronize(stop_GPU2);
    hipEventElapsedTime(&elapsed_GPU2, start_GPU2, stop_GPU2);
    hipEventDestroy(start_GPU2);
    hipEventDestroy(stop_GPU2);
    // Surface any launch/execution error from the kernels above.
    hipError_t error = hipGetLastError();
    if (error != hipSuccess) {
        printf("CUDA Error: %s\n", hipGetErrorString(error));
        return 1;
    }
    // Download both GPU results, keeping the first failing status.
    err = hipMemcpy(h_OutputGPU1, d_OutputGPU1, newW * newH * sizeof(float), hipMemcpyDeviceToHost);
    if (err == hipSuccess) err = hipMemcpy(h_OutputGPU2, d_OutputGPU2, newW * newH * sizeof(float), hipMemcpyDeviceToHost);
    if (err != hipSuccess) {
        printf("CUDA Error in reading memory from device: %s\n", hipGetErrorString(err));
        return 1;
    }
    printf("GPU elapsed time:%f sec \n ", elapsed_GPU1/1000);
    printf("--tiled--GPU elapsed time:%f sec \n ", elapsed_GPU2/1000);
    printf("1.GPU:%d=%f\n", imageW * imageH-1, h_OutputGPU1[imageW * imageH-1]);
    printf("2.--tiled--GPU:%d=%f\n", imageW * imageH-1, h_OutputGPU2[imageW * imageH-1]);
    printf("3.CPU:%d=%f\n", imageW * imageH-1, h_OutputCPU[imageW * imageH-1]);
    // CPU Vs GPU (comparison): element-wise check of the tiled result.
    for (i = 0; i < newW * newH; i++) {
        if (ABS(h_OutputCPU[i] - h_OutputGPU2[i]) >= accuracy) {
            printf("ERROR at element i:%d , accuracy error so i have to terminate sorry \n", i);
            return 1;
        }
    }
    // free all the allocated memory
    free(h_OutputCPU);
    free(h_Buffer);
    free(h_Input);
    free(h_Filter);
    free(h_OutputGPU1);
    free(h_OutputGPU2);
    hipFree(d_Input);
    hipFree(d_Filter);
    hipFree(d_OutputGPU1);
    hipFree(d_OutputGPU2);
    hipFree(d_Buffer);
    printf("success !!!! \n");
    hipDeviceReset();
    return 0;
}
| 8062fbbe94d2548c722f25dfda496eb81ad67d9b.cu | /*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <sys/time.h>
#define FILTER_RADIUS 16
#define FILTER_LENGTH (2 * FILTER_RADIUS + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 5e-4
#define TILE_WIDTH 32
#define TILE_HEIGHT 32
__constant__ float Filter[FILTER_LENGTH]; // gia na apothikeusw tous suntelestes tou filtrou
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
/*
 * CPU reference: 1-D convolution of each image row with h_Filter.
 * Only interior pixels are computed — a border of width filterR on every
 * side is skipped (callers pre-zero h_Dst so the border stays 0).
 *
 *  h_Dst    - output image, imageW x imageH, row-major
 *  h_Src    - input image, same layout
 *  h_Filter - filter taps, length 2*filterR + 1
 */
void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter,
                       int imageW, int imageH, int filterR) {
    int x, y, k;

    for (y = filterR; y < imageH - filterR; y++) {
        for (x = filterR; x < imageW - filterR; x++) {
            float sum = 0;

            for (k = -filterR; k <= filterR; k++) {
                int d = x + k;
                sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
            }
            // Store once per pixel.  The original wrote the running partial
            // sum on every tap of the inner loop; the final value is the
            // same, but this avoids 2*filterR redundant stores per pixel.
            h_Dst[y * imageW + x] = sum;
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
/*
 * CPU reference: 1-D convolution of each image column with h_Filter.
 * Only interior pixels are computed — a border of width filterR on every
 * side is skipped (callers pre-zero h_Dst so the border stays 0).
 *
 *  h_Dst    - output image, imageW x imageH, row-major
 *  h_Src    - input image, same layout
 *  h_Filter - filter taps, length 2*filterR + 1
 */
void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter,
                          int imageW, int imageH, int filterR) {
    int x, y, k;

    for (y = filterR; y < imageH - filterR; y++) {
        for (x = filterR; x < imageW - filterR; x++) {
            float sum = 0;

            for (k = -filterR; k <= filterR; k++) {
                int d = y + k;
                sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
            }
            // Store once per pixel instead of once per tap (the original
            // re-wrote the partial sum inside the inner loop).
            h_Dst[y * imageW + x] = sum;
        }
    }
}
///// Reference row convolution filter in GPU /////
///// Row convolution on the GPU: one thread per pixel of the padded image /////
// Grid must cover imageW x imageH exactly (callers launch newW/32 x newH/32
// blocks of 32x32 threads).  Pixels inside the horizontal filterR border get 0.
__global__ void row_Kernel(float *Dst, float *Src, float *filter, int imageW, int imageH, int filterR) {
    int k;
    float sum = 0;  // output value computed by this thread

    const int gx = blockDim.x * blockIdx.x + threadIdx.x;  // global column
    const int gy = blockDim.y * blockIdx.y + threadIdx.y;  // global row

    // Skip the filterR-wide left/right border.  The x index must be compared
    // against the image *width*: the original tested `gx >= imageH - filterR`,
    // which is only correct when the image is square.
    if (!(gx < filterR || gx >= imageW - filterR)) {
        for (k = -filterR; k <= filterR; k++) {
            int d = gx + k;  // source column for this tap
            sum += Src[gy * imageW + d] * filter[filterR - k];
        }
    }
    Dst[gy * imageW + gx] = sum;
}
///// Reference column convolution filter in GPU /////
///// Column convolution on the GPU: one thread per pixel of the padded image /////
// Pixels inside the vertical filterR border are left at 0.
__global__ void column_Kernel(float *Dst, float *Src, float *filter, int imageW, int imageH, int filterR) {
    const int col = blockDim.x * blockIdx.x + threadIdx.x;  // global x
    const int row = blockDim.y * blockIdx.y + threadIdx.y;  // global y
    float acc = 0;  // output value computed by this thread

    if (row >= filterR && row < imageH - filterR) {
        for (int k = -filterR; k <= filterR; k++) {
            // walk filterR pixels up and down the same column
            acc += Src[(row + k) * imageW + col] * filter[filterR - k];
        }
    }
    Dst[row * imageW + col] = acc;
}
///// Reference tiled row convolution filter in GPU /////
///// Tiled row convolution in GPU /////
// Each block stages its 32x32 tile of Src in shared memory; filter taps that
// fall inside the tile are read from shared memory, taps that spill past the
// tile edge fall back to global memory.  Filter lives in __constant__ memory.
// Requires blockDim == (TILE_WIDTH, TILE_HEIGHT).
__global__ void tiled_row_Kernel(float *Dst, float *Src, int imageW, int imageH, int filterR) {
    __shared__ float data[TILE_WIDTH][TILE_HEIGHT];

    const int x = blockIdx.x * blockDim.x + threadIdx.x;  // global column
    const int y = blockIdx.y * blockDim.y + threadIdx.y;  // global row
    const int rowBase = y * imageW;                       // start of this thread's row
    const int tileX0  = blockIdx.x * blockDim.x;          // first column held in the tile
    float sum = 0;

    // Stage the tile.  Every thread reaches the barrier before any divergent
    // branch below.
    data[threadIdx.y][threadIdx.x] = Src[rowBase + x];
    __syncthreads();

    // Skip the filterR-wide border; compare x against the image *width*
    // (the original used imageH, correct only because images here are square).
    if (!(x < filterR || x >= imageW - filterR)) {
        for (int k = -filterR; k <= filterR; k++) {
            int d = x + k;
            // Fixed tile test: the original condition was
            // `d >= tileX0 && d < tileX0`, which is always false, so the
            // shared-memory tile was loaded but never actually read.
            if (d >= tileX0 && d < tileX0 + (int)blockDim.x) {
                sum += data[threadIdx.y][threadIdx.x + k] * Filter[filterR - k];
            } else {
                sum += Src[rowBase + d] * Filter[filterR - k];
            }
        }
    }
    Dst[rowBase + x] = sum;
}
///// Reference tiled_column convolution filter in GPU /////
///// Tiled column convolution in GPU /////
// Mirror of tiled_row_Kernel along y: the block's 32x32 tile is staged in
// shared memory; taps inside the tile hit shared memory, spills go to global.
// Filter lives in __constant__ memory.  Requires blockDim == (TILE_WIDTH, TILE_HEIGHT).
__global__ void tiled_column_Kernel(float *Dst, float *Src, int imageW, int imageH, int filterR) {
    __shared__ float data[TILE_WIDTH][TILE_HEIGHT];

    const int x = blockIdx.x * blockDim.x + threadIdx.x;  // global column
    const int y = blockIdx.y * blockDim.y + threadIdx.y;  // global row
    const int rowBase = y * imageW;                       // start of this thread's row
    const int tileY0  = blockIdx.y * blockDim.y;          // first row held in the tile
    float sum = 0;

    // Stage the tile before any divergent branch so all threads hit the barrier.
    data[threadIdx.y][threadIdx.x] = Src[y * imageW + x];
    __syncthreads();

    if (!(y < filterR || y >= imageH - filterR)) {
        for (int k = -filterR; k <= filterR; k++) {
            int d = y + k;
            // Fixed tile test: the original compared against
            // `blockIdx.y * blockDim.x` (x/y typo) and used
            // `d < blockIdx.y * blockDim.y` as the upper bound — an empty
            // range, so the shared-memory tile was never read.
            if (d >= tileY0 && d < tileY0 + (int)blockDim.y) {
                sum += data[threadIdx.y + k][threadIdx.x] * Filter[filterR - k];
            } else {
                sum += Src[d * imageW + x] * Filter[filterR - k];
            }
        }
    }
    Dst[rowBase + x] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
/*
 * Driver: builds a random zero-padded (newW x newH) image and a random filter,
 * runs the separable convolution on the CPU and on the GPU (plain and tiled
 * kernels), times all three, and verifies the tiled GPU result against the CPU
 * reference within `accuracy`.  Returns 0 on success, 1 on any failure.
 */
int main(int argc, char **argv) {
    float
    *h_Filter,
    *h_Input,
    *h_Buffer,
    *h_OutputCPU,
    *d_Input,                      // input image on the device
    *d_OutputGPU1, *d_OutputGPU2,  // device results: plain / tiled kernels
    *d_Filter,                     // filter coefficients on the device
    *h_OutputGPU1, *h_OutputGPU2,  // GPU results copied back to the host
    *d_Buffer;                     // device scratch between row and column pass
    int imageW, newW;
    int imageH, newH;
    unsigned int i, j;
    cudaEvent_t start_GPU1, start_GPU2;  // GPU timing events
    cudaEvent_t stop_GPU1, stop_GPU2;
    float elapsed_GPU1, elapsed_GPU2;    // GPU elapsed times (ms)
    timeval t1;                          // CPU timing
    timeval t2;
    double elapsed_CPU;                  // CPU elapsed time (s)
    cudaError_t err;                     // status of device allocations/copies

    // Image is square: imageW = imageH = N from the command line; the working
    // buffers are padded by FILTER_LENGTH-1 in each dimension.
    if (argc <= 1) {
        printf("I have to terminate because you didnt enter image size.Pls try again \n");
        return 1;
    }
    imageW = atoi(argv[1]);
    printf("You entered image size. Should be a power of two and greater than %d\n", FILTER_LENGTH);
    if (imageW <= FILTER_LENGTH) {
        printf("I have to terminate because you enter image smaller than filter\n");
        return 1;
    }
    imageH = imageW;
    newH = imageH + FILTER_LENGTH - 1;
    newW = imageW + FILTER_LENGTH - 1;
    printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
    printf("Allocating and initializing host arrays...\n");
    h_Filter     = (float *)malloc(FILTER_LENGTH * sizeof(float));
    h_Input      = (float *)malloc(newW * newH * sizeof(float));
    h_Buffer     = (float *)malloc(newW * newH * sizeof(float));
    h_OutputCPU  = (float *)malloc(newW * newH * sizeof(float));
    h_OutputGPU1 = (float *)malloc(newW * newH * sizeof(float));
    h_OutputGPU2 = (float *)malloc(newW * newH * sizeof(float));
    // if either memory allocation failed, report an error message
    if (h_Filter == 0 || h_Input == 0 || h_Buffer == 0 || h_OutputCPU == 0 || h_OutputGPU1 == 0 || h_OutputGPU2 == 0) {
        printf("couldn't allocate memory\n");
        return 1;
    }
    cudaSetDevice(0/*cutGetMaxGflopsDevice()*/);
    // Allocate memory on the device.  Keep the *first* failing status instead
    // of overwriting `err` on every call — the original only ever checked the
    // result of the last cudaMalloc, hiding earlier failures.
    err = cudaMalloc((void**)&d_Input, newW * newH * sizeof(float));
    if (err == cudaSuccess) err = cudaMalloc((void**)&d_OutputGPU1, newW * newH * sizeof(float));
    if (err == cudaSuccess) err = cudaMalloc((void**)&d_OutputGPU2, newW * newH * sizeof(float));
    if (err == cudaSuccess) err = cudaMalloc((void**)&d_Filter, ((2*FILTER_RADIUS)+1)*sizeof(float));
    if (err == cudaSuccess) err = cudaMalloc((void**)&d_Buffer, newW * newH * sizeof(float));
    if (err != cudaSuccess) {
        printf("CUDA Error in allocation memory on device: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Random filter and image; the padding border stays 0 so the kernels may
    // read past the interior without special-casing the edges.
    srand(time(NULL));
    for (i = 0; i < FILTER_LENGTH; i++) {
        h_Filter[i] = (float)(rand() / (float)RAND_MAX);
    }
    for (i = 0; i < newW; i++) {
        for (j = 0; j < newH; j++) {
            // Row-major index is j*newW + i everywhere.  The original mixed
            // newW and newH between branches, which only worked because the
            // padded image is square (newW == newH).
            if (i < FILTER_RADIUS || j < FILTER_RADIUS || i >= (imageW + FILTER_RADIUS) || j >= (imageH + FILTER_RADIUS)) {
                h_Input[j*newW+i] = 0;
            }
            else {
                h_Input[j*newW+i] = (float)(rand()/(float)RAND_MAX);
            }
            h_Buffer[j*newW+i] = 0;
            h_OutputCPU[j*newW+i] = 0;
        }
    }
    // CPU reference: row pass into h_Buffer, then column pass into h_OutputCPU.
    printf("CPU computation...\n");
    gettimeofday(&t1, NULL);
    convolutionRowCPU(h_Buffer, h_Input, h_Filter, newW, newH, FILTER_RADIUS);     // convolve rows
    convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, newW, newH, FILTER_RADIUS); // convolve columns
    gettimeofday(&t2, NULL);
    elapsed_CPU = (t2.tv_sec - t1.tv_sec) + ((t2.tv_usec - t1.tv_usec)/1000000.0);
    printf("CPU elapsed time:%f sec\n", elapsed_CPU);
    printf("GPU computation...\n");
    // Upload the image and the filter — both to a global buffer (plain
    // kernels) and to the __constant__ symbol (tiled kernels) — checking
    // every copy, again keeping the first failing status.
    err = cudaMemcpy(d_Input, h_Input, newW * newH * sizeof(float), cudaMemcpyHostToDevice);
    if (err == cudaSuccess) err = cudaMemcpy(d_Filter, h_Filter, ((2*FILTER_RADIUS)+1)*sizeof(float), cudaMemcpyHostToDevice);
    if (err == cudaSuccess) err = cudaMemcpyToSymbol(Filter, h_Filter, ((2*FILTER_RADIUS)+1)*sizeof(float), 0, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        printf("CUDA Error in loading memory on device: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // One thread per pixel of the padded image; newW/newH are assumed to be
    // multiples of the tile size.
    dim3 dimBlock;
    dimBlock.x = TILE_WIDTH;
    dimBlock.y = TILE_HEIGHT;
    dim3 dimGrid(newW/dimBlock.x, newH/dimBlock.y);
    ///// cuda events for gpu time calculation /////
    cudaEventCreate(&start_GPU1);
    cudaEventCreate(&stop_GPU1);
    cudaEventRecord(start_GPU1, 0);
    // Launch the plain (global-memory) kernels.
    cudaDeviceSynchronize();
    row_Kernel<<<dimGrid,dimBlock>>>(d_Buffer,d_Input,d_Filter,newW,newH,FILTER_RADIUS);
    cudaDeviceSynchronize();
    column_Kernel<<<dimGrid,dimBlock>>>(d_OutputGPU1,d_Buffer,d_Filter,newW,newH,FILTER_RADIUS);
    cudaDeviceSynchronize();
    cudaEventRecord(stop_GPU1, 0);
    cudaEventSynchronize(stop_GPU1);
    cudaEventElapsedTime(&elapsed_GPU1, start_GPU1, stop_GPU1);
    cudaEventDestroy(start_GPU1);
    cudaEventDestroy(stop_GPU1);
    ///// cuda events for tiled gpu time calculation /////
    cudaEventCreate(&start_GPU2);
    cudaEventCreate(&stop_GPU2);
    cudaEventRecord(start_GPU2, 0);
    // Launch the tiled (shared-memory) kernels.
    cudaDeviceSynchronize();
    tiled_row_Kernel<<<dimGrid,dimBlock>>>(d_Buffer,d_Input,newW,newH,FILTER_RADIUS);
    cudaDeviceSynchronize();
    tiled_column_Kernel<<<dimGrid,dimBlock>>>(d_OutputGPU2,d_Buffer,newW,newH,FILTER_RADIUS);
    cudaDeviceSynchronize();
    cudaEventRecord(stop_GPU2, 0);
    cudaEventSynchronize(stop_GPU2);
    cudaEventElapsedTime(&elapsed_GPU2, start_GPU2, stop_GPU2);
    cudaEventDestroy(start_GPU2);
    cudaEventDestroy(stop_GPU2);
    // Surface any launch/execution error from the kernels above.
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("CUDA Error: %s\n", cudaGetErrorString(error));
        return 1;
    }
    // Download both GPU results, keeping the first failing status.
    err = cudaMemcpy(h_OutputGPU1, d_OutputGPU1, newW * newH * sizeof(float), cudaMemcpyDeviceToHost);
    if (err == cudaSuccess) err = cudaMemcpy(h_OutputGPU2, d_OutputGPU2, newW * newH * sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        printf("CUDA Error in reading memory from device: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("GPU elapsed time:%f sec \n ", elapsed_GPU1/1000);
    printf("--tiled--GPU elapsed time:%f sec \n ", elapsed_GPU2/1000);
    printf("1.GPU:%d=%f\n", imageW * imageH-1, h_OutputGPU1[imageW * imageH-1]);
    printf("2.--tiled--GPU:%d=%f\n", imageW * imageH-1, h_OutputGPU2[imageW * imageH-1]);
    printf("3.CPU:%d=%f\n", imageW * imageH-1, h_OutputCPU[imageW * imageH-1]);
    // CPU Vs GPU (comparison): element-wise check of the tiled result.
    for (i = 0; i < newW * newH; i++) {
        if (ABS(h_OutputCPU[i] - h_OutputGPU2[i]) >= accuracy) {
            printf("ERROR at element i:%d , accuracy error so i have to terminate sorry \n", i);
            return 1;
        }
    }
    // free all the allocated memory
    free(h_OutputCPU);
    free(h_Buffer);
    free(h_Input);
    free(h_Filter);
    free(h_OutputGPU1);
    free(h_OutputGPU2);
    cudaFree(d_Input);
    cudaFree(d_Filter);
    cudaFree(d_OutputGPU1);
    cudaFree(d_OutputGPU2);
    cudaFree(d_Buffer);
    printf("success !!!! \n");
    cudaDeviceReset();
    return 0;
}
|
d605c3d8a4f7f835cc8cbf00746f0d172f35dd2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/filters.hpp"
#include <opencv2/cudev/ptr2d/texture.hpp>
namespace cv { namespace cuda { namespace device
{
namespace imgproc
{
// Gather kernel: dst(y, x) = src(mapy(y, x), mapx(y, x)).
// Interpolation and border handling are baked into the Ptr2D source wrapper.
// One thread per destination pixel; out-of-range threads return early.
template <typename Ptr2D, typename T> __global__ void remap(const Ptr2D src, const PtrStepf mapx, const PtrStepf mapy, PtrStepSz<T> dst)
{
    const int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
    const int dst_y = blockDim.y * blockIdx.y + threadIdx.y;

    if (dst_x >= dst.cols || dst_y >= dst.rows)
        return;

    // Fetch the source coordinates for this destination pixel from the maps.
    const float src_x = mapx.ptr(dst_y)[dst_x];
    const float src_y = mapy.ptr(dst_y)[dst_x];

    dst.ptr(dst_y)[dst_x] = saturate_cast<T>(src(src_y, src_x));
}
// Asynchronous remap dispatcher: wraps the source in the requested border
// policy B and interpolation Filter, then launches the remap kernel on the
// caller's stream.  No synchronization is performed here — completion is the
// caller's responsibility.
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcherStream
{
static void call(PtrStepSz<T> src, PtrStepSzf mapx, PtrStepSzf mapy, PtrStepSz<T> dst, const float* borderValue, hipStream_t stream, bool)
{
// Accumulation/border type: float vector with the same channel count as T.
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
// 32x8 threads per block; grid sized to cover every destination pixel.
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
// Border policy first, then the interpolation filter layered on top of it.
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader<PtrStep<T>, B<work_type>> brdSrc(src, brd);
Filter<BorderReader<PtrStep<T>, B<work_type>>> filter_src(brdSrc);
hipLaunchKernelGGL(( remap), dim3(grid), dim3(block), 0, stream, filter_src, mapx, mapy, dst);
// Only the launch status is checked; execution errors surface later.
cudaSafeCall( hipGetLastError() );
}
};
// Synchronous remap dispatcher (default stream): same construction as the
// stream variant, but blocks until the kernel finishes.  srcWhole/xoff/yoff
// exist only to match the texture dispatcher's signature and are unused here.
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcherNonStream
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy, PtrStepSz<T> dst, const float* borderValue, bool)
{
CV_UNUSED(srcWhole);
CV_UNUSED(xoff);
CV_UNUSED(yoff);
// Accumulation/border type: float vector with the same channel count as T.
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
// 32x8 threads per block; grid sized to cover every destination pixel.
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
// Border policy first, then the interpolation filter layered on top of it.
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader<PtrStep<T>, B<work_type>> brdSrc(src, brd);
Filter<BorderReader<PtrStep<T>, B<work_type>>> filter_src(brdSrc);
hipLaunchKernelGGL(( remap), dim3(grid), dim3(block), 0, 0, filter_src, mapx, mapy, dst);
cudaSafeCall( hipGetLastError() );
// Block until the kernel has finished (synchronous contract).
cudaSafeCall( hipDeviceSynchronize() );
}
};
// Synchronous remap dispatcher that reads the source through a texture.
// When `src` is the whole image, a plain texture is bound; when `src` is a
// sub-rectangle of `srcWhole`, an offset texture (yoff, xoff) is used so
// texture coordinates stay relative to the ROI.
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcherNonStreamTex
{
static void call(PtrStepSz< T > src, PtrStepSz< T > srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy,
PtrStepSz< T > dst, const float* borderValue, bool cc20)
{
// Accumulation/border type: float vector with the same channel count as T.
typedef typename TypeVec<float, VecTraits< T >::cn>::vec_type work_type;
// Smaller blocks (32x4) on pre-cc20 devices, 32x8 otherwise.
dim3 block(32, cc20 ? 8 : 4);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
if (srcWhole.cols == src.cols && srcWhole.rows == src.rows)
{
// ROI covers the whole image: bind a plain texture over it.
cudev::Texture<T> texSrcWhole(srcWhole);
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader<cudev::TexturePtr<T>, B<work_type>> brdSrc(texSrcWhole, brd);
Filter<BorderReader<cudev::TexturePtr<T>, B<work_type>>> filter_src(brdSrc);
hipLaunchKernelGGL(( remap), dim3(grid), dim3(block), 0, 0, filter_src, mapx, mapy, dst);
}
else {
// ROI is a sub-rectangle: use an offset texture so reads are ROI-relative.
cudev::TextureOff<T> texSrcWhole(srcWhole, yoff, xoff);
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader<cudev::TextureOffPtr<T>, B<work_type>> brdSrc(texSrcWhole, brd);
Filter<BorderReader<cudev::TextureOffPtr<T>, B<work_type>>> filter_src(brdSrc);
hipLaunchKernelGGL(( remap), dim3(grid), dim3(block) , 0, 0, filter_src, mapx, mapy, dst);
}
cudaSafeCall( hipGetLastError() );
// Block until the kernel has finished (synchronous contract).
cudaSafeCall( hipDeviceSynchronize() );
}
};
// BrdReplicate texture specialization: for the full-image case the filter
// reads the texture directly (no BorderReader wrapper); the ROI case still
// wraps the offset texture with an explicit replicate border handler.
template <template <typename> class Filter, typename T> struct RemapDispatcherNonStreamTex<Filter, BrdReplicate, T>
{
// borderValue is unused: replicate borders need no constant fill value.
static void call(PtrStepSz< T > src, PtrStepSz< T > srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy,
PtrStepSz< T > dst, const float*, bool)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
if (srcWhole.cols == src.cols && srcWhole.rows == src.rows)
{
cudev::Texture<T> texSrcWhole(srcWhole);
Filter<cudev::TexturePtr<T>> filter_src(texSrcWhole);
hipLaunchKernelGGL(( remap), dim3(grid), dim3(block), 0, 0, filter_src, mapx, mapy, dst);
}
else
{
cudev::TextureOff<T> texSrcWhole(srcWhole, yoff, xoff);
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader<cudev::TextureOffPtr<T>, BrdReplicate<T>> brdSrc(texSrcWhole, brd);
Filter<BorderReader<cudev::TextureOffPtr<T>, BrdReplicate<T>>> filter_src(brdSrc);
hipLaunchKernelGGL(( remap), dim3(grid), dim3(block), 0, 0, filter_src, mapx, mapy, dst);
}
// Sync before the scoped texture objects are destroyed.
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
};
// Route the listed 1-, 2- and 4-channel element types through the
// texture-based implementation above. 3-channel types are deliberately not
// listed here and therefore fall back to the generic (global-memory)
// RemapDispatcherNonStream primary template.
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, uchar> :
RemapDispatcherNonStreamTex<Filter, B, uchar> {};
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, uchar4> :
RemapDispatcherNonStreamTex<Filter, B, uchar4> {};
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, ushort> :
RemapDispatcherNonStreamTex<Filter, B, ushort> {};
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, ushort4> :
RemapDispatcherNonStreamTex<Filter, B, ushort4> {};
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, short> :
RemapDispatcherNonStreamTex<Filter, B, short> {};
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, short4> :
RemapDispatcherNonStreamTex<Filter, B, short4> {};
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, float> :
RemapDispatcherNonStreamTex<Filter, B, float> {};
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, float4> :
RemapDispatcherNonStreamTex<Filter, B, float4> {};
// BrdReplicate variants pick up the BrdReplicate specialization of
// RemapDispatcherNonStreamTex defined above.
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, uchar> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, uchar> {};
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, uchar4> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, uchar4> {};
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, ushort> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, ushort> {};
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, ushort4> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, ushort4> {};
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, short> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, short> {};
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, short4> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, short4> {};
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, float> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, float> {};
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, float4> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, float4> {};
// Top-level remap dispatcher: selects the asynchronous (user stream) or
// synchronous (default stream) implementation.
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcher
{
    static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy,
        PtrStepSz<T> dst, const float* borderValue, hipStream_t stream, bool cc20)
    {
        if (stream != 0)
            RemapDispatcherStream<Filter, B, T>::call(src, mapx, mapy, dst, borderValue, stream, cc20);
        else
            RemapDispatcherNonStream<Filter, B, T>::call(src, srcWhole, xoff, yoff, mapx, mapy, dst, borderValue, cc20);
    }
};
// Host entry point for GPU remap.
// 'interpolation' indexes the table rows: 0 = nearest (PointFilter),
// 1 = bilinear (LinearFilter), 2 = bicubic (CubicFilter).
// 'borderMode' indexes the columns: 0 = constant, 1 = replicate,
// 2 = reflect, 3 = wrap, 4 = reflect101.
// NOTE(review): the indices are not range-checked before indexing the
// 3x5 table -- callers must pass interpolation in [0,2] and borderMode
// in [0,4]; confirm upstream validation.
template <typename T> void remap_gpu(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap,
PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap,
PtrStepSz<T> dst, const float* borderValue, hipStream_t stream, bool cc20);
static const caller_t callers[3][5] =
{
{
RemapDispatcher<PointFilter, BrdConstant, T>::call,
RemapDispatcher<PointFilter, BrdReplicate, T>::call,
RemapDispatcher<PointFilter, BrdReflect, T>::call,
RemapDispatcher<PointFilter, BrdWrap, T>::call,
RemapDispatcher<PointFilter, BrdReflect101, T>::call
},
{
RemapDispatcher<LinearFilter, BrdConstant, T>::call,
RemapDispatcher<LinearFilter, BrdReplicate, T>::call,
RemapDispatcher<LinearFilter, BrdReflect, T>::call,
RemapDispatcher<LinearFilter, BrdWrap, T>::call,
RemapDispatcher<LinearFilter, BrdReflect101, T>::call
},
{
RemapDispatcher<CubicFilter, BrdConstant, T>::call,
RemapDispatcher<CubicFilter, BrdReplicate, T>::call,
RemapDispatcher<CubicFilter, BrdReflect, T>::call,
RemapDispatcher<CubicFilter, BrdWrap, T>::call,
RemapDispatcher<CubicFilter, BrdReflect101, T>::call
}
};
// Reinterpret the untyped byte views with the element type T.
callers[interpolation][borderMode](static_cast<PtrStepSz<T>>(src), static_cast<PtrStepSz<T>>(srcWhole), xoff, yoff, xmap, ymap,
static_cast<PtrStepSz<T>>(dst), borderValue, stream, cc20);
}
// Explicit instantiations for the supported element types (1-, 3- and
// 4-channel variants of uchar/ushort/short/float; 2-channel is omitted).
template void remap_gpu<uchar >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<uchar3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<uchar4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<ushort >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<ushort3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<ushort4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<short >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<short3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<short4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<float >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<float3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
template void remap_gpu<float4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, hipStream_t stream, bool cc20);
} // namespace imgproc
}}} // namespace cv { namespace cuda { namespace cudev
#endif /* CUDA_DISABLER */
| d605c3d8a4f7f835cc8cbf00746f0d172f35dd2c.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/filters.hpp"
#include <opencv2/cudev/ptr2d/texture.hpp>
namespace cv { namespace cuda { namespace device
{
namespace imgproc
{
// Remap kernel: one thread per destination pixel. Each thread looks up its
// source coordinates in (mapx, mapy) and samples the filtered/bordered
// source there, saturating the result to T.
template <typename Ptr2D, typename T> __global__ void remap(const Ptr2D src, const PtrStepf mapx, const PtrStepf mapy, PtrStepSz<T> dst)
{
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    // Guard: the launch grid may overhang the destination image.
    if (x >= dst.cols || y >= dst.rows)
        return;
    const float xcoo = mapx.ptr(y)[x];
    const float ycoo = mapy.ptr(y)[x];
    dst.ptr(y)[x] = saturate_cast<T>(src(ycoo, xcoo));
}
// Asynchronous remap dispatch: enqueues the kernel on the caller's stream and
// returns without synchronizing; completion is ordered by 'stream'.
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcherStream
{
    static void call(PtrStepSz<T> src, PtrStepSzf mapx, PtrStepSzf mapy, PtrStepSz<T> dst, const float* borderValue, cudaStream_t stream, bool)
    {
        // Border handling works in float space, one float per source channel.
        using work_type = typename TypeVec<float, VecTraits<T>::cn>::vec_type;
        const dim3 block(32, 8);
        const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
        // Wrap the raw source with border handling, then with the interpolation filter.
        B<work_type> border(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
        BorderReader<PtrStep<T>, B<work_type>> borderedSrc(src, border);
        Filter<BorderReader<PtrStep<T>, B<work_type>>> sampler(borderedSrc);
        remap<<<grid, block, 0, stream>>>(sampler, mapx, mapy, dst);
        cudaSafeCall( cudaGetLastError() );
    }
};
// Generic synchronous path (no texture): launches on the legacy default
// stream and blocks until completion.
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcherNonStream
{
static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy, PtrStepSz<T> dst, const float* borderValue, bool)
{
// Whole-image view and offsets are only used by the texture specializations.
CV_UNUSED(srcWhole);
CV_UNUSED(xoff);
CV_UNUSED(yoff);
typedef typename TypeVec<float, VecTraits<T>::cn>::vec_type work_type;
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
// Border handler -> bordered reader -> interpolation filter.
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader<PtrStep<T>, B<work_type>> brdSrc(src, brd);
Filter<BorderReader<PtrStep<T>, B<work_type>>> filter_src(brdSrc);
remap<<<grid, block>>>(filter_src, mapx, mapy, dst);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
};
// Texture-path synchronous dispatch: reads the source through a texture
// object. Only used for the element types explicitly routed to it below.
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcherNonStreamTex
{
// cc20 (compute capability >= 2.0) only affects the block height here.
static void call(PtrStepSz< T > src, PtrStepSz< T > srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy,
PtrStepSz< T > dst, const float* borderValue, bool cc20)
{
typedef typename TypeVec<float, VecTraits< T >::cn>::vec_type work_type;
dim3 block(32, cc20 ? 8 : 4);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
// No ROI: bind the texture to the full image directly.
if (srcWhole.cols == src.cols && srcWhole.rows == src.rows)
{
cudev::Texture<T> texSrcWhole(srcWhole);
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader<cudev::TexturePtr<T>, B<work_type>> brdSrc(texSrcWhole, brd);
Filter<BorderReader<cudev::TexturePtr<T>, B<work_type>>> filter_src(brdSrc);
remap<<<grid, block>>>(filter_src, mapx, mapy, dst);
}
else {
// ROI case: the texture covers the whole image and reads are shifted
// by the (yoff, xoff) offsets baked into the TextureOff wrapper.
cudev::TextureOff<T> texSrcWhole(srcWhole, yoff, xoff);
B<work_type> brd(src.rows, src.cols, VecTraits<work_type>::make(borderValue));
BorderReader<cudev::TextureOffPtr<T>, B<work_type>> brdSrc(texSrcWhole, brd);
Filter<BorderReader<cudev::TextureOffPtr<T>, B<work_type>>> filter_src(brdSrc);
remap<<<grid, block >>>(filter_src, mapx, mapy, dst);
}
// NOTE: the device synchronization also guarantees the scoped texture
// objects above are still alive while the kernel executes.
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
};
// BrdReplicate texture specialization: for the full-image case the filter
// reads the texture directly (no BorderReader wrapper); the ROI case still
// wraps the offset texture with an explicit replicate border handler.
template <template <typename> class Filter, typename T> struct RemapDispatcherNonStreamTex<Filter, BrdReplicate, T>
{
// borderValue is unused: replicate borders need no constant fill value.
static void call(PtrStepSz< T > src, PtrStepSz< T > srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy,
PtrStepSz< T > dst, const float*, bool)
{
dim3 block(32, 8);
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
if (srcWhole.cols == src.cols && srcWhole.rows == src.rows)
{
cudev::Texture<T> texSrcWhole(srcWhole);
Filter<cudev::TexturePtr<T>> filter_src(texSrcWhole);
remap<<<grid, block>>>(filter_src, mapx, mapy, dst);
}
else
{
cudev::TextureOff<T> texSrcWhole(srcWhole, yoff, xoff);
BrdReplicate<T> brd(src.rows, src.cols);
BorderReader<cudev::TextureOffPtr<T>, BrdReplicate<T>> brdSrc(texSrcWhole, brd);
Filter<BorderReader<cudev::TextureOffPtr<T>, BrdReplicate<T>>> filter_src(brdSrc);
remap<<<grid, block>>>(filter_src, mapx, mapy, dst);
}
// Sync before the scoped texture objects are destroyed.
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
};
// Route the listed 1-, 2- and 4-channel element types through the
// texture-based implementation above. 3-channel types are deliberately not
// listed here and therefore fall back to the generic (global-memory)
// RemapDispatcherNonStream primary template.
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, uchar> :
RemapDispatcherNonStreamTex<Filter, B, uchar> {};
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, uchar4> :
RemapDispatcherNonStreamTex<Filter, B, uchar4> {};
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, ushort> :
RemapDispatcherNonStreamTex<Filter, B, ushort> {};
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, ushort4> :
RemapDispatcherNonStreamTex<Filter, B, ushort4> {};
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, short> :
RemapDispatcherNonStreamTex<Filter, B, short> {};
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, short4> :
RemapDispatcherNonStreamTex<Filter, B, short4> {};
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, float> :
RemapDispatcherNonStreamTex<Filter, B, float> {};
template <template <typename> class Filter, template <typename> class B> struct RemapDispatcherNonStream<Filter, B, float4> :
RemapDispatcherNonStreamTex<Filter, B, float4> {};
// BrdReplicate variants pick up the BrdReplicate specialization of
// RemapDispatcherNonStreamTex defined above.
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, uchar> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, uchar> {};
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, uchar4> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, uchar4> {};
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, ushort> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, ushort> {};
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, ushort4> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, ushort4> {};
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, short> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, short> {};
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, short4> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, short4> {};
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, float> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, float> {};
template <template <typename> class Filter> struct RemapDispatcherNonStream<Filter, BrdReplicate, float4> :
RemapDispatcherNonStreamTex<Filter, BrdReplicate, float4> {};
// Top-level remap dispatcher: selects the asynchronous (user stream) or
// synchronous (default stream) implementation.
template <template <typename> class Filter, template <typename> class B, typename T> struct RemapDispatcher
{
    static void call(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSzf mapx, PtrStepSzf mapy,
        PtrStepSz<T> dst, const float* borderValue, cudaStream_t stream, bool cc20)
    {
        if (stream != 0)
            RemapDispatcherStream<Filter, B, T>::call(src, mapx, mapy, dst, borderValue, stream, cc20);
        else
            RemapDispatcherNonStream<Filter, B, T>::call(src, srcWhole, xoff, yoff, mapx, mapy, dst, borderValue, cc20);
    }
};
// Host entry point for GPU remap.
// 'interpolation' indexes the table rows: 0 = nearest (PointFilter),
// 1 = bilinear (LinearFilter), 2 = bicubic (CubicFilter).
// 'borderMode' indexes the columns: 0 = constant, 1 = replicate,
// 2 = reflect, 3 = wrap, 4 = reflect101.
// NOTE(review): the indices are not range-checked before indexing the
// 3x5 table -- callers must pass interpolation in [0,2] and borderMode
// in [0,4]; confirm upstream validation.
template <typename T> void remap_gpu(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap,
PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20)
{
typedef void (*caller_t)(PtrStepSz<T> src, PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap,
PtrStepSz<T> dst, const float* borderValue, cudaStream_t stream, bool cc20);
static const caller_t callers[3][5] =
{
{
RemapDispatcher<PointFilter, BrdConstant, T>::call,
RemapDispatcher<PointFilter, BrdReplicate, T>::call,
RemapDispatcher<PointFilter, BrdReflect, T>::call,
RemapDispatcher<PointFilter, BrdWrap, T>::call,
RemapDispatcher<PointFilter, BrdReflect101, T>::call
},
{
RemapDispatcher<LinearFilter, BrdConstant, T>::call,
RemapDispatcher<LinearFilter, BrdReplicate, T>::call,
RemapDispatcher<LinearFilter, BrdReflect, T>::call,
RemapDispatcher<LinearFilter, BrdWrap, T>::call,
RemapDispatcher<LinearFilter, BrdReflect101, T>::call
},
{
RemapDispatcher<CubicFilter, BrdConstant, T>::call,
RemapDispatcher<CubicFilter, BrdReplicate, T>::call,
RemapDispatcher<CubicFilter, BrdReflect, T>::call,
RemapDispatcher<CubicFilter, BrdWrap, T>::call,
RemapDispatcher<CubicFilter, BrdReflect101, T>::call
}
};
// Reinterpret the untyped byte views with the element type T.
callers[interpolation][borderMode](static_cast<PtrStepSz<T>>(src), static_cast<PtrStepSz<T>>(srcWhole), xoff, yoff, xmap, ymap,
static_cast<PtrStepSz<T>>(dst), borderValue, stream, cc20);
}
// Explicit instantiations for the supported element types (1-, 3- and
// 4-channel variants of uchar/ushort/short/float; 2-channel is omitted).
template void remap_gpu<uchar >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<uchar3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<uchar4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<ushort >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<ushort3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<ushort4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<short >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<short3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<short4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<float >(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<float3>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
template void remap_gpu<float4>(PtrStepSzb src, PtrStepSzb srcWhole, int xoff, int yoff, PtrStepSzf xmap, PtrStepSzf ymap, PtrStepSzb dst, int interpolation, int borderMode, const float* borderValue, cudaStream_t stream, bool cc20);
} // namespace imgproc
}}} // namespace cv { namespace cuda { namespace cudev
#endif /* CUDA_DISABLER */
|
58836b739a129dd83a1909196d52af1c97de03d2.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019-2021 by XGBoost Contributors
*
* \file data.cu
* \brief Handles setting metainfo from array interface.
*/
#include "xgboost/data.h"
#include "xgboost/logging.h"
#include "xgboost/json.h"
#include "array_interface.h"
#include "../common/device_helpers.cuh"
#include "device_adapter_hip.cuh"
#include "simple_dmatrix.h"
namespace xgboost {
// Copy a dense array-interface column into 'out' as float, on the device
// that owns the column's data.
void CopyInfoImpl(ArrayInterface column, HostDeviceVector<float>* out) {
// Make the device owning 'ptr' current and return its ordinal
// (negative ordinals are left as-is and no device switch happens).
auto SetDeviceToPtr = [](void* ptr) {
hipPointerAttribute_t attr;
dh::safe_cuda(hipPointerGetAttributes(&attr, ptr));
int32_t ptr_device = attr.device;
if (ptr_device >= 0) {
dh::safe_cuda(hipSetDevice(ptr_device));
}
return ptr_device;
};
auto ptr_device = SetDeviceToPtr(column.data);
// Empty column: nothing to copy.
if (column.num_rows == 0) {
return;
}
out->SetDevice(ptr_device);
out->Resize(column.num_rows);
auto p_dst = thrust::device_pointer_cast(out->DevicePointer());
// One thread per row: read element (idx, 0) and store it as float.
dh::LaunchN(column.num_rows, [=] __device__(size_t idx) {
p_dst[idx] = column.GetElement(idx, 0);
});
}
namespace {
// Make the device that owns 'ptr' current; returns that device's ordinal.
// NOTE(review): unlike the lambda in CopyInfoImpl, this calls hipSetDevice
// unconditionally -- confirm 'ptr' is always device-resident here.
auto SetDeviceToPtr(void *ptr) {
hipPointerAttribute_t attr;
dh::safe_cuda(hipPointerGetAttributes(&attr, ptr));
int32_t ptr_device = attr.device;
dh::safe_cuda(hipSetDevice(ptr_device));
return ptr_device;
}
} // anonymous namespace
// Convert a per-group size column into cumulative group boundaries:
// *out = [0, n0, n0+n1, ...].
void CopyGroupInfoImpl(ArrayInterface column, std::vector<bst_group_t>* out) {
// Group sizes must be integral; float32/float64 columns are rejected.
CHECK(column.type != ArrayInterface::kF4 && column.type != ArrayInterface::kF8)
<< "Expected integer for group info.";
auto ptr_device = SetDeviceToPtr(column.data);
CHECK_EQ(ptr_device, dh::CurrentDevice());
// Stage the raw sizes in device memory first.
dh::TemporaryArray<bst_group_t> temp(column.num_rows);
auto d_tmp = temp.data();
dh::LaunchN(column.num_rows, [=] __device__(size_t idx) {
d_tmp[idx] = column.GetElement<size_t>(idx, 0);
});
auto length = column.num_rows;
// Host vector gets a leading 0, then the sizes, then a prefix sum turns
// the sizes into CSR-style boundaries.
out->resize(length + 1);
out->at(0) = 0;
thrust::copy(temp.data(), temp.data() + length, out->begin() + 1);
std::partial_sum(out->begin(), out->end(), out->begin());
}
// Build CSR-style group boundaries from a per-row query-id (qid) column.
// qid must be sorted non-decreasing; each run of equal ids forms one group.
// On return *p_group_ptr = [0, end_of_group_0, end_of_group_1, ...].
void CopyQidImpl(ArrayInterface array_interface,
                 std::vector<bst_group_t> *p_group_ptr) {
  auto &group_ptr_ = *p_group_ptr;
  // Guard the empty column explicitly: an empty qid yields no groups, and
  // without this check `num_rows - 1` below underflows (size_t) and would
  // launch a kernel over a huge bogus range.
  if (array_interface.num_rows == 0) {
    group_ptr_.clear();
    return;
  }
  // Device iterator yielding qid values as uint32_t.
  auto it = dh::MakeTransformIterator<uint32_t>(
      thrust::make_counting_iterator(0ul),
      [array_interface] __device__(size_t i) {
        return array_interface.GetElement<uint32_t>(i, 0);
      });
  // Verify on-device that the sequence is non-decreasing.
  dh::caching_device_vector<bool> flag(1);
  auto d_flag = dh::ToSpan(flag);
  auto d = SetDeviceToPtr(array_interface.data);
  dh::LaunchN(1, [=] __device__(size_t) { d_flag[0] = true; });
  dh::LaunchN(array_interface.num_rows - 1, [=] __device__(size_t i) {
    if (array_interface.GetElement<uint32_t>(i, 0) >
        array_interface.GetElement<uint32_t>(i + 1, 0)) {
      d_flag[0] = false;
    }
  });
  bool non_dec = true;
  dh::safe_cuda(hipMemcpy(&non_dec, flag.data().get(), sizeof(bool),
                          hipMemcpyDeviceToHost));
  CHECK(non_dec) << "`qid` must be sorted in increasing order along with data.";
  // Run-length encode the qid sequence: one run per group. The first call
  // (null temp storage) only computes the temporary buffer size.
  size_t bytes = 0;
  dh::caching_device_vector<uint32_t> out(array_interface.num_rows);
  dh::caching_device_vector<uint32_t> cnt(array_interface.num_rows);
  HostDeviceVector<int> d_num_runs_out(1, 0, d);
  hipcub::DeviceRunLengthEncode::Encode(
      nullptr, bytes, it, out.begin(), cnt.begin(),
      d_num_runs_out.DevicePointer(), array_interface.num_rows);
  dh::caching_device_vector<char> tmp(bytes);
  hipcub::DeviceRunLengthEncode::Encode(
      tmp.data().get(), bytes, it, out.begin(), cnt.begin(),
      d_num_runs_out.DevicePointer(), array_interface.num_rows);
  auto h_num_runs_out = d_num_runs_out.HostSpan()[0];
  group_ptr_.clear();
  group_ptr_.resize(h_num_runs_out + 1, 0);
  // Inclusive scan turns run lengths into cumulative boundaries; the
  // leading 0 stays in place.
  dh::XGBCachingDeviceAllocator<char> alloc;
  thrust::inclusive_scan(thrust::hip::par(alloc), cnt.begin(),
                         cnt.begin() + h_num_runs_out, cnt.begin());
  thrust::copy(cnt.begin(), cnt.begin() + h_num_runs_out,
               group_ptr_.begin() + 1);
}
namespace {
// thrust::all_of tries to copy lambda function.
// Predicate: true for invalid labels (NaN or +/-infinity).
struct LabelsCheck {
__device__ bool operator()(float y) { return ::isnan(y) || ::isinf(y); }
};
// Predicate: true for invalid weights (non-finite or negative; zero passes).
struct WeightsCheck {
__device__ bool operator()(float w) { return LabelsCheck{}(w) || w < 0; } // NOLINT
};
} // anonymous namespace
void ValidateQueryGroup(std::vector<bst_group_t> const &group_ptr_);
// Set one meta-info field from a CUDA array-interface JSON string.
// Supported keys: label, weight, base_margin, group, qid,
// label_lower_bound, label_upper_bound, feature_weights.
void MetaInfo::SetInfo(const char * c_key, std::string const& interface_str) {
Json j_interface = Json::Load({interface_str.c_str(), interface_str.size()});
auto const& j_arr = get<Array>(j_interface);
// Exactly one column of meta data is accepted.
CHECK_EQ(j_arr.size(), 1)
<< "MetaInfo: " << c_key << ". " << ArrayInterfaceErrors::Dimension(1);
ArrayInterface array_interface(interface_str);
std::string key{c_key};
if (!((array_interface.num_cols == 1 && array_interface.num_rows == 0) ||
(array_interface.num_cols == 0 && array_interface.num_rows == 1))) {
// Not an empty column, transform it.
array_interface.AsColumnVector();
}
// Meta info must be dense: a validity (null) mask is rejected.
CHECK(!array_interface.valid.Data())
<< "Meta info " << key << " should be dense, found validity mask";
if (array_interface.num_rows == 0) {
return;
}
if (key == "label") {
CopyInfoImpl(array_interface, &labels_);
// Validate on device: no NaN/inf labels.
auto ptr = labels_.ConstDevicePointer();
auto valid = thrust::none_of(thrust::device, ptr, ptr + labels_.Size(),
LabelsCheck{});
CHECK(valid) << "Label contains NaN, infinity or a value too large.";
} else if (key == "weight") {
CopyInfoImpl(array_interface, &weights_);
auto ptr = weights_.ConstDevicePointer();
auto valid = thrust::none_of(thrust::device, ptr, ptr + weights_.Size(),
WeightsCheck{});
// NOTE(review): WeightsCheck only rejects negative or non-finite values,
// so zero weights actually pass despite the message saying "positive".
CHECK(valid) << "Weights must be positive values.";
} else if (key == "base_margin") {
CopyInfoImpl(array_interface, &base_margin_);
} else if (key == "group") {
// Group sizes -> cumulative boundaries, then sanity-check them.
CopyGroupInfoImpl(array_interface, &group_ptr_);
ValidateQueryGroup(group_ptr_);
return;
} else if (key == "qid") {
CopyQidImpl(array_interface, &group_ptr_);
return;
} else if (key == "label_lower_bound") {
CopyInfoImpl(array_interface, &labels_lower_bound_);
return;
} else if (key == "label_upper_bound") {
CopyInfoImpl(array_interface, &labels_upper_bound_);
return;
} else if (key == "feature_weights") {
CopyInfoImpl(array_interface, &feature_weigths);
auto d_feature_weights = feature_weigths.ConstDeviceSpan();
auto valid = thrust::none_of(
thrust::device, d_feature_weights.data(),
d_feature_weights.data() + d_feature_weights.size(), WeightsCheck{});
CHECK(valid) << "Feature weight must be greater than 0.";
return;
} else {
LOG(FATAL) << "Unknown metainfo: " << key;
}
}
// Construct an in-memory DMatrix from a device adapter (cuDF / CuPy data).
// External-memory caching is not supported for device inputs, so
// cache_prefix must be empty.
template <typename AdapterT>
DMatrix* DMatrix::Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix) {
CHECK_EQ(cache_prefix.size(), 0)
<< "Device memory construction is not currently supported with external "
"memory.";
return new data::SimpleDMatrix(adapter, missing, nthread);
}
// Explicit instantiations for the two supported device adapters.
template DMatrix* DMatrix::Create<data::CudfAdapter>(
data::CudfAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix);
template DMatrix* DMatrix::Create<data::CupyAdapter>(
data::CupyAdapter* adapter, float missing, int nthread,
const std::string& cache_prefix);
} // namespace xgboost
| 58836b739a129dd83a1909196d52af1c97de03d2.cu | /*!
* Copyright 2019-2021 by XGBoost Contributors
*
* \file data.cu
* \brief Handles setting metainfo from array interface.
*/
#include "xgboost/data.h"
#include "xgboost/logging.h"
#include "xgboost/json.h"
#include "array_interface.h"
#include "../common/device_helpers.cuh"
#include "device_adapter.cuh"
#include "simple_dmatrix.h"
namespace xgboost {
// Copy a dense array-interface column into 'out' as float, on the device
// that owns the column's data.
void CopyInfoImpl(ArrayInterface column, HostDeviceVector<float>* out) {
// Make the device owning 'ptr' current and return its ordinal
// (negative ordinals are left as-is and no device switch happens).
auto SetDeviceToPtr = [](void* ptr) {
cudaPointerAttributes attr;
dh::safe_cuda(cudaPointerGetAttributes(&attr, ptr));
int32_t ptr_device = attr.device;
if (ptr_device >= 0) {
dh::safe_cuda(cudaSetDevice(ptr_device));
}
return ptr_device;
};
auto ptr_device = SetDeviceToPtr(column.data);
// Empty column: nothing to copy.
if (column.num_rows == 0) {
return;
}
out->SetDevice(ptr_device);
out->Resize(column.num_rows);
auto p_dst = thrust::device_pointer_cast(out->DevicePointer());
// One thread per row: read element (idx, 0) and store it as float.
dh::LaunchN(column.num_rows, [=] __device__(size_t idx) {
p_dst[idx] = column.GetElement(idx, 0);
});
}
namespace {
// Make the CUDA device that owns `ptr` the current device and return its
// ordinal.  Unlike the lambda in CopyInfoImpl, this always calls
// cudaSetDevice with the attribute result.
auto SetDeviceToPtr(void *ptr) {
  cudaPointerAttributes attributes;
  dh::safe_cuda(cudaPointerGetAttributes(&attributes, ptr));
  int32_t const device_ordinal = attributes.device;
  dh::safe_cuda(cudaSetDevice(device_ordinal));
  return device_ordinal;
}
}  // anonymous namespace
// Copies query-group sizes from `column` (must be an integer array) and
// converts them into a cumulative group pointer: out[i] is the first row of
// group i, and out->back() is the total number of rows.
void CopyGroupInfoImpl(ArrayInterface column, std::vector<bst_group_t>* out) {
  CHECK(column.type != ArrayInterface::kF4 && column.type != ArrayInterface::kF8)
      << "Expected integer for group info.";
  auto ptr_device = SetDeviceToPtr(column.data);
  CHECK_EQ(ptr_device, dh::CurrentDevice());
  dh::TemporaryArray<bst_group_t> temp(column.num_rows);
  auto d_tmp = temp.data();
  // Materialize group sizes on device (GetElement handles dtype conversion).
  dh::LaunchN(column.num_rows, [=] __device__(size_t idx) {
    d_tmp[idx] = column.GetElement<size_t>(idx, 0);
  });
  auto length = column.num_rows;
  out->resize(length + 1);
  out->at(0) = 0;
  // Sizes land in out[1..length]; the prefix sum turns them into offsets.
  thrust::copy(temp.data(), temp.data() + length, out->begin() + 1);
  std::partial_sum(out->begin(), out->end(), out->begin());
}
// Builds the group pointer from per-row query ids.  The qid column must be
// sorted (non-decreasing); each run of equal qids becomes one group.
void CopyQidImpl(ArrayInterface array_interface,
                 std::vector<bst_group_t> *p_group_ptr) {
  auto &group_ptr_ = *p_group_ptr;
  // Iterator that reads qid[i] as uint32_t directly from the array interface.
  auto it = dh::MakeTransformIterator<uint32_t>(
      thrust::make_counting_iterator(0ul),
      [array_interface] __device__(size_t i) {
        return array_interface.GetElement<uint32_t>(i, 0);
      });
  dh::caching_device_vector<bool> flag(1);
  auto d_flag = dh::ToSpan(flag);
  auto d = SetDeviceToPtr(array_interface.data);
  // flag[0] remains true iff the qid sequence is non-decreasing.
  dh::LaunchN(1, [=] __device__(size_t) { d_flag[0] = true; });
  dh::LaunchN(array_interface.num_rows - 1, [=] __device__(size_t i) {
    if (array_interface.GetElement<uint32_t>(i, 0) >
        array_interface.GetElement<uint32_t>(i + 1, 0)) {
      d_flag[0] = false;
    }
  });
  bool non_dec = true;
  dh::safe_cuda(cudaMemcpy(&non_dec, flag.data().get(), sizeof(bool),
                           cudaMemcpyDeviceToHost));
  CHECK(non_dec) << "`qid` must be sorted in increasing order along with data.";
  // Run-length encode the qids: `cnt` receives each run's (group's) size.
  size_t bytes = 0;
  dh::caching_device_vector<uint32_t> out(array_interface.num_rows);
  dh::caching_device_vector<uint32_t> cnt(array_interface.num_rows);
  HostDeviceVector<int> d_num_runs_out(1, 0, d);
  // First call (nullptr temp storage) only computes the required temp size.
  cub::DeviceRunLengthEncode::Encode(
      nullptr, bytes, it, out.begin(), cnt.begin(),
      d_num_runs_out.DevicePointer(), array_interface.num_rows);
  dh::caching_device_vector<char> tmp(bytes);
  cub::DeviceRunLengthEncode::Encode(
      tmp.data().get(), bytes, it, out.begin(), cnt.begin(),
      d_num_runs_out.DevicePointer(), array_interface.num_rows);
  auto h_num_runs_out = d_num_runs_out.HostSpan()[0];
  group_ptr_.clear();
  group_ptr_.resize(h_num_runs_out + 1, 0);
  // Prefix-sum the run lengths into offsets; group_ptr_[0] stays 0.
  dh::XGBCachingDeviceAllocator<char> alloc;
  thrust::inclusive_scan(thrust::cuda::par(alloc), cnt.begin(),
                         cnt.begin() + h_num_runs_out, cnt.begin());
  thrust::copy(cnt.begin(), cnt.begin() + h_num_runs_out,
               group_ptr_.begin() + 1);
}
namespace {
// thrust::all_of tries to copy lambda function.
// These functors are used with thrust::none_of, so operator() returns true
// for *invalid* values.
struct LabelsCheck {
  // Invalid if NaN or infinite.
  __device__ bool operator()(float y) { return ::isnan(y) || ::isinf(y); }
};
struct WeightsCheck {
  // Invalid if non-finite or negative (zero weights pass the check).
  __device__ bool operator()(float w) { return LabelsCheck{}(w) || w < 0; }  // NOLINT
};
}  // anonymous namespace
void ValidateQueryGroup(std::vector<bst_group_t> const &group_ptr_);
// Sets a named meta-info field from a CUDA array-interface JSON string.
// Data stays on the device that owns the incoming pointer.
void MetaInfo::SetInfo(const char * c_key, std::string const& interface_str) {
  Json j_interface = Json::Load({interface_str.c_str(), interface_str.size()});
  auto const& j_arr = get<Array>(j_interface);
  CHECK_EQ(j_arr.size(), 1)
      << "MetaInfo: " << c_key << ". " << ArrayInterfaceErrors::Dimension(1);
  ArrayInterface array_interface(interface_str);
  std::string key{c_key};
  // Empty 1-D inputs (0x1 or 1x0) are accepted as-is; everything else is
  // reshaped into a column vector.
  if (!((array_interface.num_cols == 1 && array_interface.num_rows == 0) ||
        (array_interface.num_cols == 0 && array_interface.num_rows == 1))) {
    // Not an empty column, transform it.
    array_interface.AsColumnVector();
  }
  CHECK(!array_interface.valid.Data())
      << "Meta info " << key << " should be dense, found validity mask";
  if (array_interface.num_rows == 0) {
    return;  // empty input: leave the field untouched
  }
  if (key == "label") {
    CopyInfoImpl(array_interface, &labels_);
    // Labels must be finite.
    auto ptr = labels_.ConstDevicePointer();
    auto valid = thrust::none_of(thrust::device, ptr, ptr + labels_.Size(),
                                 LabelsCheck{});
    CHECK(valid) << "Label contains NaN, infinity or a value too large.";
  } else if (key == "weight") {
    CopyInfoImpl(array_interface, &weights_);
    // Weights must be finite and non-negative.
    auto ptr = weights_.ConstDevicePointer();
    auto valid = thrust::none_of(thrust::device, ptr, ptr + weights_.Size(),
                                 WeightsCheck{});
    CHECK(valid) << "Weights must be positive values.";
  } else if (key == "base_margin") {
    CopyInfoImpl(array_interface, &base_margin_);
  } else if (key == "group") {
    // Group sizes -> cumulative group pointer, then validated.
    CopyGroupInfoImpl(array_interface, &group_ptr_);
    ValidateQueryGroup(group_ptr_);
    return;
  } else if (key == "qid") {
    // Per-row query ids -> cumulative group pointer.
    CopyQidImpl(array_interface, &group_ptr_);
    return;
  } else if (key == "label_lower_bound") {
    CopyInfoImpl(array_interface, &labels_lower_bound_);
    return;
  } else if (key == "label_upper_bound") {
    CopyInfoImpl(array_interface, &labels_upper_bound_);
    return;
  } else if (key == "feature_weights") {
    CopyInfoImpl(array_interface, &feature_weigths);
    auto d_feature_weights = feature_weigths.ConstDeviceSpan();
    auto valid = thrust::none_of(
        thrust::device, d_feature_weights.data(),
        d_feature_weights.data() + d_feature_weights.size(), WeightsCheck{});
    CHECK(valid) << "Feature weight must be greater than 0.";
    return;
  } else {
    LOG(FATAL) << "Unknown metainfo: " << key;
  }
}
// Constructs an in-memory DMatrix from a device adapter.
// External (cache-backed) memory is not supported for device inputs, so a
// non-empty cache prefix is a hard error.
template <typename AdapterT>
DMatrix* DMatrix::Create(AdapterT* adapter, float missing, int nthread,
                         const std::string& cache_prefix) {
  CHECK_EQ(cache_prefix.size(), 0)
      << "Device memory construction is not currently supported with external "
         "memory.";
  return new data::SimpleDMatrix(adapter, missing, nthread);
}
// Explicit instantiations for the two supported device adapters.
template DMatrix* DMatrix::Create<data::CudfAdapter>(
    data::CudfAdapter* adapter, float missing, int nthread,
    const std::string& cache_prefix);
template DMatrix* DMatrix::Create<data::CupyAdapter>(
    data::CupyAdapter* adapter, float missing, int nthread,
    const std::string& cache_prefix);
} // namespace xgboost
|
292edd1399ba9d9386d66ed27ef03d0b9159ccdb.hip | // !!! This is a file automatically generated by hipify!!!
#include "IPsecHMACSHA1AES_kernel_core.hh"
#include "IPsecHMACSHA1AES_kernel.hh"
#include <hip/hip_runtime.h>
#include "../../engines/cuda/utils.hh"
#include <stdint.h>
#include <assert.h>
#include <stdio.h>
/*******************************************************************
HMAC-SHA1 kernel
******************************************************************/
#ifdef __DEVICE_EMULATION__
#define debugprint printf
#define EMUSYNC __syncthreads()
#else
// No-op stand-ins matching the printf-style call shapes used via
// `debugprint`; debug output is compiled out unless __DEVICE_EMULATION__
// is defined.
__device__ void _NOOPfunction(char *format) {
}
__device__ void _NOOPfunction(char *format, unsigned int onearg) {
}
__device__ void _NOOPfunction(char *format, unsigned int onearg,
		unsigned int twoargs) {
}
__device__ void _NOOPfunction(char *format, char *onearg) {
}
#define EMUSYNC do {} while (0)
#define debugprint _NOOPfunction
#endif
#define SHA1_THREADS_PER_BLK 32
//__global__ uint32_t d_pad_buffer[16 * 2 * MAX_CHUNK_SIZE * MAX_GROUP_SIZE];
// Reverse the byte order of a 32-bit word (endianness conversion for
// SHA-1's big-endian message words).
__device__ uint32_t swap(uint32_t v) {
	uint32_t b3 = (v << 24) & 0xff000000U;  // bits 0-7   -> 24-31
	uint32_t b2 = (v << 8)  & 0x00ff0000U;  // bits 8-15  -> 16-23
	uint32_t b1 = (v >> 8)  & 0x0000ff00U;  // bits 16-23 -> 8-15
	uint32_t b0 = (v >> 24) & 0x000000ffU;  // bits 24-31 -> 0-7
	return b3 | b2 | b1 | b0;
}
// SHA-1 running state: the five 32-bit chaining values (H0..H4).
typedef struct hash_digest {
	uint32_t h1;
	uint32_t h2;
	uint32_t h3;
	uint32_t h4;
	uint32_t h5;
} hash_digest_t;
#define HMAC
/*
 * Loads one 64-byte SHA-1 block (16 big-endian words) from buf+offset into
 * dest, applying SHA-1 padding (0x80 terminator, zero fill, 64-bit message
 * length) when the message ends inside or before this block.  With HMAC
 * defined, the recorded bit length includes the 64-byte pad block hashed
 * before the message.
 * NOTE(review): uses `len % 4` where `(len - offset) % 4` is meant — these
 * agree because offset is always a multiple of 64.
 */
__inline__ __device__ void getBlock(char* buf, int offset, int len,
		uint32_t* dest) {
	uint32_t *tmp;
	unsigned int tempbuf[16];
	tmp = (uint32_t*) (buf + offset);
	debugprint("%d %d\n", offset, len);
	if (offset + 64 <= len) {
		/* case 0: full message block, byte-swap straight into dest */
		debugprint("--0--\n");
#pragma unroll 16
		for (int i = 0; i < 16; i++) {
			dest[i] = swap(tmp[i]);
		}
	} else if (len > offset && (len - offset) < 56) { //case 1 enough space in last block for padding
		debugprint("--1--\n");
		int i;
		/* copy the remaining full words of the message */
		for (i = 0; i < (len - offset) / 4; i++) {
			//debugprint("%d %d\n",offset,i);
			//debugprint("%p %p\n", buf, dest);
			//tempbuf[i] = buf[i];
			tempbuf[i] = swap(tmp[i]);
		}
		//printf("len%%4 %d\n",len%4);
		/* append the 0x80 terminator after the last message byte,
		 * keeping any leading message bytes of the partial word */
		switch (len % 4) {
		case 0:
			tempbuf[i] = swap(0x00000080);
			i++;
			break;
		case 1:
			tempbuf[i] = swap(0x00008000 | (tmp[i] & 0x000000FF));
			i++;
			break;
		case 2:
			tempbuf[i] = swap(0x00800000 | (tmp[i] & 0x0000FFFF));
			i++;
			break;
		case 3:
			tempbuf[i] = swap(0x80000000 | (tmp[i] & 0x00FFFFFF));
			i++;
			break;
		};
		/* zero-fill up to the length field */
		for (; i < 14; i++) {
			tempbuf[i] = 0;
		}
#pragma unroll 14
		for (i = 0; i < 14; i++) {
			dest[i] = tempbuf[i];
		}
		/* 64-bit message length in bits (high word always 0 here) */
		dest[14] = 0x00000000;
#ifndef HMAC
		dest[15] = len * 8;
#else
		dest[15] = (len + 64) * 8;
#endif
	} else if (len > offset && (len - offset) >= 56) { //case 2 not enough space in last block (containing message) for padding
		debugprint("--2--\n");
		int i;
		for (i = 0; i < (len - offset) / 4; i++) {
			tempbuf[i] = swap(tmp[i]);
		}
		/* 0x80 terminator as in case 1; the length field goes into the
		 * following block (case 4) */
		switch (len % 4) {
		case 0:
			tempbuf[i] = swap(0x00000080);
			i++;
			break;
		case 1:
			tempbuf[i] = swap(0x00008000 | (tmp[i] & 0x000000FF));
			i++;
			break;
		case 2:
			tempbuf[i] = swap(0x00800000 | (tmp[i] & 0x0000FFFF));
			i++;
			break;
		case 3:
			tempbuf[i] = swap(0x80000000 | (tmp[i] & 0x00FFFFFF));
			i++;
			break;
		};
		for (; i < 16; i++) {
			tempbuf[i] = 0x00000000;
		}
#pragma unroll 16
		for (i = 0; i < 16; i++) {
			dest[i] = tempbuf[i];
		}
	} else if (offset == len) { //message end is aligned in 64 bytes
		/* case 3: terminator-only block plus length */
		debugprint("--3--\n");
		dest[0] = swap(0x00000080);
#pragma unroll 13
		for (int i = 1; i < 14; i++)
			dest[i] = 0x00000000;
		dest[14] = 0x00000000;
#ifndef HMAC
		dest[15] = len * 8;
#else
		dest[15] = (len + 64) * 8;
#endif
	} else if (offset > len) { //the last block in case 2
		/* case 4: all-zero block carrying only the length field */
		debugprint("--4--\n");
#pragma unroll 14
		for (int i = 0; i < 14; i++)
			dest[i] = 0x00000000;
		dest[14] = 0x00000000;
#ifndef HMAC
		dest[15] = len * 8;
#else
		dest[15] = (len + 64) * 8;
#endif
	} else {
		debugprint("Not supposed to happen\n");
	}
}
/*
 * Runs one SHA-1 compression over the 64-byte block of `in` starting at
 * `offset` (padding handled by getBlock) and folds the result into `h`.
 *
 * Behaviourally identical to the fully-unrolled original: the 16-entry `w`
 * buffer is used as a rolling window over the 80-word message schedule
 * (index t & 15), and the schedule update is skipped for t >= 64 exactly as
 * the unrolled code does.  `w` is caller-provided scratch; getBlock rewrites
 * all 16 words at the start of every call.
 */
__device__ void computeSHA1Block(char* in, uint32_t* w, int offset, int len,
		hash_digest_t &h) {
	uint32_t a = h.h1;
	uint32_t b = h.h2;
	uint32_t c = h.h3;
	uint32_t d = h.h4;
	uint32_t e = h.h5;

	getBlock(in, offset, len, w);

	for (int t = 0; t < 80; t++) {
		/* round function and constant per FIPS 180-4, switching at
		 * t = 20 / 40 / 60 */
		uint32_t f, k;
		if (t < 20) {
			f = (b & c) | ((~b) & d);
			k = 0x5A827999;
		} else if (t < 40) {
			f = b ^ c ^ d;
			k = 0x6ED9EBA1;
		} else if (t < 60) {
			f = (b & c) | (b & d) | (c & d);
			k = 0x8F1BBCDC;
		} else {
			f = b ^ c ^ d;
			k = 0xCA62C1D6;
		}

		uint32_t temp = ((a << 5) | (a >> 27)) + f + e + k + w[t & 15];
		e = d;
		d = c;
		c = (b << 30) | (b >> 2);
		b = a;
		a = temp;

		/* extend the message schedule in place; words beyond round 63
		 * are never read, so the original stops updating there */
		if (t < 64) {
			uint32_t s = w[(t + 13) & 15] ^ w[(t + 8) & 15]
					^ w[(t + 2) & 15] ^ w[t & 15];
			w[t & 15] = (s << 1) | (s >> 31);
		}
	}

	h.h1 += a;
	h.h2 += b;
	h.h3 += c;
	h.h4 += d;
	h.h5 += e;
}
/*
__global__ void computeSHA1(char* buf, int *offsets, int *len, char* output, int N)
{
//__shared__ uint32_t w_shared[16*SHA1_THREADS_PER_BLK];
uint32_t w_register[16];
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
uint32_t *w = w_register;//w_shared + 16*threadIdx.x;
hash_digest_t h;
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
int num_iter = (len[index]+63+9)/64;
debugprint("num_iter %d\n", num_iter);
for(int i = 0; i < num_iter; i++)
computeSHA1Block(buf + offsets[index], w, i*64 , len[index], h);
h.h1 = swap(h.h1);
h.h2 = swap(h.h2);
h.h3 = swap(h.h3);
h.h4 = swap(h.h4);
h.h5 = swap(h.h5);
uint32_t * out = (uint32_t*)(output + index*20);
*(out++) = h.h1;
*(out++) = h.h2;
*(out++) = h.h3;
*(out++) = h.h4;
*(out++) = h.h5;
}
}*/
/*
some how *pad = *pad++ ^ *key++
was optimized and does not work correctly in GPU oTL.
*/
/*
 * XORs a 64-byte HMAC pad with the key, one 32-bit word at a time.
 * Kept as explicit indexing: per the original author's note, the
 * pointer-increment form (*pad = *pad++ ^ *key++) was miscompiled on GPU.
 */
__device__ void xorpads(uint32_t *pad, uint32_t* key) {
#pragma unroll 16
	for (int word = 0; word < 16; word++)
		pad[word] ^= key[word];
}
/*
uint32_t opad[16] =
{ 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c,
0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c,
0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c,
0x5c5c5c5c, };
uint32_t ipad[16] =
{ 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636,
0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636,
0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636,
0x36363636, };
*/
// in: start pointer of the data to be authenticated by hsha1.
// out: start pointer of the data where hsha1 signature will be recorded.
// length: length of the data to be authenticated by hsha1.
// key: hmac key.
// Computes HMAC-SHA1 over `length` bytes at `in`, writing the 20-byte tag
// to `out`.  `key` must point to at least 64 bytes (the pre-padded HMAC
// key block).  out must not alias in: the inner hash is staged in out and
// then re-hashed.
__device__ void HMAC_SHA1(uint32_t *in, uint32_t *out, uint32_t length,
		char *key) {
	uint32_t w_register[16];
	uint32_t *w = w_register; //w_shared + 16*threadIdx.x;
	hash_digest_t h;
	// Build ipad: 0x36 repeated, XORed with the key.
	for (int i = 0; i < 16; i++)
		w[i] = 0x36363636;
	xorpads(w, (uint32_t*) (key));
	// SHA-1 initial chaining values.
	h.h1 = 0x67452301;
	h.h2 = 0xEFCDAB89;
	h.h3 = 0x98BADCFE;
	h.h4 = 0x10325476;
	h.h5 = 0xC3D2E1F0;
	//SHA1 compute on ipad
	computeSHA1Block((char*) w, w, 0, 64, h);
	//SHA1 compute on mesage
	// +9 accounts for the 0x80 terminator and 8-byte length field.
	int num_iter = (length + 63 + 9) / 64;
	for (int i = 0; i < num_iter; i++)
		computeSHA1Block((char*) in, w, i * 64, length, h);
	// Stage the inner digest H(ipad | m) in out (big-endian).
	*(out) = swap(h.h1);
	*(out + 1) = swap(h.h2);
	*(out + 2) = swap(h.h3);
	*(out + 3) = swap(h.h4);
	*(out + 4) = swap(h.h5);
	// Restart the hash state for the outer hash.
	h.h1 = 0x67452301;
	h.h2 = 0xEFCDAB89;
	h.h3 = 0x98BADCFE;
	h.h4 = 0x10325476;
	h.h5 = 0xC3D2E1F0;
	// Build opad: 0x5c repeated, XORed with the key.
	for (int i = 0; i < 16; i++)
		w[i] = 0x5c5c5c5c;
	xorpads(w, (uint32_t*) (key));
	//SHA 1 compute on opads
	computeSHA1Block((char*) w, w, 0, 64, h);
	//SHA 1 compute on (hash of ipad|m)
	computeSHA1Block((char*) out, w, 0, 20, h);
	// Final tag H(opad | inner) overwrites the staged inner digest.
	*(out) = swap(h.h1);
	*(out + 1) = swap(h.h2);
	*(out + 2) = swap(h.h3);
	*(out + 3) = swap(h.h4);
	*(out + 4) = swap(h.h5);
}
/*******************************************************************
AES CBC kernel
******************************************************************/
/* former prototype
__global__ void
AES_cbc_128_encrypt_kernel_SharedMem(const uint8_t *in_all,
uint8_t *out_all,
const uint32_t *pkt_offset,
const uint8_t *keys,
uint8_t *ivs,
const unsigned int num_flows,
uint8_t *checkbits = 0)
*/
/*
 * AES-128-CBC encryption, one thread per flow/packet.
 *
 * in_all/out_all: packed packet buffers; flow idx occupies bytes
 *   [pkt_offset[idx], pkt_offset[idx+1]).
 * key_idxs/key_array: per-flow index into the SA table holding the AES key.
 * ivs: one AES block per flow; overwritten with the final ciphertext block.
 * checkbits: one completion byte per thread block, set by thread 0.
 * input_size_arr/output_size_arr: unused, kept for interface compatibility.
 */
__global__ void
AES_cbc_128_encrypt_kernel_SharedMem(
		const uint8_t *in_all,
		uint8_t *out_all,
		size_t *input_size_arr,
		size_t *output_size_arr,
		int num_flows,
		uint8_t *checkbits,
		int *key_idxs,
		struct aes_sa_entry *key_array,
		uint8_t *ivs,
		const uint32_t *pkt_offset
		)
{
	__shared__ uint32_t shared_Te0[256];
	__shared__ uint32_t shared_Te1[256];
	__shared__ uint32_t shared_Te2[256];
	__shared__ uint32_t shared_Te3[256];
	__shared__ uint32_t shared_Rcon[10];

	/* compute the thread id */
	int idx = blockDim.x * blockIdx.x + threadIdx.x;

	/* initialize T boxes.
	 * BUG FIX: this loop used to break on `index >= num_flows`, leaving the
	 * shared tables partially uninitialized whenever num_flows < 256.  The
	 * bound is the table size (256), as in the decrypt kernel. */
	for (unsigned i = 0; i * blockDim.x < 256; i++) {
		unsigned index = threadIdx.x + i * blockDim.x;
		if (index >= 256)
			break;
		shared_Te0[index] = Te0_ConstMem[index];
		shared_Te1[index] = Te1_ConstMem[index];
		shared_Te2[index] = Te2_ConstMem[index];
		shared_Te3[index] = Te3_ConstMem[index];
	}
	for (unsigned i = 0; i * blockDim.x < 10; i++) {
		int index = threadIdx.x + blockDim.x * i;
		if (index < 10) {
			shared_Rcon[index] = rcon[index];
		}
	}

	/* make sure T boxes have been initialized. */
	__syncthreads();

	/* BUG FIX: out-of-range threads previously returned before the barrier
	 * above, making __syncthreads() divergent for partially filled blocks.
	 * Guard the per-flow work instead so every thread reaches both
	 * barriers. */
	if (idx < num_flows) {
		/* Locate data */
		const uint8_t *in = pkt_offset[idx] + in_all;
		uint8_t *out = pkt_offset[idx] + out_all;
		const uint8_t *key = key_array[key_idxs[idx]].aes_key;
		uint8_t *ivec = idx * AES_BLOCK_SIZE + ivs;

		/* Encrypt using cbc mode */
		unsigned long len = pkt_offset[idx + 1] - pkt_offset[idx];
		const unsigned char *iv = ivec;

		while (len >= AES_BLOCK_SIZE) {
			/* XOR plaintext with IV / previous ciphertext block */
			*((uint64_t*)out) = *((uint64_t*)in) ^ *((uint64_t*)iv);
			*(((uint64_t*)out) + 1) = *(((uint64_t*)in) + 1) ^ *(((uint64_t*)iv) + 1);
			AES_128_encrypt(out, out, key,
					shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);
			iv = out;
			len -= AES_BLOCK_SIZE;
			in += AES_BLOCK_SIZE;
			out += AES_BLOCK_SIZE;
		}
		if (len) {
			/* partial final block: pad with IV bytes, then encrypt */
			for (unsigned n = 0; n < len; ++n)
				out[n] = in[n] ^ iv[n];
			for (unsigned n = len; n < AES_BLOCK_SIZE; ++n)
				out[n] = iv[n];
			AES_128_encrypt(out, out, key,
					shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);
			iv = out;
		}
		/* store the last ciphertext block back as the next IV */
		*((uint4*)ivec) = *((uint4*)iv);
	}

	__syncthreads();
	/* thread 0 signals that this thread block has finished */
	if (threadIdx.x == 0 && checkbits != 0)
		*(checkbits + blockIdx.x) = 1;
}
/*
 * AES-128-CBC decryption, one thread per 16-byte ciphertext block.
 *
 * in_all/out_all: packed ciphertext/plaintext, AES_BLOCK_SIZE per thread.
 * keys: 16-byte AES key per packet, selected via pkt_index[idx].
 * ivs: one IV block per packet; used only for the first block of a packet,
 *   later blocks chain from the preceding ciphertext block.
 * checkbits: one completion byte per thread block, set by thread 0.
 */
__global__
void AES_cbc_128_decrypt_kernel_SharedMem(const uint8_t *in_all,
			uint8_t *out_all,
			uint8_t *keys,
			uint8_t *ivs,
			uint16_t *pkt_index,
			unsigned long block_count,
			uint8_t *checkbits = 0
			)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	__shared__ uint32_t shared_Td0[256];
	__shared__ uint32_t shared_Td1[256];
	__shared__ uint32_t shared_Td2[256];
	__shared__ uint32_t shared_Td3[256];
	__shared__ uint8_t shared_Td4[256];
	__shared__ uint32_t shared_Rcon[10];
	__shared__ uint32_t shared_Te0[256];
	__shared__ uint32_t shared_Te1[256];
	__shared__ uint32_t shared_Te2[256];
	__shared__ uint32_t shared_Te3[256];

	/* initialize T boxes; all threads of the block participate */
	for (unsigned i = 0; i * blockDim.x < 256; i++) {
		unsigned index = threadIdx.x + i * blockDim.x;
		if (index >= 256)
			break;
		shared_Te0[index] = Te0_ConstMem[index];
		shared_Te1[index] = Te1_ConstMem[index];
		shared_Te2[index] = Te2_ConstMem[index];
		shared_Te3[index] = Te3_ConstMem[index];
		shared_Td0[index] = Td0_ConstMem[index];
		shared_Td1[index] = Td1_ConstMem[index];
		shared_Td2[index] = Td2_ConstMem[index];
		shared_Td3[index] = Td3_ConstMem[index];
		shared_Td4[index] = Td4_ConstMem[index];
	}
	/* FIX: this loop was duplicated verbatim in the original; once is
	 * enough to fill the 10-entry Rcon table. */
	for (unsigned i = 0; i * blockDim.x < 10; i++) {
		int index = threadIdx.x + blockDim.x * i;
		if (index < 10) {
			shared_Rcon[index] = rcon[index];
		}
	}
	__syncthreads();

	/* BUG FIX: out-of-range threads previously returned here, which made
	 * the final __syncthreads() divergent for partially filled blocks.
	 * Guard the per-block work instead so all threads reach both
	 * barriers. */
	if (idx < block_count) {
		/* Locate data */
		const uint8_t *in = idx * AES_BLOCK_SIZE + in_all;
		uint8_t *out = idx * AES_BLOCK_SIZE + out_all;
		uint16_t packet_index = pkt_index[idx];

		/* load this packet's 128-bit key */
		uint32_t rk[4];
		rk[0] = *((uint32_t*)(keys + 16 * packet_index));
		rk[1] = *((uint32_t*)(keys + 16 * packet_index + 4));
		rk[2] = *((uint32_t*)(keys + 16 * packet_index + 8));
		rk[3] = *((uint32_t*)(keys + 16 * packet_index + 12));

		uint8_t *ivec = packet_index * AES_BLOCK_SIZE + ivs;

		/* Decrypt using cbc mode: first block of a packet chains from
		 * its IV, every later block from the previous ciphertext. */
		const unsigned char *iv;
		if (idx == 0 || pkt_index[idx] != pkt_index[idx - 1])
			iv = ivec;
		else
			iv = in - AES_BLOCK_SIZE;

		AES_128_decrypt(in, out, rk,
				shared_Td0, shared_Td1, shared_Td2, shared_Td3, shared_Td4,
				shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);

		*((uint64_t*)out) = *((uint64_t*)out) ^ *((uint64_t*)iv);
		*(((uint64_t*)out) + 1) = *(((uint64_t*)out) + 1) ^ *(((uint64_t*)iv) + 1);
	}

	__syncthreads();
	/* thread 0 signals that this thread block has finished */
	if (threadIdx.x == 0 && checkbits != 0)
		*(checkbits + blockIdx.x) = 1;
}
/*******************************************************************
AES ECB kernel
******************************************************************/
__global__ void
AES_ecb_encrypt_kernel(const uint8_t *in_all,
                       uint8_t *out_all,
                       const uint8_t *keys,
                       uint16_t *pkt_index,
                       unsigned long block_count
                       )
{
    /* AES-128-ECB encryption, one thread per 16-byte AES block.
     * pkt_index[i] selects the 16-byte key for block i (keys + 16*pkt). */
    __shared__ uint32_t shared_Te0[256];
    __shared__ uint32_t shared_Te1[256];
    __shared__ uint32_t shared_Te2[256];
    __shared__ uint32_t shared_Te3[256];
    __shared__ uint32_t shared_Rcon[10];

    /* compute the thread id */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;

    /* initialize T boxes cooperatively; handles any blockDim.x */
    for (unsigned i = 0; i * blockDim.x < 256; i++) {
        unsigned index = i * blockDim.x + threadIdx.x;
        if (index >= 256)
            break;
        shared_Te0[index] = Te0_ConstMem[index];
        shared_Te1[index] = Te1_ConstMem[index];
        shared_Te2[index] = Te2_ConstMem[index];
        shared_Te3[index] = Te3_ConstMem[index];
    }
    for (unsigned i = 0; i * blockDim.x < 10; i++) {
        unsigned index = threadIdx.x + blockDim.x * i;
        if (index < 10) {
            shared_Rcon[index] = rcon[index];
        }
    }

    /* Barrier BEFORE the bounds-check return: in the original the
     * out-of-range threads exited first and the remaining threads then
     * hit a divergent __syncthreads(), which is undefined behavior. */
    __syncthreads();

    if (idx >= block_count)
        return;

    /* Locate this thread's plaintext block, output slot and key. */
    const uint8_t *in = idx * AES_BLOCK_SIZE + in_all;
    uint8_t *out = idx * AES_BLOCK_SIZE + out_all;
    uint16_t pktIndex = pkt_index[idx];
    const uint8_t *key = pktIndex * 16 + keys;

    AES_128_encrypt(in, out, key,
                    shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);
}
/**************************************************************************
Exported C++ function wrapper function for CUDA kernel
***************************************************************************/
/*
* Sangwook: Those wrapper functions are not used in NBA.
void AES_cbc_128_decrypt_gpu(const uint8_t *in_d,
uint8_t *out_d,
uint8_t *keys_d,
uint8_t *ivs_d,
uint16_t *pkt_index_d,
unsigned long block_count,
uint8_t *checkbits_d,
const unsigned int threads_per_blk,
hipStream_t stream )
{
unsigned int num_cuda_blks = (block_count+threads_per_blk - 1) / threads_per_blk;
if (stream == 0) {
AES_cbc_128_decrypt_kernel_SharedMem<<<num_cuda_blks, threads_per_blk>>>(
in_d, out_d, keys_d, ivs_d, pkt_index_d, block_count, checkbits_d);
} else {
AES_cbc_128_decrypt_kernel_SharedMem<<<num_cuda_blks, threads_per_blk, 0, stream>>>(
in_d, out_d, keys_d, ivs_d, pkt_index_d, block_count, checkbits_d);
}
}
void AES_cbc_128_encrypt_gpu(const uint8_t *in_d,
uint8_t *out_d,
const uint32_t *pkt_offset_d,
const uint8_t *keys_d,
uint8_t *ivs_d,
const unsigned int num_flows,
uint8_t *checkbits_d,
const unsigned int threads_per_blk,
hipStream_t stream)
{
unsigned int num_cuda_blks = (num_flows+threads_per_blk - 1) / threads_per_blk;
if (stream == 0) {
AES_cbc_128_encrypt_kernel_SharedMem<<<num_cuda_blks, threads_per_blk>>>(
in_d, out_d, pkt_offset_d, keys_d, ivs_d, num_flows, checkbits_d);
} else {
AES_cbc_128_encrypt_kernel_SharedMem<<<num_cuda_blks, threads_per_blk, 0, stream>>>(
in_d, out_d, pkt_offset_d, keys_d, ivs_d, num_flows, checkbits_d);
}
}
void AES_ecb_128_encrypt_gpu(const uint8_t *in_d,
uint8_t *out_d,
const uint8_t *keys_d,
uint16_t *pkt_index_d,
unsigned long block_count,
const unsigned int threads_per_blk,
hipStream_t stream)
{
unsigned int num_cuda_blks = (block_count + threads_per_blk - 1) / threads_per_blk;
if (stream == 0) {
AES_ecb_encrypt_kernel<<<num_cuda_blks, threads_per_blk>>>(
in_d, out_d, keys_d, pkt_index_d, block_count);
} else {
AES_ecb_encrypt_kernel<<<num_cuda_blks, threads_per_blk, 0, stream>>>(
in_d, out_d, keys_d, pkt_index_d, block_count);
}
}
*/
/**************************************************************************
Key Setup for Decryption
***************************************************************************/
/* Host-side helper: run the FIPS-197 AES key expansion on enc_key and
 * write the FINAL 16-byte round key into dec_key.  Decryption walks the
 * schedule backwards, so the last round key is the starting state it
 * needs.  Uses the host lookup tables Te4 / rcon_host.
 *
 *   dec_key  - out: 16 bytes (last round key only)
 *   enc_key  - in:  key_bits/8 bytes of key material
 *   key_bits - 128, 192 or 256; any other value falls through the three
 *              branches with an incomplete expansion, so the caller must
 *              pass a valid size.
 *
 * NOTE(review): only 16 bytes are emitted even for 192/256-bit keys —
 * presumably the device-side decrypt routine regenerates the rest of the
 * schedule on the fly; confirm against AES_128_decrypt's expectations. */
void AES_decrypt_key_prepare(uint8_t *dec_key,
const uint8_t *enc_key,
unsigned int key_bits)
{
/* rk walks forward through the expanded schedule (max 60 words for AES-256). */
uint32_t rk_buf[60];
uint32_t *rk = rk_buf;
int i = 0;
uint32_t temp;
/* load the first four big-endian words of the user key */
rk[0] = GETU32(enc_key );
rk[1] = GETU32(enc_key + 4);
rk[2] = GETU32(enc_key + 8);
rk[3] = GETU32(enc_key + 12);
if (key_bits == 128) {
/* AES-128: 10 iterations, 4 new schedule words each.
 * Each iteration applies RotWord+SubWord+Rcon to the previous word. */
for (;;) {
temp = rk[3];
rk[4] = rk[0] ^
(Te4[(temp >> 16) & 0xff] & 0xff000000) ^
(Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
(Te4[(temp ) & 0xff] & 0x0000ff00) ^
(Te4[(temp >> 24) ] & 0x000000ff) ^
rcon_host[i];
rk[5] = rk[1] ^ rk[4];
rk[6] = rk[2] ^ rk[5];
rk[7] = rk[3] ^ rk[6];
if (++i == 10) {
/* advance so rk points at the last round key before memcpy */
rk += 4;
goto end;
}
rk += 4;
}
}
/* 192/256-bit keys carry two extra initial words */
rk[4] = GETU32(enc_key + 16);
rk[5] = GETU32(enc_key + 20);
if (key_bits == 192) {
/* AES-192: 8 iterations, 6 new schedule words each (last one short). */
for (;;) {
temp = rk[ 5];
rk[ 6] = rk[ 0] ^
(Te4[(temp >> 16) & 0xff] & 0xff000000) ^
(Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
(Te4[(temp ) & 0xff] & 0x0000ff00) ^
(Te4[(temp >> 24) ] & 0x000000ff) ^
rcon_host[i];
rk[ 7] = rk[ 1] ^ rk[ 6];
rk[ 8] = rk[ 2] ^ rk[ 7];
rk[ 9] = rk[ 3] ^ rk[ 8];
if (++i == 8) {
rk += 6;
goto end;
}
rk[10] = rk[ 4] ^ rk[ 9];
rk[11] = rk[ 5] ^ rk[10];
rk += 6;
}
}
rk[6] = GETU32(enc_key + 24);
rk[7] = GETU32(enc_key + 28);
if (key_bits == 256) {
/* AES-256: 7 iterations, 8 new words each; the second half of each
 * iteration uses SubWord WITHOUT the rotation (note the different
 * byte-lane masks below). */
for (;;) {
temp = rk[ 7];
rk[ 8] = rk[ 0] ^
(Te4[(temp >> 16) & 0xff] & 0xff000000) ^
(Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
(Te4[(temp ) & 0xff] & 0x0000ff00) ^
(Te4[(temp >> 24) ] & 0x000000ff) ^
rcon_host[i];
rk[ 9] = rk[ 1] ^ rk[ 8];
rk[10] = rk[ 2] ^ rk[ 9];
rk[11] = rk[ 3] ^ rk[10];
if (++i == 7) {
rk += 8;
goto end;
}
temp = rk[11];
rk[12] = rk[ 4] ^
(Te4[(temp >> 24) ] & 0xff000000) ^
(Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^
(Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^
(Te4[(temp ) & 0xff] & 0x000000ff);
rk[13] = rk[ 5] ^ rk[12];
rk[14] = rk[ 6] ^ rk[13];
rk[15] = rk[ 7] ^ rk[14];
rk += 8;
}
}
end:
/* rk now points at the last 4 schedule words: the final round key */
memcpy(dec_key, rk, 16);
}
/**************************************************************************
Experimental Codes
***************************************************************************/
/*
__global__ void computeHMAC_SHA1_AES(
uint8_t *input_buf, uint8_t *output,
size_t *input_size_arr, size_t *output_size_arr,
int N, uint8_t *checkbits_d,
int *key_idxs,
struct hmac_sa_entry *hmac_aes_key_array,
int32_t *offsets)
*/
__global__ void computeHMAC_SHA1_AES(
        uint8_t* input_buf, uint8_t *output_buf,
        size_t *input_size_arr, size_t *output_size_arr,
        int N, uint8_t *checkbits_d,
        const uint8_t* __restrict__ ivs,
        const int32_t* __restrict__ key_idxs, const struct hmac_aes_sa_entry* __restrict__ hmac_aes_key_array,
        const int32_t* __restrict__ offsets)
{
    /* Fused AES-128-CBC encryption followed by HMAC-SHA1, one thread per
     * packet (N packets).  offsets[idx] locates the packet inside
     * input_buf/output_buf; digests land at output_buf + idx*SHA_DIGEST_LENGTH.
     * output_size_arr is intentionally unused (output_roi is CUSTOMDATA).
     * checkbits_d, when non-NULL, gets one completion byte per CUDA block. */

    /* compute the thread id */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;

    __shared__ uint32_t shared_Te0[256];
    __shared__ uint32_t shared_Te1[256];
    __shared__ uint32_t shared_Te2[256];
    __shared__ uint32_t shared_Te3[256];
    __shared__ uint32_t shared_Rcon[10];

    /* Initialize the T boxes with ALL threads of the block.  This fixes
     * two defects in the original:
     *   - the copy loop broke on `index >= N` instead of `index >= 256`,
     *     leaving the tables partially uninitialized when N < 256 and
     *     writing past the shared arrays when N > 256;
     *   - the init ran inside `if (idx < N)` with the __syncthreads()
     *     commented out (a barrier there would be divergent), so table
     *     reads raced with the writes. */
    for (unsigned i = 0 ; i * blockDim.x < 256 ; i++) {
        unsigned index = threadIdx.x + i * blockDim.x;
        if (index >= 256)
            break;
        shared_Te0[index] = Te0_ConstMem[index];
        shared_Te1[index] = Te1_ConstMem[index];
        shared_Te2[index] = Te2_ConstMem[index];
        shared_Te3[index] = Te3_ConstMem[index];
    }
    for (unsigned i = 0; i * blockDim.x < 10; i++) {
        int index = threadIdx.x + blockDim.x * i;
        if (index < 10) {
            shared_Rcon[index] = rcon[index];
        }
    }
    /* make sure T boxes have been initialized */
    __syncthreads();

    if (idx < N) {
        /* Locate this packet's data */
        const uint8_t *in = input_buf + offsets[idx];
        uint8_t *out = output_buf + offsets[idx];

        const uint8_t *key = (const uint8_t*) hmac_aes_key_array[key_idxs[idx]].aes_key;
        uint8_t *ivec = (uint8_t*) (idx * AES_BLOCK_SIZE + ivs);

        /* Encrypt using cbc mode */
        unsigned long len = (unsigned long) input_size_arr[idx];
        const unsigned char *iv = ivec;
        while (len >= AES_BLOCK_SIZE) {
            *((uint64_t*)out) = *((uint64_t*)in) ^ *((uint64_t*)iv);
            *(((uint64_t*)out) + 1) = *(((uint64_t*)in) + 1) ^ *(((uint64_t*)iv) + 1);
            AES_128_encrypt(out, out, key,
                            shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);
            iv = out;
            len -= AES_BLOCK_SIZE;
            in += AES_BLOCK_SIZE;
            out += AES_BLOCK_SIZE;
        }
        if (len) {
            /* partial final block: XOR the tail bytes, carry the IV bytes
             * through for the remainder, then encrypt in place */
            for (unsigned n = 0; n < len; ++n)
                out[n] = in[n] ^ iv[n];
            for (unsigned n = len; n < AES_BLOCK_SIZE; ++n)
                out[n] = iv[n];
            AES_128_encrypt(out, out, key,
                            shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);
            iv = out;
        }
        /* persist the running IV for this packet slot */
        *((uint4*)ivec) = *((uint4*)iv);

        /* HMAC-SHA1 hashing over the now-encrypted payload */
        int32_t offset = offsets[idx];
        char *hmac_key = (char *) hmac_aes_key_array[key_idxs[idx]].hmac_key;
        uint16_t length = (uint16_t) input_size_arr[idx];
        if (offset != -1) {
            HMAC_SHA1((uint32_t*) (input_buf + offset), (uint32_t*) (output_buf + idx * SHA_DIGEST_LENGTH), length, (char*)hmac_key);
        }
    }

    /* reached by every thread of the block (non-divergent) */
    __syncthreads();
    if (threadIdx.x == 0 && checkbits_d != 0)
        *(checkbits_d + blockIdx.x) = 1;
}
/* Among AES_cbc_128_decryption, AES_cbc_128_encryption,
* AES_ecb_128_encryption and AES_decrypt_key_prepare(),
* AES_cbc_128_encrypt_gpu() is only used in NBA, for now. */
/* Expose the fused HMAC-SHA1/AES kernel as an opaque pointer so host-side
 * NBA code can reference it without pulling in device declarations. */
void *nba::ipsec_hmac_sha1_aes_get_cuda_kernel() {
    void *kernel_ptr = reinterpret_cast<void *>(computeHMAC_SHA1_AES);
    return kernel_ptr;
}
| 292edd1399ba9d9386d66ed27ef03d0b9159ccdb.cu | #include "IPsecHMACSHA1AES_kernel_core.hh"
#include "IPsecHMACSHA1AES_kernel.hh"
#include <cuda.h>
#include "../../engines/cuda/utils.hh"
#include <stdint.h>
#include <assert.h>
#include <stdio.h>
/*******************************************************************
HMAC-SHA1 kernel
******************************************************************/
/* Debug-print shim: under device emulation, debugprint maps straight to
 * printf and EMUSYNC to a real barrier; in normal device builds they map
 * to empty overloads / a no-op so call sites compile away. */
#ifdef __DEVICE_EMULATION__
#define debugprint printf
#define EMUSYNC __syncthreads()
#else
/* no-op overloads matching every debugprint call signature used below */
__device__ void _NOOPfunction(char *format) {
}
__device__ void _NOOPfunction(char *format, unsigned int onearg) {
}
__device__ void _NOOPfunction(char *format, unsigned int onearg,
unsigned int twoargs) {
}
__device__ void _NOOPfunction(char *format, char *onearg) {
}
#define EMUSYNC do {} while (0)
#define debugprint _NOOPfunction
#endif
#define SHA1_THREADS_PER_BLK 32
//__global__ uint32_t d_pad_buffer[16 * 2 * MAX_CHUNK_SIZE * MAX_GROUP_SIZE];
/* Reverse the byte order of a 32-bit word (endianness swap for SHA-1's
 * big-endian message words). */
__device__ uint32_t swap(uint32_t v) {
    uint32_t b0 = v >> 24;                    /* byte 3 -> byte 0 */
    uint32_t b1 = (v >> 8) & 0x0000ff00U;     /* byte 2 -> byte 1 */
    uint32_t b2 = (v << 8) & 0x00ff0000U;     /* byte 1 -> byte 2 */
    uint32_t b3 = v << 24;                    /* byte 0 -> byte 3 */
    return b0 | b1 | b2 | b3;
}
/* Running SHA-1 state: the five 32-bit chaining words (h0..h4 in the
 * spec, named h1..h5 here), updated after every 64-byte block. */
typedef struct hash_digest {
uint32_t h1;
uint32_t h2;
uint32_t h3;
uint32_t h4;
uint32_t h5;
} hash_digest_t;
#define HMAC
/* Fetch one 64-byte SHA-1 block starting at buf+offset into dest as 16
 * big-endian words, generating SHA-1 padding (0x80 marker, zero fill,
 * 64-bit bit-length) when the block overlaps or lies past the end of the
 * len-byte message.
 * NOTE: with HMAC defined, the recorded bit length is (len + 64) * 8
 * because the 64-byte ipad block hashed before the message counts toward
 * the total.  dest[14] (high word of the 64-bit length) is always written
 * as 0, i.e. messages are assumed shorter than 2^32 bits. */
__inline__ __device__ void getBlock(char* buf, int offset, int len,
uint32_t* dest) {
uint32_t *tmp;
unsigned int tempbuf[16];
tmp = (uint32_t*) (buf + offset);
debugprint("%d %d\n", offset, len);
if (offset + 64 <= len) {
/* case 0: block lies fully inside the message — just byte-swap */
debugprint("--0--\n");
#pragma unroll 16
for (int i = 0; i < 16; i++) {
dest[i] = swap(tmp[i]);
}
} else if (len > offset && (len - offset) < 56) { //case 1 enough space in last block for padding
debugprint("--1--\n");
int i;
/* copy the whole remaining words of the message */
for (i = 0; i < (len - offset) / 4; i++) {
//debugprint("%d %d\n",offset,i);
//debugprint("%p %p\n", buf, dest);
//tempbuf[i] = buf[i];
tempbuf[i] = swap(tmp[i]);
}
//printf("len%%4 %d\n",len%4);
/* append the 0x80 padding marker after the last message byte,
 * preserving the trailing partial word's bytes */
switch (len % 4) {
case 0:
tempbuf[i] = swap(0x00000080);
i++;
break;
case 1:
tempbuf[i] = swap(0x00008000 | (tmp[i] & 0x000000FF));
i++;
break;
case 2:
tempbuf[i] = swap(0x00800000 | (tmp[i] & 0x0000FFFF));
i++;
break;
case 3:
tempbuf[i] = swap(0x80000000 | (tmp[i] & 0x00FFFFFF));
i++;
break;
};
/* zero-fill up to the length field */
for (; i < 14; i++) {
tempbuf[i] = 0;
}
#pragma unroll 14
for (i = 0; i < 14; i++) {
dest[i] = tempbuf[i];
}
/* 64-bit big-endian bit length; high word always 0 */
dest[14] = 0x00000000;
#ifndef HMAC
dest[15] = len * 8;
#else
dest[15] = (len + 64) * 8;
#endif
} else if (len > offset && (len - offset) >= 56) { //case 2 not enough space in last block (containing message) for padding
/* the length field spills into one extra all-padding block (case 4) */
debugprint("--2--\n");
int i;
for (i = 0; i < (len - offset) / 4; i++) {
tempbuf[i] = swap(tmp[i]);
}
switch (len % 4) {
case 0:
tempbuf[i] = swap(0x00000080);
i++;
break;
case 1:
tempbuf[i] = swap(0x00008000 | (tmp[i] & 0x000000FF));
i++;
break;
case 2:
tempbuf[i] = swap(0x00800000 | (tmp[i] & 0x0000FFFF));
i++;
break;
case 3:
tempbuf[i] = swap(0x80000000 | (tmp[i] & 0x00FFFFFF));
i++;
break;
};
for (; i < 16; i++) {
tempbuf[i] = 0x00000000;
}
#pragma unroll 16
for (i = 0; i < 16; i++) {
dest[i] = tempbuf[i];
}
} else if (offset == len) { //message end is aligned in 64 bytes
/* case 3: pure padding block starting with the 0x80 marker */
debugprint("--3--\n");
dest[0] = swap(0x00000080);
#pragma unroll 13
for (int i = 1; i < 14; i++)
dest[i] = 0x00000000;
dest[14] = 0x00000000;
#ifndef HMAC
dest[15] = len * 8;
#else
dest[15] = (len + 64) * 8;
#endif
} else if (offset > len) { //the last block in case 2
/* case 4: all-zero block carrying only the bit-length field */
debugprint("--4--\n");
#pragma unroll 14
for (int i = 0; i < 14; i++)
dest[i] = 0x00000000;
dest[14] = 0x00000000;
#ifndef HMAC
dest[15] = len * 8;
#else
dest[15] = (len + 64) * 8;
#endif
} else {
debugprint("Not supposed to happen\n");
}
}
/* Absorb one 64-byte block of the message at in+offset into the running
 * SHA-1 state h.  w is caller-provided scratch for the 16-word message
 * schedule; len is the total message length in bytes (padding past the
 * end of the data is generated by getBlock()).
 *
 * The original implementation unrolled all 80 SHA-1 rounds by hand
 * (~790 lines of copy-pasted round bodies).  This version expresses the
 * identical computation (FIPS 180-4) as a single loop over an in-place
 * 16-word schedule ring and lets the compiler unroll it. */
__device__ void computeSHA1Block(char* in, uint32_t* w, int offset, int len,
hash_digest_t &h) {
    uint32_t a = h.h1;
    uint32_t b = h.h2;
    uint32_t c = h.h3;
    uint32_t d = h.h4;
    uint32_t e = h.h5;

    getBlock(in, offset, len, w);

#pragma unroll
    for (int t = 0; t < 80; t++) {
        uint32_t f, k;
        if (t < 20) {
            f = (b & c) | ((~b) & d);           /* Ch     */
            k = 0x5A827999;
        } else if (t < 40) {
            f = b ^ c ^ d;                      /* Parity */
            k = 0x6ED9EBA1;
        } else if (t < 60) {
            f = (b & c) | (b & d) | (c & d);    /* Maj    */
            k = 0x8F1BBCDC;
        } else {
            f = b ^ c ^ d;                      /* Parity */
            k = 0xCA62C1D6;
        }

        uint32_t temp = ((a << 5) | (a >> 27)) + f + e + k + w[t & 15];
        e = d;
        d = c;
        c = (b << 30) | (b >> 2);
        b = a;
        a = temp;

        /* Message-schedule expansion over the 16-word ring:
         *   W[t+16] = ROTL1(W[t+13] ^ W[t+8] ^ W[t+2] ^ W[t]).
         * The last 16 rounds never read the expanded words, so skip the
         * work there — exactly as the hand-unrolled original did. */
        if (t < 64) {
            uint32_t wt = w[(t + 13) & 15] ^ w[(t + 8) & 15]
                        ^ w[(t + 2) & 15] ^ w[t & 15];
            w[t & 15] = (wt << 1) | (wt >> 31);
        }
    }

    h.h1 += a;
    h.h2 += b;
    h.h3 += c;
    h.h4 += d;
    h.h5 += e;
}
/*
__global__ void computeSHA1(char* buf, int *offsets, int *len, char* output, int N)
{
//__shared__ uint32_t w_shared[16*SHA1_THREADS_PER_BLK];
uint32_t w_register[16];
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N) {
uint32_t *w = w_register;//w_shared + 16*threadIdx.x;
hash_digest_t h;
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
int num_iter = (len[index]+63+9)/64;
debugprint("num_iter %d\n", num_iter);
for(int i = 0; i < num_iter; i++)
computeSHA1Block(buf + offsets[index], w, i*64 , len[index], h);
h.h1 = swap(h.h1);
h.h2 = swap(h.h2);
h.h3 = swap(h.h3);
h.h4 = swap(h.h4);
h.h5 = swap(h.h5);
uint32_t * out = (uint32_t*)(output + index*20);
*(out++) = h.h1;
*(out++) = h.h2;
*(out++) = h.h3;
*(out++) = h.h4;
*(out++) = h.h5;
}
}*/
/*
some how *pad = *pad++ ^ *key++
was optimized and does not work correctly in GPU oTL.
*/
/* XOR the 16-word (64-byte) HMAC pad in place with the first 64 bytes of
 * key material.  Kept as explicit indexing on purpose — see the comment
 * above about the pointer-increment form being miscompiled. */
__device__ void xorpads(uint32_t *pad, uint32_t* key) {
#pragma unroll 16
for (int i = 0; i < 16; i++)
*(pad + i) = *(pad + i) ^ *(key + i);
}
/*
uint32_t opad[16] =
{ 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c,
0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c,
0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c,
0x5c5c5c5c, };
uint32_t ipad[16] =
{ 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636,
0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636,
0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636,
0x36363636, };
*/
// in: start pointer of the data to be authenticated by hsha1.
// out: start pointer of the data where hsha1 signature will be recorded.
// length: length of the data to be authenticated by hsha1.
// key: hmac key.
/* HMAC-SHA1 over `length` bytes at `in`, writing the 20-byte digest to
 * `out` as five words (byte-swapped to big-endian via swap()).
 * `key` supplies the HMAC key material used to build ipad/opad.
 * NOTE(review): xorpads() XORs 16 words (64 bytes) of key into the pad,
 * i.e. the key is treated as one full block — no pre-hashing of longer
 * keys; confirm callers guarantee this layout. */
__device__ void HMAC_SHA1(uint32_t *in, uint32_t *out, uint32_t length,
char *key) {
uint32_t w_register[16];
uint32_t *w = w_register; //w_shared + 16*threadIdx.x;
hash_digest_t h;
/* Build the inner pad: ipad = key XOR 0x36 repeated. */
for (int i = 0; i < 16; i++)
w[i] = 0x36363636;
xorpads(w, (uint32_t*) (key));
/* SHA-1 initial chaining values. */
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
//SHA1 compute on ipad
computeSHA1Block((char*) w, w, 0, 64, h);
//SHA1 compute on mesage
/* +9 accounts for the 0x80 pad byte and the 8-byte length field. */
int num_iter = (length + 63 + 9) / 64;
for (int i = 0; i < num_iter; i++)
computeSHA1Block((char*) in, w, i * 64, length, h);
/* Inner hash H(ipad || message), staged in `out`. */
*(out) = swap(h.h1);
*(out + 1) = swap(h.h2);
*(out + 2) = swap(h.h3);
*(out + 3) = swap(h.h4);
*(out + 4) = swap(h.h5);
/* Reset the chaining state for the outer hash. */
h.h1 = 0x67452301;
h.h2 = 0xEFCDAB89;
h.h3 = 0x98BADCFE;
h.h4 = 0x10325476;
h.h5 = 0xC3D2E1F0;
/* Build the outer pad: opad = key XOR 0x5c repeated. */
for (int i = 0; i < 16; i++)
w[i] = 0x5c5c5c5c;
xorpads(w, (uint32_t*) (key));
//SHA 1 compute on opads
computeSHA1Block((char*) w, w, 0, 64, h);
//SHA 1 compute on (hash of ipad|m)
computeSHA1Block((char*) out, w, 0, 20, h);
/* Final digest: H(opad || inner-hash). */
*(out) = swap(h.h1);
*(out + 1) = swap(h.h2);
*(out + 2) = swap(h.h3);
*(out + 3) = swap(h.h4);
*(out + 4) = swap(h.h5);
}
/*******************************************************************
AES CBC kernel
******************************************************************/
/* former prototype
__global__ void
AES_cbc_128_encrypt_kernel_SharedMem(const uint8_t *in_all,
uint8_t *out_all,
const uint32_t *pkt_offset,
const uint8_t *keys,
uint8_t *ivs,
const unsigned int num_flows,
uint8_t *checkbits = 0)
*/
/* AES-128-CBC encryption kernel: one packet/flow per thread.
 *
 * in_all/out_all: packed packet buffers; packet i occupies bytes
 *   [pkt_offset[i], pkt_offset[i+1]).
 * num_flows: number of packets (== number of useful threads).
 * checkbits: one completion byte per thread block (may be NULL).
 * key_idxs/key_array: per-packet index into the SA table holding the AES key.
 * ivs: one AES_BLOCK_SIZE IV per packet; overwritten with the last cipher
 *   block so a subsequent launch can continue the CBC chain.
 * input_size_arr/output_size_arr: unused here, kept for the shared
 *   kernel-signature convention.
 *
 * Fixes vs. the original:
 *  - T-box init compared `index` against num_flows instead of the table
 *    size (256), leaving shared tables partially uninitialized whenever
 *    num_flows < 256.
 *  - Out-of-range threads returned BEFORE the table init and
 *    __syncthreads(), so in-range threads synchronized on a barrier some
 *    block members never reached (undefined behaviour). All threads now
 *    participate in init and both barriers. */
__global__ void
AES_cbc_128_encrypt_kernel_SharedMem(
        const uint8_t *in_all,
        uint8_t *out_all,
        size_t *input_size_arr,
        size_t *output_size_arr,
        int num_flows,
        uint8_t *checkbits,
        int *key_idxs,
        struct aes_sa_entry *key_array,
        uint8_t *ivs,
        const uint32_t *pkt_offset
        )
{
    __shared__ uint32_t shared_Te0[256];
    __shared__ uint32_t shared_Te1[256];
    __shared__ uint32_t shared_Te2[256];
    __shared__ uint32_t shared_Te3[256];
    __shared__ uint32_t shared_Rcon[10];

    /* compute the thread id */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;

    /* Initialize T boxes cooperatively; bound by the table size. */
    for (unsigned i = 0; i * blockDim.x < 256; i++) {
        unsigned index = threadIdx.x + i * blockDim.x;
        if (index >= 256)
            break;
        shared_Te0[index] = Te0_ConstMem[index];
        shared_Te1[index] = Te1_ConstMem[index];
        shared_Te2[index] = Te2_ConstMem[index];
        shared_Te3[index] = Te3_ConstMem[index];
    }
    for (unsigned i = 0; i * blockDim.x < 10; i++) {
        int index = threadIdx.x + blockDim.x * i;
        if (index < 10) {
            shared_Rcon[index] = rcon[index];
        }
    }

    /* make sure T boxes have been initialized. */
    __syncthreads();

    if (idx < num_flows) {
        /* Locate this thread's packet. */
        const uint8_t *in = pkt_offset[idx] + in_all;
        uint8_t *out = pkt_offset[idx] + out_all;
        const uint8_t *key = key_array[key_idxs[idx]].aes_key;
        uint8_t *ivec = idx * AES_BLOCK_SIZE + ivs;

        /* Encrypt using CBC mode. */
        unsigned long len = pkt_offset[idx + 1] - pkt_offset[idx];
        const unsigned char *iv = ivec;

        while (len >= AES_BLOCK_SIZE) {
            /* XOR plaintext with the previous cipher block (or IV). */
            *((uint64_t*)out) = *((uint64_t*)in) ^ *((uint64_t*)iv);
            *(((uint64_t*)out) + 1) = *(((uint64_t*)in) + 1) ^ *(((uint64_t*)iv) + 1);
            AES_128_encrypt(out, out, key,
                    shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);
            iv = out;
            len -= AES_BLOCK_SIZE;
            in += AES_BLOCK_SIZE;
            out += AES_BLOCK_SIZE;
        }

        if (len) {
            /* Final partial block: XOR the payload bytes, pad the rest
             * with IV bytes, then encrypt. */
            for (unsigned n = 0; n < len; ++n)
                out[n] = in[n] ^ iv[n];
            for (unsigned n = len; n < AES_BLOCK_SIZE; ++n)
                out[n] = iv[n];
            AES_128_encrypt(out, out, key,
                    shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);
            iv = out;
        }

        /* Persist the last cipher block as the next IV. */
        *((uint4*)ivec) = *((uint4*)iv);
    }

    __syncthreads();
    if (threadIdx.x == 0 && checkbits != 0)
        *(checkbits + blockIdx.x) = 1;
}
/* AES-128-CBC decryption kernel: one 16-byte cipher block per thread.
 * CBC decryption parallelizes per block: each thread decrypts block
 * `idx` and XORs with the preceding cipher block (or its packet's IV
 * for the first block of a packet).
 *
 * keys: 16-byte AES key per packet; pkt_index maps a block to its packet.
 * ivs: one AES_BLOCK_SIZE IV per packet.
 * checkbits: one completion byte per thread block (may be NULL).
 *
 * Fix vs. the original: an exact duplicate of the Rcon-init loop was
 * removed (it was executed twice back to back). */
__global__
void AES_cbc_128_decrypt_kernel_SharedMem(const uint8_t *in_all,
        uint8_t *out_all,
        uint8_t *keys,
        uint8_t *ivs,
        uint16_t *pkt_index,
        unsigned long block_count,
        uint8_t *checkbits = 0
        )
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;

    __shared__ uint32_t shared_Td0[256];
    __shared__ uint32_t shared_Td1[256];
    __shared__ uint32_t shared_Td2[256];
    __shared__ uint32_t shared_Td3[256];
    __shared__ uint8_t  shared_Td4[256];
    __shared__ uint32_t shared_Rcon[10];
    __shared__ uint32_t shared_Te0[256];
    __shared__ uint32_t shared_Te1[256];
    __shared__ uint32_t shared_Te2[256];
    __shared__ uint32_t shared_Te3[256];

    /* Initialize encrypt + decrypt T boxes cooperatively. */
    for (unsigned i = 0; i * blockDim.x < 256; i++) {
        unsigned index = threadIdx.x + i * blockDim.x;
        if (index >= 256)
            break;
        shared_Te0[index] = Te0_ConstMem[index];
        shared_Te1[index] = Te1_ConstMem[index];
        shared_Te2[index] = Te2_ConstMem[index];
        shared_Te3[index] = Te3_ConstMem[index];
        shared_Td0[index] = Td0_ConstMem[index];
        shared_Td1[index] = Td1_ConstMem[index];
        shared_Td2[index] = Td2_ConstMem[index];
        shared_Td3[index] = Td3_ConstMem[index];
        shared_Td4[index] = Td4_ConstMem[index];
    }
    for (unsigned i = 0; i * blockDim.x < 10; i++) {
        int index = threadIdx.x + blockDim.x * i;
        if (index < 10) {
            shared_Rcon[index] = rcon[index];
        }
    }

    /* Barrier before any thread may exit: all threads reached the init. */
    __syncthreads();

    if (idx >= block_count)
        return;

    /* Locate this thread's cipher block and its packet's round-key words. */
    const uint8_t *in = idx * AES_BLOCK_SIZE + in_all;
    uint8_t *out = idx * AES_BLOCK_SIZE + out_all;
    uint16_t packet_index = pkt_index[idx];

    uint32_t rk[4];
    rk[0] = *((uint32_t*)(keys + 16 * packet_index));
    rk[1] = *((uint32_t*)(keys + 16 * packet_index + 4));
    rk[2] = *((uint32_t*)(keys + 16 * packet_index + 8));
    rk[3] = *((uint32_t*)(keys + 16 * packet_index + 12));

    uint8_t *ivec = packet_index * AES_BLOCK_SIZE + ivs;

    /* First block of a packet chains with the IV; later blocks chain
     * with the previous cipher block in the input stream. */
    const unsigned char *iv;
    if (idx == 0 || pkt_index[idx] != pkt_index[idx-1])
        iv = ivec;
    else
        iv = in - AES_BLOCK_SIZE;

    AES_128_decrypt(in, out, rk,
            shared_Td0, shared_Td1, shared_Td2, shared_Td3, shared_Td4,
            shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);

    *((uint64_t*)out) = *((uint64_t*)out) ^ *((uint64_t*)iv);
    *(((uint64_t*)out) + 1) = *(((uint64_t*)out) + 1) ^ *(((uint64_t*)iv) + 1);

    __syncthreads();
    if (threadIdx.x == 0 && checkbits != 0)
        *(checkbits + blockIdx.x) = 1;
}
/*******************************************************************
AES ECB kernel
******************************************************************/
/* AES-128-ECB encryption kernel: one 16-byte block per thread.
 * pkt_index maps each block index to its packet so the right 16-byte
 * key (keys + 16 * packet) is used.
 *
 * Fix vs. the original: the `idx >= block_count` early return preceded
 * __syncthreads(), so out-of-range threads never reached the barrier
 * the rest of the block waited on (undefined behaviour). The barrier
 * now comes first. */
__global__ void
AES_ecb_encrypt_kernel(const uint8_t *in_all,
        uint8_t *out_all,
        const uint8_t *keys,
        uint16_t *pkt_index,
        unsigned long block_count
        )
{
    __shared__ uint32_t shared_Te0[256];
    __shared__ uint32_t shared_Te1[256];
    __shared__ uint32_t shared_Te2[256];
    __shared__ uint32_t shared_Te3[256];
    __shared__ uint32_t shared_Rcon[10];

    /* compute the thread id */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;

    /* initialize T boxes; every thread of the block participates */
    for (unsigned i = 0; i * blockDim.x < 256; i++) {
        unsigned index = i * blockDim.x + threadIdx.x;
        if (index >= 256)
            break;
        shared_Te0[index] = Te0_ConstMem[index];
        shared_Te1[index] = Te1_ConstMem[index];
        shared_Te2[index] = Te2_ConstMem[index];
        shared_Te3[index] = Te3_ConstMem[index];
    }
    for (unsigned i = 0; i * blockDim.x < 10; i++) {
        unsigned index = threadIdx.x + blockDim.x * i;
        if (index < 10) {
            shared_Rcon[index] = rcon[index];
        }
    }

    /* make sure T boxes have been initialized before any thread exits. */
    __syncthreads();

    if (idx >= block_count)
        return;

    /* Locate data */
    const uint8_t *in = idx * AES_BLOCK_SIZE + in_all;
    uint8_t *out = idx * AES_BLOCK_SIZE + out_all;
    uint16_t pktIndex = pkt_index[idx];
    const uint8_t *key = pktIndex * 16 + keys;

    AES_128_encrypt(in, out, key,
            shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);
}
/**************************************************************************
Exported C++ function wrapper function for CUDA kernel
***************************************************************************/
/*
* Sangwook: Those wrapper functions are not used in NBA.
void AES_cbc_128_decrypt_gpu(const uint8_t *in_d,
uint8_t *out_d,
uint8_t *keys_d,
uint8_t *ivs_d,
uint16_t *pkt_index_d,
unsigned long block_count,
uint8_t *checkbits_d,
const unsigned int threads_per_blk,
cudaStream_t stream )
{
unsigned int num_cuda_blks = (block_count+threads_per_blk - 1) / threads_per_blk;
if (stream == 0) {
AES_cbc_128_decrypt_kernel_SharedMem<<<num_cuda_blks, threads_per_blk>>>(
in_d, out_d, keys_d, ivs_d, pkt_index_d, block_count, checkbits_d);
} else {
AES_cbc_128_decrypt_kernel_SharedMem<<<num_cuda_blks, threads_per_blk, 0, stream>>>(
in_d, out_d, keys_d, ivs_d, pkt_index_d, block_count, checkbits_d);
}
}
void AES_cbc_128_encrypt_gpu(const uint8_t *in_d,
uint8_t *out_d,
const uint32_t *pkt_offset_d,
const uint8_t *keys_d,
uint8_t *ivs_d,
const unsigned int num_flows,
uint8_t *checkbits_d,
const unsigned int threads_per_blk,
cudaStream_t stream)
{
unsigned int num_cuda_blks = (num_flows+threads_per_blk - 1) / threads_per_blk;
if (stream == 0) {
AES_cbc_128_encrypt_kernel_SharedMem<<<num_cuda_blks, threads_per_blk>>>(
in_d, out_d, pkt_offset_d, keys_d, ivs_d, num_flows, checkbits_d);
} else {
AES_cbc_128_encrypt_kernel_SharedMem<<<num_cuda_blks, threads_per_blk, 0, stream>>>(
in_d, out_d, pkt_offset_d, keys_d, ivs_d, num_flows, checkbits_d);
}
}
void AES_ecb_128_encrypt_gpu(const uint8_t *in_d,
uint8_t *out_d,
const uint8_t *keys_d,
uint16_t *pkt_index_d,
unsigned long block_count,
const unsigned int threads_per_blk,
cudaStream_t stream)
{
unsigned int num_cuda_blks = (block_count + threads_per_blk - 1) / threads_per_blk;
if (stream == 0) {
AES_ecb_encrypt_kernel<<<num_cuda_blks, threads_per_blk>>>(
in_d, out_d, keys_d, pkt_index_d, block_count);
} else {
AES_ecb_encrypt_kernel<<<num_cuda_blks, threads_per_blk, 0, stream>>>(
in_d, out_d, keys_d, pkt_index_d, block_count);
}
}
*/
/**************************************************************************
Key Setup for Decryption
***************************************************************************/
/* Host-side AES key-schedule expansion for decryption.
 * Expands `enc_key` (key_bits = 128/192/256) through the standard AES
 * key schedule and copies the FINAL round key into `dec_key`.
 * NOTE(review): only 16 bytes (the last round key) are copied out —
 * presumably the device kernels re-derive the remaining round keys on
 * the fly (see the rk[4] load in the CBC decrypt kernel); confirm
 * `dec_key` is sized/used accordingly by callers. */
void AES_decrypt_key_prepare(uint8_t *dec_key,
const uint8_t *enc_key,
unsigned int key_bits)
{
uint32_t rk_buf[60];
uint32_t *rk = rk_buf;
int i = 0;
uint32_t temp;
/* First Nk words of the schedule come straight from the cipher key. */
rk[0] = GETU32(enc_key );
rk[1] = GETU32(enc_key + 4);
rk[2] = GETU32(enc_key + 8);
rk[3] = GETU32(enc_key + 12);
if (key_bits == 128) {
/* 10 rounds; each iteration derives the next 4 words. */
for (;;) {
temp = rk[3];
rk[4] = rk[0] ^
(Te4[(temp >> 16) & 0xff] & 0xff000000) ^
(Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
(Te4[(temp ) & 0xff] & 0x0000ff00) ^
(Te4[(temp >> 24) ] & 0x000000ff) ^
rcon_host[i];
rk[5] = rk[1] ^ rk[4];
rk[6] = rk[2] ^ rk[5];
rk[7] = rk[3] ^ rk[6];
if (++i == 10) {
rk += 4;
goto end;
}
rk += 4;
}
}
rk[4] = GETU32(enc_key + 16);
rk[5] = GETU32(enc_key + 20);
if (key_bits == 192) {
/* 12 rounds; each iteration derives the next 6 words. */
for (;;) {
temp = rk[ 5];
rk[ 6] = rk[ 0] ^
(Te4[(temp >> 16) & 0xff] & 0xff000000) ^
(Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
(Te4[(temp ) & 0xff] & 0x0000ff00) ^
(Te4[(temp >> 24) ] & 0x000000ff) ^
rcon_host[i];
rk[ 7] = rk[ 1] ^ rk[ 6];
rk[ 8] = rk[ 2] ^ rk[ 7];
rk[ 9] = rk[ 3] ^ rk[ 8];
if (++i == 8) {
rk += 6;
goto end;
}
rk[10] = rk[ 4] ^ rk[ 9];
rk[11] = rk[ 5] ^ rk[10];
rk += 6;
}
}
rk[6] = GETU32(enc_key + 24);
rk[7] = GETU32(enc_key + 28);
if (key_bits == 256) {
/* 14 rounds; each iteration derives the next 8 words (with the
 * extra SubWord step on rk[11] specific to AES-256). */
for (;;) {
temp = rk[ 7];
rk[ 8] = rk[ 0] ^
(Te4[(temp >> 16) & 0xff] & 0xff000000) ^
(Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
(Te4[(temp ) & 0xff] & 0x0000ff00) ^
(Te4[(temp >> 24) ] & 0x000000ff) ^
rcon_host[i];
rk[ 9] = rk[ 1] ^ rk[ 8];
rk[10] = rk[ 2] ^ rk[ 9];
rk[11] = rk[ 3] ^ rk[10];
if (++i == 7) {
rk += 8;
goto end;
}
temp = rk[11];
rk[12] = rk[ 4] ^
(Te4[(temp >> 24) ] & 0xff000000) ^
(Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^
(Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^
(Te4[(temp ) & 0xff] & 0x000000ff);
rk[13] = rk[ 5] ^ rk[12];
rk[14] = rk[ 6] ^ rk[13];
rk[15] = rk[ 7] ^ rk[14];
rk += 8;
}
}
end:
/* `rk` now points at the last round key; export those 16 bytes. */
memcpy(dec_key, rk, 16);
}
/**************************************************************************
Experimental Codes
***************************************************************************/
/*
__global__ void computeHMAC_SHA1_AES(
uint8_t *input_buf, uint8_t *output,
size_t *input_size_arr, size_t *output_size_arr,
int N, uint8_t *checkbits_d,
int *key_idxs,
struct hmac_sa_entry *hmac_aes_key_array,
int32_t *offsets)
*/
/* Fused AES-128-CBC encryption + HMAC-SHA1 authentication, one packet
 * per thread.
 *
 * input_buf/output_buf: packed packets located via offsets[idx].
 * input_size_arr: per-packet payload length in bytes.
 * N: number of packets; ivs: per-packet CBC IV, updated to the last
 *   cipher block. key_idxs/hmac_aes_key_array: per-packet SA holding
 *   both the AES and HMAC keys.
 * The SHA1 digest of packet idx is written at
 *   output_buf + idx * SHA_DIGEST_LENGTH (skipped when offsets[idx] == -1).
 * checkbits_d: per-block completion flags (may be NULL).
 *
 * Fixes vs. the original:
 *  - T-box init compared `index` against N instead of the table size
 *    (256): with N < 256 the shared tables were partially uninitialized.
 *  - The shared-table init sat inside the divergent `idx < N` guard and
 *    the post-init __syncthreads() was commented out, racing readers
 *    against writers. Init now runs block-wide, followed by a barrier. */
__global__ void computeHMAC_SHA1_AES(
        uint8_t* input_buf, uint8_t *output_buf,
        size_t *input_size_arr, size_t *output_size_arr,
        int N, uint8_t *checkbits_d,
        const uint8_t* __restrict__ ivs,
        const int32_t* __restrict__ key_idxs, const struct hmac_aes_sa_entry* __restrict__ hmac_aes_key_array,
        const int32_t* __restrict__ offsets)
{
    __shared__ uint32_t shared_Te0[256];
    __shared__ uint32_t shared_Te1[256];
    __shared__ uint32_t shared_Te2[256];
    __shared__ uint32_t shared_Te3[256];
    __shared__ uint32_t shared_Rcon[10];

    /* compute the thread id */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;

    /* Initialize T boxes cooperatively (all threads, bound = 256). */
    for (unsigned i = 0; i * blockDim.x < 256; i++) {
        unsigned index = threadIdx.x + i * blockDim.x;
        if (index >= 256)
            break;
        shared_Te0[index] = Te0_ConstMem[index];
        shared_Te1[index] = Te1_ConstMem[index];
        shared_Te2[index] = Te2_ConstMem[index];
        shared_Te3[index] = Te3_ConstMem[index];
    }
    for (unsigned i = 0; i * blockDim.x < 10; i++) {
        int index = threadIdx.x + blockDim.x * i;
        if (index < 10) {
            shared_Rcon[index] = rcon[index];
        }
    }
    /* make sure T boxes have been initialized. */
    __syncthreads();

    if (idx < N) {
        /* Locate this thread's packet. */
        const uint8_t *in = input_buf + offsets[idx];
        uint8_t *out = output_buf + offsets[idx];
        const uint8_t *key = (const uint8_t*) hmac_aes_key_array[key_idxs[idx]].aes_key;
        uint8_t *ivec = (uint8_t*) (idx * AES_BLOCK_SIZE + ivs);

        /* Encrypt using CBC mode. */
        unsigned long len = (unsigned long) input_size_arr[idx];
        const unsigned char *iv = ivec;

        while (len >= AES_BLOCK_SIZE) {
            *((uint64_t*)out) = *((uint64_t*)in) ^ *((uint64_t*)iv);
            *(((uint64_t*)out) + 1) = *(((uint64_t*)in) + 1) ^ *(((uint64_t*)iv) + 1);
            AES_128_encrypt(out, out, key,
                    shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);
            iv = out;
            len -= AES_BLOCK_SIZE;
            in += AES_BLOCK_SIZE;
            out += AES_BLOCK_SIZE;
        }

        if (len) {
            /* Final partial block: XOR payload bytes, pad with IV bytes. */
            for (unsigned n = 0; n < len; ++n)
                out[n] = in[n] ^ iv[n];
            for (unsigned n = len; n < AES_BLOCK_SIZE; ++n)
                out[n] = iv[n];
            AES_128_encrypt(out, out, key,
                    shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon);
            iv = out;
        }

        /* Persist the last cipher block as the next IV. */
        *((uint4*)ivec) = *((uint4*)iv);

        /* HMAC-SHA1 hashing over the (now encrypted) payload. */
        int32_t offset = offsets[idx];
        char *hmac_key = (char *) hmac_aes_key_array[key_idxs[idx]].hmac_key;
        uint16_t length = (uint16_t) input_size_arr[idx];
        if (offset != -1) {
            HMAC_SHA1((uint32_t*) (input_buf + offset), (uint32_t*) (output_buf + idx * SHA_DIGEST_LENGTH), length, (char*)hmac_key);
            /* output_size_arr[idx] intentionally not set: output_roi is
             * CUSTOMDATA, so output_size_arr is unused. */
        }
    }

    __syncthreads();
    if (threadIdx.x == 0 && checkbits_d != 0)
        *(checkbits_d + blockIdx.x) = 1;
}
/* Among AES_cbc_128_decryption, AES_cbc_128_encryption,
* AES_ecb_128_encryption and AES_decrypt_key_prepare(),
* AES_cbc_128_encrypt_gpu() is only used in NBA, for now. */
/* Expose the fused HMAC-SHA1 + AES kernel's entry point as an opaque
 * pointer for the NBA framework's launch machinery. */
void *nba::ipsec_hmac_sha1_aes_get_cuda_kernel() {
    void *kernel_entry = reinterpret_cast<void *>(computeHMAC_SHA1_AES);
    return kernel_entry;
}
|
c142e6f6c841e446631ae014e8e333e6b0a98f32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
// Bilinear sampling is done in BHWD (coalescing is not obvious in BDHW)
// we assume BHWD format in inputImages
// we assume BHW(YX) format on grids
/* Maps a normalized coordinate x in [-1, 1] to continuous pixel space
 * and emits, via the output parameters, the integer coordinate of the
 * left (or top) neighbouring pixel together with its linear
 * interpolation weight. */
__device__ void getTopLeft(float x, int width, int& point, float& weight)
{
   float continuous = (x + 1) * (width - 1) / 2;
   point = floor(continuous);
   /* Fractional remainder gives the right/bottom weight; the left/top
    * neighbour gets its complement. */
   weight = 1 - (continuous - point);
}
/* True iff value lies inside the closed interval [lowerBound, upperBound]. */
__device__ bool between(int value, int lowerBound, int upperBound)
{
   return !(value < lowerBound || value > upperBound);
}
/* Warp-level tree reduction: sums a 32-element shared array into s[0].
 * `volatile` forces each step's stores to be visible to the next step.
 * NOTE(review): relies on implicit warp-synchronous execution (the
 * pre-Volta assumption); on Volta+ independent thread scheduling this
 * pattern needs __syncwarp() between steps — confirm target arch. */
__device__ void sumReduceShMem(volatile float s[])
{
/* obviously only works for 32 elements */
/* sums up a shared memory array of 32 elements, stores it in s[0] */
/* whole warp can then read first element (broadcasting) */
if(threadIdx.x<16) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+16]; }
if(threadIdx.x<8) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+8]; }
if(threadIdx.x<4) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+4]; }
if(threadIdx.x<2) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+2]; }
if(threadIdx.x<1) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+1]; }
}
/* Forward bilinear sampling in BHWD layout.
 * Expected launch: blocks = (ceil(output_w/16), output_h, batch),
 * threads = (32, 16): threadIdx.y selects one of 16 output pixels per
 * block, threadIdx.x strides the channel dimension.
 * grids_data holds, per output pixel, a (y, x) pair of normalized
 * coordinates in [-1, 1] choosing where the input image is sampled.
 * Corners falling outside the image contribute 0. */
__global__ void bilinearSamplingFromGrid(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth,
float* output_data, int output_strideBatch, int output_strideChannels, int output_strideHeight, int output_strideWidth,
int inputImages_channels, int inputImages_height, int inputImages_width, int output_width)
{
// each (32,16) block 16 output pixels (for coalescing the grid read)
// x,y = coordinates (xOut = blockIdx.x*16+blockDim.y+threadIdx.y)
// z = batch index
// threadIdx.x : used for features (coalescing is trivial)
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < output_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 2 < output_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int b = blockIdx.z;
float yf,xf;
/* Row threadIdx.y == 0 stages the block's 16 (y,x) grid pairs into
   shared memory with one coalesced read. */
__shared__ float gridData[32];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + threadIdx.x];
}
__syncthreads();
if(!withinImageBounds) return;
/* Normalized sample coordinates for this thread's output pixel. */
yf = gridData[threadIdx.y*2];
xf = gridData[threadIdx.y*2+1];
int yInTopLeft, xInTopLeft;
float yWeightTopLeft, xWeightTopLeft;
getTopLeft(xf, inputImages_width, xInTopLeft, xWeightTopLeft);
getTopLeft(yf, inputImages_height, yInTopLeft, yWeightTopLeft);
const int outAddress = output_strideBatch * b + output_strideHeight * yOut + output_strideWidth * xOut;
const int inTopLeftAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeft + inputImages_strideWidth * xInTopLeft;
const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth;
const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight;
const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth;
float v=0;
float inTopLeft=0;
float inTopRight=0;
float inBottomLeft=0;
float inBottomRight=0;
/* Corner validity is loop-invariant; invalid corners keep value 0. */
bool topLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft, 0, height-1);
bool topRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft, 0, height-1);
bool bottomLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft+1, 0, height-1);
bool bottomRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft+1, 0, height-1);
// interpolation happens here
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
if(topLeftIsIn) inTopLeft = inputImages_data[inTopLeftAddress + t];
if(topRightIsIn) inTopRight = inputImages_data[inTopRightAddress + t];
if(bottomLeftIsIn) inBottomLeft = inputImages_data[inBottomLeftAddress + t];
if(bottomRightIsIn) inBottomRight = inputImages_data[inBottomRightAddress + t];
/* Standard bilinear blend of the four corner values. */
v = xWeightTopLeft * yWeightTopLeft * inTopLeft
+ (1 - xWeightTopLeft) * yWeightTopLeft * inTopRight
+ xWeightTopLeft * (1 - yWeightTopLeft) * inBottomLeft
+ (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * inBottomRight;
output_data[outAddress + t] = v;
}
}
/* Lua binding: forward pass of the BHWD bilinear sampler.
 * Stack: [1] = module (provides `output`), [2] = inputImages (B,H,W,D),
 * [3] = grids (B,H,W,2). Launches bilinearSamplingFromGrid with one
 * block column per 16 output pixels and (32,16) threads. Returns 1
 * (the output tensor already on the Lua side). */
static int cunn_BilinearSamplerBHWD_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
/* Grid: x = ceil(output_w / 16), y = output_h, z = batch. */
dim3 blocks((output->size[2]+15)/16, output->size[1], output->size[0]);
dim3 threads(32,16);
/* assume BHWD; strides are passed per-dimension so the kernel is
   layout-agnostic within that assumption. */
hipLaunchKernelGGL(( bilinearSamplingFromGrid) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) , THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_data(state, output),
THCudaTensor_stride(state, output, 0),
THCudaTensor_stride(state, output, 3),
THCudaTensor_stride(state, output, 1),
THCudaTensor_stride(state, output, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, output, 2));
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in BilinearSampler.updateOutput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
/* Backward pass of BHWD bilinear sampling.
 * Same launch layout as the forward kernel: (32,16) threads, 16 output
 * pixels per block, threadIdx.x strides channels.
 * Accumulates (a) input-image gradients via atomicAdd (skipped when
 * onlyGrid is true — in that case the gradInputImages_* arguments may
 * be null/zero), and (b) per-pixel grid gradients via a per-row
 * shared-memory dot-product reduction. */
template<bool onlyGrid> __global__ void backwardBilinearSampling(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth,
float* gradInputImages_data, int gradInputImages_strideBatch, int gradInputImages_strideChannels, int gradInputImages_strideHeight, int gradInputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth,
float* gradGrids_data, int gradGrids_strideBatch, int gradGrids_strideYX, int gradGrids_strideHeight, int gradGrids_strideWidth,
float* gradOutput_data, int gradOutput_strideBatch, int gradOutput_strideChannels, int gradOutput_strideHeight, int gradOutput_strideWidth,
int inputImages_channels, int inputImages_height, int inputImages_width, int gradOutput_width)
{
// each (32,16) block 16 output pixels (for coalescing the grid read)
// x,y = coordinates
// z = batch index
// threads : used for features
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < gradOutput_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 2 < gradOutput_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int b = blockIdx.z;
float yf,xf;
/* Stage the block's 16 (y,x) grid pairs with one coalesced read. */
__shared__ float gridData[32];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + threadIdx.x];
}
__syncthreads();
if(withinImageBounds)
{
yf = gridData[threadIdx.y*2];
xf = gridData[threadIdx.y*2+1];
int yInTopLeft, xInTopLeft;
float yWeightTopLeft, xWeightTopLeft;
getTopLeft(xf, inputImages_width, xInTopLeft, xWeightTopLeft);
getTopLeft(yf, inputImages_height, yInTopLeft, yWeightTopLeft);
const int inTopLeftAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeft + inputImages_strideWidth * xInTopLeft;
const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth;
const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight;
const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth;
const int gradInputImagesTopLeftAddress = gradInputImages_strideBatch * b + gradInputImages_strideHeight * yInTopLeft + gradInputImages_strideWidth * xInTopLeft;
const int gradInputImagesTopRightAddress = gradInputImagesTopLeftAddress + gradInputImages_strideWidth;
const int gradInputImagesBottomLeftAddress = gradInputImagesTopLeftAddress + gradInputImages_strideHeight;
const int gradInputImagesBottomRightAddress = gradInputImagesBottomLeftAddress + gradInputImages_strideWidth;
const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_strideHeight * yOut + gradOutput_strideWidth * xOut;
/* Per-corner dot products <input_corner, gradOutput> over channels,
   needed for the grid gradient below. */
float topLeftDotProduct = 0;
float topRightDotProduct = 0;
float bottomLeftDotProduct = 0;
float bottomRightDotProduct = 0;
bool topLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft, 0, height-1);
bool topRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft, 0, height-1);
bool bottomLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft+1, 0, height-1);
bool bottomRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft+1, 0, height-1);
/*
In that loop we accumulate
- gradients into the gradInputImages array with atomic adds
- we compute the dot product that we need for the grid gradient
*/
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
float gradOutValue = gradOutput_data[gradOutputAddress + t];
// bool between(int value, int lowerBound, int upperBound)
if(topLeftIsIn)
{
float inTopLeft = inputImages_data[inTopLeftAddress + t];
topLeftDotProduct += inTopLeft * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopLeftAddress + t], xWeightTopLeft * yWeightTopLeft * gradOutValue);
}
if(topRightIsIn)
{
float inTopRight = inputImages_data[inTopRightAddress + t];
topRightDotProduct += inTopRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopRightAddress + t], (1 - xWeightTopLeft) * yWeightTopLeft * gradOutValue);
}
if(bottomLeftIsIn)
{
float inBottomLeft = inputImages_data[inBottomLeftAddress + t];
bottomLeftDotProduct += inBottomLeft * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomLeftAddress + t], xWeightTopLeft * (1 - yWeightTopLeft) * gradOutValue);
}
if(bottomRightIsIn)
{
float inBottomRight = inputImages_data[inBottomRightAddress + t];
bottomRightDotProduct += inBottomRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomRightAddress + t], (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * gradOutValue);
}
}
/*
Here we reduce the dot product and compute the grid gradient before writing it.
*/
/* could do shuffles and use no shmem at all but cuda arch is 2.0 */
__shared__ volatile float __shmem[16][32];
__shmem[threadIdx.y][threadIdx.x] = topLeftDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topLeftDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = topRightDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topRightDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = bottomLeftDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomLeftDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = bottomRightDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomRightDotProduct = __shmem[threadIdx.y][0];
/* d(output)/d(yf) and d(output)/d(xf) from the bilinear weights. */
yf = - xWeightTopLeft * topLeftDotProduct + xWeightTopLeft * bottomLeftDotProduct - (1-xWeightTopLeft) * topRightDotProduct + (1-xWeightTopLeft) * bottomRightDotProduct;
xf = - yWeightTopLeft * topLeftDotProduct + yWeightTopLeft * topRightDotProduct - (1-yWeightTopLeft) * bottomLeftDotProduct + (1-yWeightTopLeft) * bottomRightDotProduct;
if(threadIdx.x==0)
{
/* Scale by (size-1)/2 to undo the [-1,1] normalization of getTopLeft. */
gridData[threadIdx.y*2] = yf * (inputImages_height-1) / 2;
gridData[threadIdx.y*2+1] = xf * (inputImages_width-1) / 2;
}
}// must put a big if condition in order not to hang at __syncthreads()...
__syncthreads();
/* Coalesced write-back of the 16 grid-gradient pairs. */
if(threadIdx.y==0 && withinGridBounds)
gradGrids_data[b*gradGrids_strideBatch + yOut*gradGrids_strideHeight + xOut*gradGrids_strideWidth + threadIdx.x] = gridData[threadIdx.x];
}
/* Lua binding: full backward pass (input-image AND grid gradients).
 * Stack: [2] inputImages, [3] grids, [4] gradInputImages, [5] gradGrids,
 * [6] gradOutput — all BHWD torch.CudaTensor. Instantiates the kernel
 * with onlyGrid=false so input gradients are atomically accumulated. */
static int cunn_BilinearSamplerBHWD_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradInputImages = (THCudaTensor *)luaT_checkudata(L, 4, "torch.CudaTensor");
THCudaTensor *gradGrids = (THCudaTensor *)luaT_checkudata(L, 5, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 6, "torch.CudaTensor");
/* Grid: x = ceil(gradOutput_w / 16), y = height, z = batch. */
dim3 blocks((gradOutput->size[2]+15)/16, gradOutput->size[1], gradOutput->size[0]);
dim3 threads(32,16);
hipLaunchKernelGGL(( backwardBilinearSampling <false>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) ,
THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_data(state, gradInputImages),
THCudaTensor_stride(state, gradInputImages, 0),
THCudaTensor_stride(state, gradInputImages, 3),
THCudaTensor_stride(state, gradInputImages, 1),
THCudaTensor_stride(state, gradInputImages, 2),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_data(state, gradGrids),
THCudaTensor_stride(state, gradGrids, 0),
THCudaTensor_stride(state, gradGrids, 3),
THCudaTensor_stride(state, gradGrids, 1),
THCudaTensor_stride(state, gradGrids, 2),
THCudaTensor_data(state, gradOutput),
THCudaTensor_stride(state, gradOutput, 0),
THCudaTensor_stride(state, gradOutput, 3),
THCudaTensor_stride(state, gradOutput, 1),
THCudaTensor_stride(state, gradOutput, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, gradOutput, 2));
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in BilinearSampler.updateGradInput: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
/* Lua binding: backward pass computing ONLY the grid gradients.
 * Stack: [2] inputImages, [3] grids, [5] gradGrids, [6] gradOutput
 * (slot 4, gradInputImages, intentionally unused). Instantiates the
 * kernel with onlyGrid=true, so the gradInputImages data/stride slots
 * are passed as literal zeros — the kernel never touches them.
 * Fix vs. the original: the error message mislabeled the function as
 * "updateGradInput" (copy-paste). */
static int cunn_BilinearSamplerBHWD_updateGradInputOnlyGrid(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradGrids = (THCudaTensor *)luaT_checkudata(L, 5, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 6, "torch.CudaTensor");
/* Grid: x = ceil(gradOutput_w / 16), y = height, z = batch. */
dim3 blocks((gradOutput->size[2]+15)/16, gradOutput->size[1], gradOutput->size[0]);
dim3 threads(32,16);
hipLaunchKernelGGL(( backwardBilinearSampling <true>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) ,
THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
0,
0,
0,
0,
0,
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_data(state, gradGrids),
THCudaTensor_stride(state, gradGrids, 0),
THCudaTensor_stride(state, gradGrids, 3),
THCudaTensor_stride(state, gradGrids, 1),
THCudaTensor_stride(state, gradGrids, 2),
THCudaTensor_data(state, gradOutput),
THCudaTensor_stride(state, gradOutput, 0),
THCudaTensor_stride(state, gradOutput, 3),
THCudaTensor_stride(state, gradOutput, 1),
THCudaTensor_stride(state, gradOutput, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, gradOutput, 2));
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in BilinearSampler.updateGradInputOnlyGrid: %s\n", hipGetErrorString(err));
THError("aborting");
}
return 1;
}
// Lua method table: maps the Lua-visible names to the C entry points.
// Registered on the torch.CudaTensor metatable under "nn" by
// cunn_BilinearSamplerBHWD_init below.
static const struct luaL_Reg cunn_BilinearSamplerBHWD__ [] = {
{"BilinearSamplerBHWD_updateOutput", cunn_BilinearSamplerBHWD_updateOutput},
{"BilinearSamplerBHWD_updateGradInput", cunn_BilinearSamplerBHWD_updateGradInput},
{"BilinearSamplerBHWD_updateGradInputOnlyGrid", cunn_BilinearSamplerBHWD_updateGradInputOnlyGrid},
{NULL, NULL}
};
// Registers the BilinearSamplerBHWD methods on the torch.CudaTensor metatable
// (under the "nn" field), then pops the metatable to restore the Lua stack.
static void cunn_BilinearSamplerBHWD_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_BilinearSamplerBHWD__, "nn");
lua_pop(L,1);
}
| c142e6f6c841e446631ae014e8e333e6b0a98f32.cu | #include "utils.h"
// Bilinear sampling is done in BHWD (coalescing is not obvious in BDHW)
// we assume BHWD format in inputImages
// we assume BHW(YX) format on grids
// Map a normalized coordinate x in [-1,1] onto an axis of `width` pixels.
// Outputs:
//   point  - integer coordinate of the left (or top) neighbour
//   weight - bilinear weight of that neighbour (1 when x lands exactly on it)
__device__ void getTopLeft(float x, int width, int& point, float& weight)
{
  /* for interpolation :
     stores in point and weight :
     - the x-coordinate of the pixel on the left (or y-coordinate of the upper pixel)
     - the weight for interpolating
  */
  float xcoord = (x + 1) * (width - 1) / 2;
  // floorf: explicit single-precision intrinsic; avoids relying on C++
  // overload resolution of floor() to keep the computation in float.
  point = floorf(xcoord);
  weight = 1 - (xcoord - point);
}
// True iff value lies in the closed interval [lowerBound, upperBound].
__device__ bool between(int value, int lowerBound, int upperBound)
{
  return lowerBound <= value && value <= upperBound;
}
// In-place tree reduction over 32 floats in shared memory; the sum ends up in
// s[0], which every lane of the warp can then read (broadcast).
// NOTE(review): this is the classic warp-synchronous idiom with `volatile` and
// no __syncwarp(); it assumes implicit lockstep within a warp, which is not
// guaranteed under independent thread scheduling (Volta+) -- confirm the
// target architecture before reusing this on newer GPUs.
__device__ void sumReduceShMem(volatile float s[])
{
/* obviously only works for 32 elements */
/* sums up a shared memory array of 32 elements, stores it in s[0] */
/* whole warp can then read first element (broadcasting) */
if(threadIdx.x<16) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+16]; }
if(threadIdx.x<8) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+8]; }
if(threadIdx.x<4) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+4]; }
if(threadIdx.x<2) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+2]; }
if(threadIdx.x<1) { s[threadIdx.x] = s[threadIdx.x] + s[threadIdx.x+1]; }
}
// Forward bilinear sampling kernel (BHWD layout).
// Each output pixel reads its normalized (y,x) sampling location from
// grids_data, converts it to input-pixel space via getTopLeft, and writes the
// bilinear interpolation of the four neighbouring input pixels for every
// channel. Neighbours falling outside the image contribute 0.
// Expected launch (see host wrapper): block = (32,16),
// grid = (ceil(output_width/16), height, batch).
__global__ void bilinearSamplingFromGrid(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth,
float* output_data, int output_strideBatch, int output_strideChannels, int output_strideHeight, int output_strideWidth,
int inputImages_channels, int inputImages_height, int inputImages_width, int output_width)
{
// each (32,16) block 16 output pixels (for coalescing the grid read)
// x,y = coordinates (xOut = blockIdx.x*16+blockDim.y+threadIdx.y)
// z = batch index
// threadIdx.x : used for features (coalescing is trivial)
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < output_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 2 < output_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int b = blockIdx.z;
float yf,xf;
// 16 (y,x) pairs = 32 floats, staged once by row 0 of the block for a
// coalesced read of the grid.
__shared__ float gridData[32];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + threadIdx.x];
}
__syncthreads();
if(!withinImageBounds) return;
yf = gridData[threadIdx.y*2];
xf = gridData[threadIdx.y*2+1];
int yInTopLeft, xInTopLeft;
float yWeightTopLeft, xWeightTopLeft;
getTopLeft(xf, inputImages_width, xInTopLeft, xWeightTopLeft);
getTopLeft(yf, inputImages_height, yInTopLeft, yWeightTopLeft);
const int outAddress = output_strideBatch * b + output_strideHeight * yOut + output_strideWidth * xOut;
const int inTopLeftAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeft + inputImages_strideWidth * xInTopLeft;
const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth;
const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight;
const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth;
float v=0;
// Initialized to 0 so out-of-bounds neighbours drop out of the weighted sum.
float inTopLeft=0;
float inTopRight=0;
float inBottomLeft=0;
float inBottomRight=0;
bool topLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft, 0, height-1);
bool topRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft, 0, height-1);
bool bottomLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft+1, 0, height-1);
bool bottomRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft+1, 0, height-1);
// interpolation happens here
// Lanes (threadIdx.x) stride across channels so consecutive lanes touch
// consecutive channel elements.
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
if(topLeftIsIn) inTopLeft = inputImages_data[inTopLeftAddress + t];
if(topRightIsIn) inTopRight = inputImages_data[inTopRightAddress + t];
if(bottomLeftIsIn) inBottomLeft = inputImages_data[inBottomLeftAddress + t];
if(bottomRightIsIn) inBottomRight = inputImages_data[inBottomRightAddress + t];
v = xWeightTopLeft * yWeightTopLeft * inTopLeft
+ (1 - xWeightTopLeft) * yWeightTopLeft * inTopRight
+ xWeightTopLeft * (1 - yWeightTopLeft) * inBottomLeft
+ (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * inBottomRight;
output_data[outAddress + t] = v;
}
}
// Lua entry point: forward pass of the bilinear sampler (BHWD tensors).
// Lua stack: 1 = self (supplies the "output" tensor), 2 = inputImages, 3 = grids.
// Launches bilinearSamplingFromGrid on the current torch CUDA stream, reports
// any launch error, and returns 1 (Lua convention).
static int cunn_BilinearSamplerBHWD_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
// One (32,16) block per 16 output pixels: grid = (ceil(W/16), H, batch).
dim3 blocks((output->size[2]+15)/16, output->size[1], output->size[0]);
dim3 threads(32,16);
/* assume BHWD */
bilinearSamplingFromGrid <<< blocks, threads, 0, THCState_getCurrentStream(state) >>> (THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_data(state, output),
THCudaTensor_stride(state, output, 0),
THCudaTensor_stride(state, output, 3),
THCudaTensor_stride(state, output, 1),
THCudaTensor_stride(state, output, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, output, 2));
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in BilinearSampler.updateOutput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
// Backward bilinear sampling kernel (BHWD layout).
// Template parameter onlyGrid: when true, only the grid gradient (gradGrids)
// is produced and the gradInputImages_* arguments are never dereferenced;
// when false, input-image gradients are additionally accumulated via atomicAdd.
// Per output pixel the kernel (a) scatters gradOutput into the four input
// neighbours and (b) accumulates, across channels, the four dot products
// needed for the grid gradient, which are then warp-reduced in shared memory.
// Expected launch: block = (32,16), grid = (ceil(gradOutput_width/16), H, B).
template<bool onlyGrid> __global__ void backwardBilinearSampling(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth,
float* gradInputImages_data, int gradInputImages_strideBatch, int gradInputImages_strideChannels, int gradInputImages_strideHeight, int gradInputImages_strideWidth,
float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth,
float* gradGrids_data, int gradGrids_strideBatch, int gradGrids_strideYX, int gradGrids_strideHeight, int gradGrids_strideWidth,
float* gradOutput_data, int gradOutput_strideBatch, int gradOutput_strideChannels, int gradOutput_strideHeight, int gradOutput_strideWidth,
int inputImages_channels, int inputImages_height, int inputImages_width, int gradOutput_width)
{
// each (32,16) block 16 output pixels (for coalescing the grid read)
// x,y = coordinates
// z = batch index
// threads : used for features
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < gradOutput_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 2 < gradOutput_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int b = blockIdx.z;
float yf,xf;
// 16 (y,x) grid pairs staged by row 0; reused at the end to stage gradGrids.
__shared__ float gridData[32];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + threadIdx.x];
}
__syncthreads();
if(withinImageBounds)
{
yf = gridData[threadIdx.y*2];
xf = gridData[threadIdx.y*2+1];
int yInTopLeft, xInTopLeft;
float yWeightTopLeft, xWeightTopLeft;
getTopLeft(xf, inputImages_width, xInTopLeft, xWeightTopLeft);
getTopLeft(yf, inputImages_height, yInTopLeft, yWeightTopLeft);
const int inTopLeftAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeft + inputImages_strideWidth * xInTopLeft;
const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth;
const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight;
const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth;
const int gradInputImagesTopLeftAddress = gradInputImages_strideBatch * b + gradInputImages_strideHeight * yInTopLeft + gradInputImages_strideWidth * xInTopLeft;
const int gradInputImagesTopRightAddress = gradInputImagesTopLeftAddress + gradInputImages_strideWidth;
const int gradInputImagesBottomLeftAddress = gradInputImagesTopLeftAddress + gradInputImages_strideHeight;
const int gradInputImagesBottomRightAddress = gradInputImagesBottomLeftAddress + gradInputImages_strideWidth;
const int gradOutputAddress = gradOutput_strideBatch * b + gradOutput_strideHeight * yOut + gradOutput_strideWidth * xOut;
// Per-lane partial dot products of input pixel values with gradOutput,
// accumulated over this lane's subset of channels.
float topLeftDotProduct = 0;
float topRightDotProduct = 0;
float bottomLeftDotProduct = 0;
float bottomRightDotProduct = 0;
bool topLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft, 0, height-1);
bool topRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft, 0, height-1);
bool bottomLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft+1, 0, height-1);
bool bottomRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft+1, 0, height-1);
/*
In that loop we accumulate
- gradients into the gradInputImages array with atomic adds
- we compute the dot product that we need for the grid gradient
*/
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
float gradOutValue = gradOutput_data[gradOutputAddress + t];
// bool between(int value, int lowerBound, int upperBound)
if(topLeftIsIn)
{
float inTopLeft = inputImages_data[inTopLeftAddress + t];
topLeftDotProduct += inTopLeft * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopLeftAddress + t], xWeightTopLeft * yWeightTopLeft * gradOutValue);
}
if(topRightIsIn)
{
float inTopRight = inputImages_data[inTopRightAddress + t];
topRightDotProduct += inTopRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesTopRightAddress + t], (1 - xWeightTopLeft) * yWeightTopLeft * gradOutValue);
}
if(bottomLeftIsIn)
{
float inBottomLeft = inputImages_data[inBottomLeftAddress + t];
bottomLeftDotProduct += inBottomLeft * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomLeftAddress + t], xWeightTopLeft * (1 - yWeightTopLeft) * gradOutValue);
}
if(bottomRightIsIn)
{
float inBottomRight = inputImages_data[inBottomRightAddress + t];
bottomRightDotProduct += inBottomRight * gradOutValue;
if(!onlyGrid) atomicAdd(&gradInputImages_data[gradInputImagesBottomRightAddress + t], (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * gradOutValue);
}
}
/*
Here we reduce the dot product and compute the grid gradient before writing it.
*/
/* could do shuffles and use no shmem at all but cuda arch is 2.0 */
// One 32-lane row of shared memory per output pixel (threadIdx.y).
__shared__ volatile float __shmem[16][32];
__shmem[threadIdx.y][threadIdx.x] = topLeftDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topLeftDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = topRightDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
topRightDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = bottomLeftDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomLeftDotProduct = __shmem[threadIdx.y][0];
__shmem[threadIdx.y][threadIdx.x] = bottomRightDotProduct;
sumReduceShMem(__shmem[threadIdx.y]);
bottomRightDotProduct = __shmem[threadIdx.y][0];
// Derivatives of the bilinear interpolation w.r.t. the sampling coordinates.
yf = - xWeightTopLeft * topLeftDotProduct + xWeightTopLeft * bottomLeftDotProduct - (1-xWeightTopLeft) * topRightDotProduct + (1-xWeightTopLeft) * bottomRightDotProduct;
xf = - yWeightTopLeft * topLeftDotProduct + yWeightTopLeft * topRightDotProduct - (1-yWeightTopLeft) * bottomLeftDotProduct + (1-yWeightTopLeft) * bottomRightDotProduct;
if(threadIdx.x==0)
{
// Chain rule through getTopLeft: d(pixel coord)/d(normalized coord) = (size-1)/2.
gridData[threadIdx.y*2] = yf * (inputImages_height-1) / 2;
gridData[threadIdx.y*2+1] = xf * (inputImages_width-1) / 2;
}
}// must put a big if condition in order not to hang at __syncthreads()...
__syncthreads();
if(threadIdx.y==0 && withinGridBounds)
gradGrids_data[b*gradGrids_strideBatch + yOut*gradGrids_strideHeight + xOut*gradGrids_strideWidth + threadIdx.x] = gridData[threadIdx.x];
}
// Lua entry point: full backward pass (gradients for both the input images
// and the sampling grid). Instantiates backwardBilinearSampling<false>.
// Lua stack: 2 = inputImages, 3 = grids, 4 = gradInputImages, 5 = gradGrids,
// 6 = gradOutput (all BHWD CudaTensors).
static int cunn_BilinearSamplerBHWD_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
THCudaTensor *gradInputImages = (THCudaTensor *)luaT_checkudata(L, 4, "torch.CudaTensor");
THCudaTensor *gradGrids = (THCudaTensor *)luaT_checkudata(L, 5, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 6, "torch.CudaTensor");
// One (32,16) block per 16 output pixels: grid = (ceil(W/16), H, batch).
dim3 blocks((gradOutput->size[2]+15)/16, gradOutput->size[1], gradOutput->size[0]);
dim3 threads(32,16);
backwardBilinearSampling <false> <<< blocks, threads, 0, THCState_getCurrentStream(state) >>> (
THCudaTensor_data(state, inputImages),
THCudaTensor_stride(state, inputImages, 0),
THCudaTensor_stride(state, inputImages, 3),
THCudaTensor_stride(state, inputImages, 1),
THCudaTensor_stride(state, inputImages, 2),
THCudaTensor_data(state, gradInputImages),
THCudaTensor_stride(state, gradInputImages, 0),
THCudaTensor_stride(state, gradInputImages, 3),
THCudaTensor_stride(state, gradInputImages, 1),
THCudaTensor_stride(state, gradInputImages, 2),
THCudaTensor_data(state, grids),
THCudaTensor_stride(state, grids, 0),
THCudaTensor_stride(state, grids, 3),
THCudaTensor_stride(state, grids, 1),
THCudaTensor_stride(state, grids, 2),
THCudaTensor_data(state, gradGrids),
THCudaTensor_stride(state, gradGrids, 0),
THCudaTensor_stride(state, gradGrids, 3),
THCudaTensor_stride(state, gradGrids, 1),
THCudaTensor_stride(state, gradGrids, 2),
THCudaTensor_data(state, gradOutput),
THCudaTensor_stride(state, gradOutput, 0),
THCudaTensor_stride(state, gradOutput, 3),
THCudaTensor_stride(state, gradOutput, 1),
THCudaTensor_stride(state, gradOutput, 2),
THCudaTensor_size(state, inputImages, 3),
THCudaTensor_size(state, inputImages, 1),
THCudaTensor_size(state, inputImages, 2),
THCudaTensor_size(state, gradOutput, 2));
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in BilinearSampler.updateGradInput: %s\n", cudaGetErrorString(err));
THError("aborting");
}
return 1;
}
// Lua entry point: backward pass computing ONLY the gradient w.r.t. the
// sampling grid (gradGrids). Instantiates backwardBilinearSampling<true>, so
// the gradInputImages pointer/stride arguments are passed as 0 and are never
// dereferenced by the kernel.
// Lua stack: 2 = inputImages, 3 = grids, 5 = gradGrids, 6 = gradOutput (BHWD).
static int cunn_BilinearSamplerBHWD_updateGradInputOnlyGrid(lua_State *L)
{
  THCState *state = getCutorchState(L);
  THCudaTensor *inputImages = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
  THCudaTensor *grids = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
  THCudaTensor *gradGrids = (THCudaTensor *)luaT_checkudata(L, 5, "torch.CudaTensor");
  THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 6, "torch.CudaTensor");
  // One (32,16) thread block covers 16 output pixels: grid = (ceil(W/16), H, batch).
  dim3 blocks((gradOutput->size[2]+15)/16, gradOutput->size[1], gradOutput->size[0]);
  dim3 threads(32,16);
  backwardBilinearSampling <true> <<< blocks, threads, 0, THCState_getCurrentStream(state) >>> (
      THCudaTensor_data(state, inputImages),
      THCudaTensor_stride(state, inputImages, 0),
      THCudaTensor_stride(state, inputImages, 3),
      THCudaTensor_stride(state, inputImages, 1),
      THCudaTensor_stride(state, inputImages, 2),
      0, // gradInputImages_data and its strides: unused when onlyGrid == true
      0,
      0,
      0,
      0,
      THCudaTensor_data(state, grids),
      THCudaTensor_stride(state, grids, 0),
      THCudaTensor_stride(state, grids, 3),
      THCudaTensor_stride(state, grids, 1),
      THCudaTensor_stride(state, grids, 2),
      THCudaTensor_data(state, gradGrids),
      THCudaTensor_stride(state, gradGrids, 0),
      THCudaTensor_stride(state, gradGrids, 3),
      THCudaTensor_stride(state, gradGrids, 1),
      THCudaTensor_stride(state, gradGrids, 2),
      THCudaTensor_data(state, gradOutput),
      THCudaTensor_stride(state, gradOutput, 0),
      THCudaTensor_stride(state, gradOutput, 3),
      THCudaTensor_stride(state, gradOutput, 1),
      THCudaTensor_stride(state, gradOutput, 2),
      THCudaTensor_size(state, inputImages, 3),
      THCudaTensor_size(state, inputImages, 1),
      THCudaTensor_size(state, inputImages, 2),
      THCudaTensor_size(state, gradOutput, 2));
  // Check for launch errors. Fix: the message previously said "updateGradInput",
  // making failures here indistinguishable from the full-gradient entry point.
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("error in BilinearSampler.updateGradInputOnlyGrid: %s\n", cudaGetErrorString(err));
    THError("aborting");
  }
  return 1;
}
// Lua method table: maps the Lua-visible names to the C entry points.
// Registered on the torch.CudaTensor metatable under "nn" by
// cunn_BilinearSamplerBHWD_init below.
static const struct luaL_Reg cunn_BilinearSamplerBHWD__ [] = {
{"BilinearSamplerBHWD_updateOutput", cunn_BilinearSamplerBHWD_updateOutput},
{"BilinearSamplerBHWD_updateGradInput", cunn_BilinearSamplerBHWD_updateGradInput},
{"BilinearSamplerBHWD_updateGradInputOnlyGrid", cunn_BilinearSamplerBHWD_updateGradInputOnlyGrid},
{NULL, NULL}
};
// Registers the BilinearSamplerBHWD methods on the torch.CudaTensor metatable
// (under the "nn" field), then pops the metatable to restore the Lua stack.
static void cunn_BilinearSamplerBHWD_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunn_BilinearSamplerBHWD__, "nn");
lua_pop(L,1);
}
|
53b04493339a02e58c2f18fe66af4d636be80188.hip | // !!! This is a file automatically generated by hipify!!!
// Batch matrix multiplication:
#include <cstdio>
#include <cstdlib>
#include <string>
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <memory.h>
#include <math.h>
#include <stdbool.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <cblas.h>
#include <rocblas.h>
#include <cusolverDn.h>
#include <hip/hip_complex.h>
#include "expm.h"
// *** Matrix exponential program acting on matrices of type : [Double Complex] - 01/03/2019 ***
/*
Perform matrox-matrix multiplication for a batch of matrices
Batch instance all have uniform size
Input matrices inputtes as an array of pointers
*/
// int match_mul(hipblasHandle_t handle, int n, hipDoubleComplex** Tpowers){
// const hipDoubleComplex alf = make_cuDoubleComplex(1, 0);
// const hipDoubleComplex bet = make_cuDoubleComplex(0, 0);
// const hipDoubleComplex *alpha = &alf;
// const hipDoubleComplex *beta = &bet;
// hipblasZgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, n, *alpha, );
// }
// int match_mul(hipblasHandle_t handle, int n){
// printf("HERE WE ARE\n");
// int i,j,k, index;
// hipDoubleComplex* h_A = (hipDoubleComplex*) malloc(sizeof(hipDoubleComplex) * n * n * 3);
// hipDoubleComplex* h_B = (hipDoubleComplex*) malloc(sizeof(hipDoubleComplex) * n * n * 3);
// hipDoubleComplex* h_C = (hipDoubleComplex*) malloc(sizeof(hipDoubleComplex) * n * n * 3);
// printf("HERE WE ARE 2\n");
// for(k=0; k<3; k++) {
// for(j=0; j<n; j++) {
// for(i=0; i<n; i++) {
// index = i*n + j + k*n*n;
// h_A[index] = make_cuDoubleComplex(index, index);
// h_B[index] = make_cuDoubleComplex(index, index);
// h_C[index] = make_cuDoubleComplex(0, 0);
// }
// }
// }
// printf("HERE WE ARE 3\n");
// //float *d_A, *d_B, *d_C;
// hipDoubleComplex *d_A;
// hipMalloc(&d_A, sizeof(hipDoubleComplex) * n * n * 3);
// hipDoubleComplex *d_B;
// hipMalloc(&d_B, sizeof(hipDoubleComplex) * n * n * 3);
// hipDoubleComplex *d_C;
// hipMalloc(&d_C, sizeof(hipDoubleComplex) * n * n * 3);
// printf("HERE WE ARE 4\n");
// hipMemcpy(d_A, h_A, sizeof(hipDoubleComplex) * n * n * 3, hipMemcpyHostToDevice);
// hipMemcpy(d_B, h_B, sizeof(hipDoubleComplex) * n * n * 3, hipMemcpyHostToDevice);
// hipMemcpy(d_C, h_C, sizeof(hipDoubleComplex) * n * n * 3, hipMemcpyHostToDevice);
// printf("HERE WE ARE 4\n");
// float time_cuda_event;
// hipEvent_t start, stop;
// hipEventCreate(&start);
// hipEventCreate(&stop) ;
// hipEventRecord(start, 0);
// const hipDoubleComplex alf = make_cuDoubleComplex(1, 0);
// const hipDoubleComplex bet = make_cuDoubleComplex(0, 0);
// const hipDoubleComplex *alpha = &alf;
// const hipDoubleComplex *beta = &bet;
// printf("HERE WE ARE 5\n");
// hipblasZgemmStridedBatched(handle,
// HIPBLAS_OP_N,
// HIPBLAS_OP_N,
// n, n, n,
// alpha,
// (const hipDoubleComplex*)d_A, n,
// n*n,
// (const hipDoubleComplex*)d_B, n,
// n*n,
// beta,
// d_C, n,
// n*n,
// 3);
// ( hipEventRecord(stop, 0) );
// ( hipEventSynchronize(stop) );
// ( hipEventElapsedTime(&time_cuda_event, start, stop) );
// printf("Time : %3.1f ms \n", time_cuda_event);
// hipMemcpy(h_C,d_C,sizeof(hipDoubleComplex) * n * n * 3,hipMemcpyDeviceToHost);
// // Destroy the handle
// ( hipEventRecord(stop, 0) );
// ( hipEventSynchronize(stop) );
// ( hipEventElapsedTime(&time_cuda_event, start, stop) );
// printf("HERE WE ARE 2\n");
// for(k=0; k<3; k++) {
// printf("\n\n\n");
// for(j=0; j<n; j++) {
// for(i=0; i<n; i++) {
// index = i*n + j + k*n*n;
// printf(" %lf ", h_C[index].x);
// printf("+ %lf i", h_C[index].y);
// }
// printf("\n");
// }
// }
// printf("\nTime : %3.1f ms \n", time_cuda_event);
// hipFree(d_A);
// hipFree(d_B);
// hipFree(d_C);
// free(h_A);
// free(h_B);
// free(h_C);
// return 0;
// }
// Print an n x n complex matrix to stdout, one bracketed row per line, each
// entry rendered as "re + im i" with 15 decimal places.
void matrix_complex_print(hipDoubleComplex* A, int network_size){
    const int n = network_size;
    for (int row = 0; row < n; ++row){
        printf("[");
        for (int col = 0; col < n; ++col){
            const hipDoubleComplex entry = A[(row * n) + col];
            printf(" %.15lf ", entry.x);
            printf("+");
            printf(" %.15lfi ", entry.y);
        }
        printf("]");
        printf("\n");
    }
}
// Multiplies the matrices ALREADY RESIDENT in device buffers d_A/d_B with
// hipblasZgemm and copies the product from d_C into host buffer C; wall time
// is accumulated into multiply_total_time[0].
// NOTE(review): the host parameters A and B are never read here -- callers
// must stage the operands into d_A/d_B beforehand (see get_Matrix_Powers_New).
// NOTE(review): the gemm receives operands as (d_B, d_A); with row-major host
// storage this yields the row-major product A*B through cuBLAS/hipBLAS's
// column-major convention -- confirm against callers before changing the order.
void new_matrix_multiply(hipblasHandle_t &handle, hipDoubleComplex *A, hipDoubleComplex *B, hipDoubleComplex *C, hipDoubleComplex *d_A, hipDoubleComplex* d_B, hipDoubleComplex *d_C, int n, double* multiply_total_time){
clock_t multiply_begin = clock();
// alpha = 1+0i, beta = 0+0i: plain product, no accumulation into d_C.
const hipDoubleComplex alf = make_cuDoubleComplex(1, 0);
const hipDoubleComplex bet = make_cuDoubleComplex(0, 0);
const hipDoubleComplex *alpha = &alf;
const hipDoubleComplex *beta = &bet;
hipblasZgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, n, alpha, d_B, n, d_A, n, beta, d_C, n); // Perform the cublas matrix multiplication
hipMemcpy(C, d_C, n * n * sizeof(hipDoubleComplex),hipMemcpyDeviceToHost); // Copy product back to host
clock_t multiply_end = clock();
double time_spent = (double)(multiply_end - multiply_begin) / CLOCKS_PER_SEC;
multiply_total_time[0] = multiply_total_time[0] + time_spent;
}
// Full host-to-host complex matrix product: uploads host matrices A and B to
// d_A/d_B, runs hipblasZgemm, and copies the product back into host C.
// Elapsed wall time (including both copies) is accumulated into
// multiply_total_time[0].
// NOTE(review): operands are passed to the gemm as (d_B, d_A); with row-major
// host buffers this computes the row-major product A*B via the column-major
// BLAS convention -- confirm before reordering.
void matrix_Multiply(hipblasHandle_t &handle, hipDoubleComplex *A, hipDoubleComplex *B, hipDoubleComplex *C, hipDoubleComplex *d_A, hipDoubleComplex* d_B, hipDoubleComplex *d_C, int n, double* multiply_total_time){
clock_t multiply_begin = clock();
// alpha = 1+0i, beta = 0+0i: plain product, no accumulation into d_C.
const hipDoubleComplex alf = make_cuDoubleComplex(1, 0);
const hipDoubleComplex bet = make_cuDoubleComplex(0, 0);
const hipDoubleComplex *alpha = &alf;
const hipDoubleComplex *beta = &bet;
hipMemcpy(d_A, A, n * n * sizeof(hipDoubleComplex),hipMemcpyHostToDevice); // Copy first operand to the device
hipMemcpy(d_B, B, n * n * sizeof(hipDoubleComplex),hipMemcpyHostToDevice); // Copy second operand to the device
hipblasZgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, n, alpha, d_B, n, d_A, n, beta, d_C, n); // Perform the cublas matrix multiplication
hipMemcpy(C, d_C, n * n * sizeof(hipDoubleComplex),hipMemcpyDeviceToHost); // Copy product back to host
clock_t multiply_end = clock();
double time_spent = (double)(multiply_end - multiply_begin) / CLOCKS_PER_SEC;
multiply_total_time[0] = multiply_total_time[0] + time_spent;
}
// Allocates Tpowers[0..10] (each n x n, heap-allocated) and fills the powers
// of A needed by the Pade scaling-and-squaring expm: Tpowers[1] = A, then
// GPU products via new_matrix_multiply. Caller owns the returned array.
// NOTE(review): when Tpowers[4] is formed, d_B still holds Tpowers[1] (the
// refresh of d_B is commented out), so the gemm computes A^2 * A = A^3, not
// A^4; the same staleness affects Tpowers[6]/Tpowers[8]. Compare with the
// commented-out matrix_Multiply sequence at the bottom, which is unambiguous.
// NOTE(review): Tpowers[10] is allocated but never computed, yet main reads
// it for the d10 estimate -- confirm intended behavior.
hipDoubleComplex **get_Matrix_Powers_New(hipDoubleComplex *A, hipDoubleComplex* d_A, hipDoubleComplex* d_B, hipDoubleComplex* d_C, hipblasHandle_t handle, int n, double* multiply_total_time) {
hipDoubleComplex **Tpowers = (hipDoubleComplex **) malloc(11 * sizeof(hipDoubleComplex *));
for (int i = 0; i < 11; i++) {
Tpowers[i] = (hipDoubleComplex *) malloc(n * n * sizeof(hipDoubleComplex));
}
memcpy(Tpowers[1], A, n * n * sizeof(hipDoubleComplex));
// Is it possible to reduce the number of memroy copies here?
// 1 --- 1
// 2 ---- 2
// 4 --- 6
// 4 ---- 4
hipMemcpy(d_A, Tpowers[1], n * n * sizeof(hipDoubleComplex),hipMemcpyHostToDevice); // Copy first operand to the device
hipMemcpy(d_B, Tpowers[1], n * n * sizeof(hipDoubleComplex),hipMemcpyHostToDevice); // Copy second operand to the d
new_matrix_multiply(handle, d_A, d_B, Tpowers[2], d_A, d_B, d_C, n, multiply_total_time);
//hipMemcpy(C, d_C, n * n * sizeof(hipDoubleComplex),hipMemcpyDeviceToHost); // Copy product back to host
//hipDoubleComplex * tmp = d_A; d_A = d_C; d_C = tmp;
hipMemcpy(d_A, Tpowers[2], n * n * sizeof(hipDoubleComplex),hipMemcpyHostToDevice); // Copy first operand to the device
//hipMemcpy(d_B, Tpowers[2], n * n * sizeof(hipDoubleComplex),hipMemcpyHostToDevice); // Copy second operand to the d
new_matrix_multiply(handle, d_A, d_A, Tpowers[4], d_A, d_B, d_C, n, multiply_total_time);
//matrix_complex_print(Tpowers[4], n);
hipMemcpy(d_A, Tpowers[4], n * n * sizeof(hipDoubleComplex),hipMemcpyHostToDevice); // Copy first operand to the devic
new_matrix_multiply(handle, d_A, d_B, Tpowers[6], d_A, d_B, d_C, n, multiply_total_time);
hipMemcpy(d_A, Tpowers[4], n * n * sizeof(hipDoubleComplex),hipMemcpyHostToDevice); // Copy first operand to the devic
new_matrix_multiply(handle, d_B, d_B, Tpowers[8], d_A, d_B, d_C, n, multiply_total_time);
return Tpowers;
// IS IT FASTER & IS IT CORRECT?
// matrix_Multiply(handle, Tpowers[1], Tpowers[1], Tpowers[2], d_A, d_B, d_C, n, multiply_total_time);
// matrix_Multiply(handle, Tpowers[2], Tpowers[2], Tpowers[4], d_A, d_B, d_C, n, multiply_total_time);
// matrix_Multiply(handle, Tpowers[4], Tpowers[2], Tpowers[6], d_A, d_B, d_C, n, multiply_total_time);
// matrix_Multiply(handle, Tpowers[4], Tpowers[4], Tpowers[8], d_A, d_B, d_C, n, multiply_total_time);
}
// ---- Current Work ---
// Solve L * X = b on the GPU via LU factorization (cuSOLVER Zgetrf/Zgetrs)
// and return X in `inverse`; passing b = identity yields the matrix inverse.
//   L       : host n x n input matrix (a device copy is factorized in place)
//   inverse : host n x n output buffer
//   b       : host n x n right-hand-side matrix
// Fixes vs. the original: the cusolver handle is now destroyed (it leaked),
// the Zgetrs status is actually captured (the old code re-tested the stale
// Zgetrf status), and the comma-operator string literals (which were
// discarded no-ops) were removed.
void InverseOfMatrix_Alternative_Two(hipDoubleComplex* L, hipDoubleComplex* inverse, int n, hipDoubleComplex* b){
    cusolverStatus_t status; // status of each cusolver call
    hipsolverDnHandle_t handler;
    status = hipsolverDnCreate(&handler);
    if (status != CUSOLVER_STATUS_SUCCESS){
        fprintf(stderr, "Error: failed to create cusolver handle\n");
        return;
    }
    hipDoubleComplex* A;        // device copy of L, overwritten by LU factors
    int* dLUPivots_ALT;         // device pivot indices
    int* dLUInfo_ALT;           // device info scalar (0 on success)
    hipDoubleComplex *buffer = NULL;
    int bufferSize = 0;
    int h_info = 0;
    hipDoubleComplex *x;        // device right-hand sides, overwritten by X
    hipMalloc(&A, sizeof(hipDoubleComplex)*n*n);
    hipMalloc(&x, n * n*sizeof(hipDoubleComplex));
    hipMalloc(&dLUPivots_ALT, n * sizeof(int));
    hipMalloc(&dLUInfo_ALT, sizeof(int));
    hipMemcpy(A, L, n*n*sizeof(hipDoubleComplex), hipMemcpyHostToDevice);
    hipMemcpy(x, b, sizeof(hipDoubleComplex)*n*n, hipMemcpyHostToDevice);
    // Workspace query, then LU factorization of A (in place, with pivoting).
    hipsolverDnZgetrf_bufferSize(handler, n, n, (hipDoubleComplex*)A, n, &bufferSize);
    hipMalloc(&buffer, sizeof(hipDoubleComplex)*bufferSize);
    status = hipsolverDnZgetrf(handler, n, n, A, n, buffer, dLUPivots_ALT, dLUInfo_ALT);
    if(status!=CUSOLVER_STATUS_SUCCESS){
        printf("ERROR!!\n");
    }
    hipMemcpy(&h_info, dLUInfo_ALT, sizeof(int), hipMemcpyDeviceToHost);
    if ( h_info != 0 ){
        fprintf(stderr, "Error: LU factorization failed\n");
        printf("%d\n", h_info );
    }
    // Triangular solves against all n right-hand sides at once.
    status = hipsolverDnZgetrs(handler, HIPBLAS_OP_N, n, n, A, n, dLUPivots_ALT, x, n, dLUInfo_ALT);
    hipDeviceSynchronize();
    if(status!=CUSOLVER_STATUS_SUCCESS){
        printf("ERROR!!\n");
    }
    hipMemcpy(&h_info, dLUInfo_ALT, sizeof(int), hipMemcpyDeviceToHost);
    if ( h_info != 0 ){
        fprintf(stderr, "Error: LU factorization failed\n");
    }
    hipMemcpy(inverse, x, sizeof(hipDoubleComplex) * n * n, hipMemcpyDeviceToHost);
    // Free device memory and destroy the handle (previously leaked):
    hipFree(dLUPivots_ALT);
    hipFree(dLUInfo_ALT);
    hipFree(A);
    hipFree(x);
    hipFree(buffer);
    hipsolverDnDestroy(handler);
}
// Element-wise complex subtraction over an n x n matrix: c = a - b.
// (Still a candidate for a device kernel.)
void matrix_Subtract_New(const hipDoubleComplex *a, const hipDoubleComplex *b, hipDoubleComplex *c, int n) { // PARALLEL CANDIDATE
    const int total = n * n;
    for (int idx = 0; idx < total; ++idx) {
        c[idx] = cuCsub(a[idx], b[idx]); // Complex subtraction
    }
}
// Element-wise complex addition over an n x n matrix: c = a + b.
// (Still a candidate for a device kernel.)
void matrixAdd_New(const hipDoubleComplex *a, const hipDoubleComplex *b, hipDoubleComplex *c, int n) { // PARALLEL CANDIDATE
    const int total = n * n;
    for (int idx = 0; idx < total; ++idx) {
        c[idx] = cuCadd(a[idx], b[idx]); // Complex addition
    }
}
// Fill i_matrix with the n x n complex identity.
// Fix: the original wrote only the real parts, leaving the imaginary parts of
// a freshly malloc'd buffer uninitialized; both components are now set.
void set_Identity_New(hipDoubleComplex *i_matrix, int n) {
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            i_matrix[(n * i) + j].x = (i == j) ? 1 : 0;
            i_matrix[(n * i) + j].y = 0;
        }
    }
}
// Element-wise complex scaling: scaled = scale * a over an n x n matrix.
// Elapsed wall time is accumulated into scale_total_time[0].
void matrix_Scale_New(hipDoubleComplex *a, hipDoubleComplex *scaled, hipDoubleComplex scale, int n, double* scale_total_time ) {
    clock_t t_start = clock();
    const int total = n * n;
    for (int idx = 0; idx < total; ++idx) {
        scaled[idx] = cuCmul(a[idx], scale); // Complex multiplication
    }
    clock_t t_end = clock();
    scale_total_time[0] += (double)(t_end - t_start) / CLOCKS_PER_SEC;
}
// Element-wise magnitude: b[i].x = |a[i]|, b[i].y = 0, over an n x n matrix.
void matrix_Absolute_New(hipDoubleComplex *a, hipDoubleComplex *b, int n) {
    const int total = n * n;
    for (int idx = 0; idx < total; ++idx) {
        b[idx].x = cuCabs(a[idx]);
        b[idx].y = 0;
    }
}
// Matrix 1-norm: the maximum absolute column sum. Column `col` of the
// row-major n x n matrix gathers entries A[row*n + col] over all rows.
double calculate_one_norm_New_complex(const hipDoubleComplex *A, int n) {
    double max = -DBL_MAX;
    for (int col = 0; col < n; ++col) {
        double colSum = 0;
        for (int row = 0; row < n; ++row) {
            colSum += cuCabs(A[(n * row) + col]);
        }
        if (colSum > max) {
            max = colSum;
        }
    }
    return max;
}
// COMPUTING OPTIMAL PARAMETERS
// Computes the extra-squarings correction from the scaling-and-squaring expm
// algorithm: returns max(ceil(log2(2*alpha / u) / (2*m_val)), 0), where
// alpha = ||coeff^(1/(2m+1)) * |A|||_1 / ||A||_1 and u is double unit roundoff.
// NOTE(review): the memcpy copies temp_new INTO A, clobbering the caller's
// matrix A with temp_new's contents (which are uninitialized on the first
// call from main) -- the argument order looks inverted; confirm against a
// reference expm implementation before relying on this function.
double ell(hipDoubleComplex *A, hipDoubleComplex *temp_new, double coeff, int m_val, int n, double* scale_total_time) {
double norm_one, norm_two, p, alpha, output;
memcpy(A, temp_new, n * n * sizeof(hipDoubleComplex));
matrix_Absolute_New(A, temp_new, n);
// p = coeff^(1/(2m+1)), used to scale |A| before taking its 1-norm.
p = pow(coeff, (1.0 / (2 * m_val + 1)));
matrix_Scale_New(temp_new, temp_new, make_cuDoubleComplex(p, 0), n, scale_total_time);
norm_one = calculate_one_norm_New_complex(A, n);
norm_two = calculate_one_norm_New_complex(temp_new, n);
alpha = norm_two / norm_one;
// 2.220446049250313e-16 is DBL_EPSILON (IEEE double unit roundoff).
output = fmax(ceil(log2((2 * alpha) / 2.220446049250313e-16) / (2 * m_val)), 0);
return output;
}
/*
 * Copy the Pade approximant coefficients used by the scaling-and-squaring
 * matrix exponential into the caller-provided buffer.
 *   buf : output, must hold at least m+1 doubles
 *   m   : Pade order; one of 3, 5, 7, 9, 13 (anything else leaves buf untouched)
 *
 * Fix: the original switch assigned the LOCAL pointer parameter `buf`
 * (a no-op for the caller) and let every case fall through; reaching case 13
 * then copied through the reassigned pointer, corrupting the coefficient
 * table itself. Each case now selects its row and the copy uses the caller's
 * buffer with the correct length.
 */
void get_pade_coefficients(double *buf, int m) {
    static const double coefficients[5][14] = {
        {120, 60, 12, 1},
        {30240, 15120, 3360, 420, 30, 1},
        {17297280, 8648640, 1995840, 277200, 25200, 1512, 56, 1},
        {17643225600, 8821612800, 2075673600, 302702400, 30270240, 2162160, 110880, 3960, 90, 1},
        {64764752532480000, 32382376266240000, 7771770303897600, 1187353796428800, 129060195264000, 10559470521600, 670442572800, 33522128640, 1323241920, 40840800, 960960, 16380, 182, 1}
    };
    int row;
    switch (m) {
        case 3:  row = 0; break;
        case 5:  row = 1; break;
        case 7:  row = 2; break;
        case 9:  row = 3; break;
        case 13: row = 4; break;
        default: return; // unsupported order: leave buf untouched
    }
    // A degree-m Pade approximant has m+1 coefficients.
    for (int i = 0; i <= m; i++) {
        buf[i] = coefficients[row][i];
    }
}
// Driver: builds a 1000x1000 complex test matrix and computes its matrix
// exponential via scaling-and-squaring with Pade approximation (structure
// follows Higham's expm algorithm), printing a timing breakdown at the end.
// NOTE(review): scale_total_time / multiply_total_time are malloc'd but never
// zero-initialised, yet the helpers accumulate into them with "+=" — the
// reported totals start from indeterminate values.
int main(){
hipblasHandle_t handle;
hipblasCreate(&handle);
// VARIABLES TO HOLD TOTAL COMPONENT TIMES:
double* scale_total_time = (double *) malloc(1 * sizeof(double));
double* multiply_total_time = (double *) malloc(1 * sizeof(double));
hipDoubleComplex *A;
int n = 1000;
A = (hipDoubleComplex *) malloc(n * n * sizeof(hipDoubleComplex));
printf("HERE\n");
// Fill the test matrix: A[i][j] = (n*i + j) + (i*j)i.
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
A[(n*i) + j].x = (n*i) + j;
A[(n*i) + j].y = (i*j);
}
}
printf("HERE\n");
clock_t begin = clock(); // Begin recording expm execution
// CUBLAS HANDLE:
// hipblasHandle_t handle;
// hipblasCreate(&handle);
// Allocate 3 arrays on GPU
hipDoubleComplex *d_A, *d_B, *d_C;
hipMalloc(&d_A, n * n * sizeof(hipDoubleComplex));
hipMalloc(&d_B, n * n * sizeof(hipDoubleComplex));
hipMalloc(&d_C, n * n * sizeof(hipDoubleComplex));
// Pade order-selection thresholds (theta_m) and backward-error coefficients.
// NOTE(review): only indices 1..4 are ever read below — looks like a 1-based
// MATLAB indexing carry-over; confirm index 0 is intentionally unused.
double theta[5] = {1.495585217958292e-002, 2.539398330063230e-001,
9.504178996162932e-001, 2.097847961257068e+000,
5.371920351148152e+000};
double error_coefficients[5] = {1 / 100800.0, 1 / 10059033600.0, 1 / 4487938430976000.0,
1 / 113250775606021113483283660800000000.0,
1 / 113250775606021113483283660800000000.0};
// Allocate temporary arrays to hold temporary matrices used at various stages in the calculation
hipDoubleComplex *identity_new;
hipDoubleComplex *U_new;
hipDoubleComplex *V_new;
hipDoubleComplex *temp_new;
hipDoubleComplex *temp_2_new;
identity_new = (hipDoubleComplex *) malloc(n * n * sizeof(hipDoubleComplex));
U_new = (hipDoubleComplex *) malloc(n * n * sizeof(hipDoubleComplex));
V_new = (hipDoubleComplex *) malloc(n * n * sizeof(hipDoubleComplex));
temp_new = (hipDoubleComplex *) malloc(n * n * sizeof(hipDoubleComplex));
temp_2_new = (hipDoubleComplex *) malloc(n * n * sizeof(hipDoubleComplex));
double d4, d6, d8, d10, eta1, eta3, eta4, eta5, s;
hipDoubleComplex **Tpowers = get_Matrix_Powers_New(A, d_A, d_B, d_C, handle, n, multiply_total_time);
// Estimate 1-norms of matrix powers to pick the Pade order m_val.
d4 = pow(calculate_one_norm_New_complex(Tpowers[4], n), (1.0 / 4));
d6 = pow(calculate_one_norm_New_complex(Tpowers[6], n), (1.0 / 6));
eta1 = fmax(d4, d6);
int m_val = 0;
if (eta1 <= theta[1] && ell(A, temp_new, error_coefficients[1], 3, n, scale_total_time) == 0.0) {
m_val = 3;
}
if (eta1 <= theta[2] && ell(A, temp_new, error_coefficients[2], 5, n, scale_total_time) == 0.0) {
m_val = 5;
}
d8 = pow(calculate_one_norm_New_complex(Tpowers[8], n), (1.0 / 8));
eta3 = fmax(d6, d8);
if (eta3 <= theta[3] && ell(A, temp_new, error_coefficients[3], 7, n, scale_total_time) == 0.0) {
m_val = 7;
}
// NOTE(review): this call passes m_val = 0 to ell(), whose result divides by
// (2 * m_val) and so yields +inf; given the "m_val = 9" assignment below,
// the 0 looks like a typo for 9 — confirm.
if (eta3 <= theta[4] && ell(A, temp_new, error_coefficients[4], 0, n, scale_total_time) == 0.0) {
m_val = 9;
}
// NOTE(review): Tpowers[10] is never written by get_Matrix_Powers_New — this
// reads uninitialised malloc'd memory.
d10 = pow(calculate_one_norm_New_complex(Tpowers[10], n), (1.0 / 10));
eta4 = fmax(d8, d10);
eta5 = fmin(eta3, eta4);
// Number of squarings s so the scaled norm falls below the largest threshold.
s = fmax(ceil(log2(eta5 / theta[4])), 0);
matrix_Scale_New(A, A, make_cuDoubleComplex(1 / pow(2, s), 0), n, scale_total_time);
s = s + ell(A, temp_new, error_coefficients[4], 13, n, scale_total_time);
if (isinf(s)) { // Revert to old estimate
int exp;
double t = frexp(calculate_one_norm_New_complex(A, n) / theta[4], &exp);
s = s - (t == 0.5);
} else {
m_val = 13;
}
if ((int) s != 0) { // Rescale the matrix powers array - Batch potential?
// const hipDoubleComplex alf = make_cuDoubleComplex(1, 0);
// const hipDoubleComplex bet = make_cuDoubleComplex(0, 0);
// const hipDoubleComplex *alpha = &alf;
// const hipDoubleComplex *beta = &bet;
hipDoubleComplex multiplier = make_cuDoubleComplex(0, 0);
multiplier.x = 1.0 / pow(2, (s * 1));
matrix_Scale_New(Tpowers[1], Tpowers[1], multiplier, n, scale_total_time);
multiplier.x = 1.0 / pow(2, (s * 2));
matrix_Scale_New(Tpowers[2], Tpowers[2], multiplier, n, scale_total_time);
multiplier.x = 1.0 / pow(2, (s * 4));
matrix_Scale_New(Tpowers[4], Tpowers[4], multiplier, n, scale_total_time);
multiplier.x = 1.0 / pow(2, (s * 6));
matrix_Scale_New(Tpowers[6], Tpowers[6], multiplier, n, scale_total_time);
}
// PADE APPROXIMATION:
double c[15] = {1};
get_pade_coefficients(c, m_val);
set_Identity_New(identity_new, n);
if (m_val == 3 || m_val == 5 || m_val == 7 || m_val == 9) {
// NOTE(review): sizeof(Tpowers) is the size of a POINTER (typically 8),
// not the array length 11 — strt is platform-dependent; looks like a bug.
int strt = sizeof(Tpowers) + 2;
for (int k = strt; k < m_val - 1; k += 2) {
matrix_Multiply(handle, Tpowers[2], Tpowers[k-2], Tpowers[k], d_A, d_B, d_C, n, multiply_total_time);
}
matrix_Scale_New(identity_new, U_new, make_cuDoubleComplex (c[1], 0), n, scale_total_time);
matrix_Scale_New(identity_new, V_new, make_cuDoubleComplex (c[0], 0), n, scale_total_time);
// NOTE(review): with n = 1000 and m_val <= 9 the condition j > n is
// immediately false, so this loop never executes — bound likely meant j > 1.
for (int j = m_val; j > n; j -= 2) {
matrix_Scale_New(Tpowers[j - 1], temp_new, make_cuDoubleComplex(c[j + 1], 0), n, scale_total_time);
matrixAdd_New(U_new, temp_new, U_new, n);
matrix_Scale_New(Tpowers[j - 1], temp_new, make_cuDoubleComplex(c[j], 0), n, scale_total_time);
matrixAdd_New(V_new, temp_new, V_new, n);
}
matrix_Multiply(handle, U_new, A, temp_new, d_A, d_B, d_C, n, multiply_total_time);
memcpy(U_new, temp_new, n * n * sizeof(hipDoubleComplex));
}
// Order-13 path: evaluate U (odd part) and V (even part) of the Pade
// approximant, then form F = (V - U)^{-1} * (2U) + I and square s times.
if (m_val == 13) {
clock_t VU_BEGIN = clock();
// CALCULATE U:
matrix_Scale_New(Tpowers[6], temp_new, make_cuDoubleComplex(c[13], 0), n, scale_total_time);
memset(temp_2_new, 0, n * n * sizeof(hipDoubleComplex)); // IS THIS NEEDED?
matrixAdd_New(temp_new, temp_2_new, temp_2_new, n);
matrix_Scale_New(Tpowers[4], temp_new, make_cuDoubleComplex(c[11], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_2_new, n);
memset(temp_new, 0, n * n * sizeof(hipDoubleComplex)); // IS THIS NEEDED?
matrix_Scale_New(Tpowers[2], temp_new, make_cuDoubleComplex(c[9], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_2_new, n);
memset(temp_new, 0, n * n * sizeof(hipDoubleComplex));
matrix_Multiply(handle, Tpowers[6], temp_2_new, temp_new, d_A, d_B, d_C, n, multiply_total_time);
matrix_Scale_New(Tpowers[6], temp_2_new, make_cuDoubleComplex(c[7], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_new, n);
matrix_Scale_New(Tpowers[4], temp_2_new, make_cuDoubleComplex(c[5], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_new, n);
matrix_Scale_New(Tpowers[2], temp_2_new, make_cuDoubleComplex(c[3], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_new, n);
set_Identity_New(identity_new, n);
matrix_Scale_New(identity_new, temp_2_new, make_cuDoubleComplex(c[1], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_new, n);
memset(U_new, 0, n * n * sizeof(hipDoubleComplex)); // IS THIS NEEDED?
matrix_Multiply(handle, temp_new, Tpowers[1], U_new, d_A, d_B, d_C, n, multiply_total_time);
// CALCULATE V:
memset(temp_new, 0, n * n * sizeof(hipDoubleComplex));
matrix_Scale_New(Tpowers[6], temp_new, make_cuDoubleComplex(c[12], 0), n, scale_total_time);
memset(temp_2_new, 0, n * n * sizeof(hipDoubleComplex));
matrix_Scale_New(Tpowers[4], temp_2_new, make_cuDoubleComplex(c[10], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_2_new, n);
memset(temp_new, 0, n * n * sizeof(hipDoubleComplex));
matrix_Scale_New(Tpowers[2], temp_new, make_cuDoubleComplex(c[8], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_2_new, n);
memset(temp_new, 0, n * n * sizeof(hipDoubleComplex));
matrix_Multiply(handle, temp_2_new, Tpowers[6], temp_new, d_A, d_B, d_C, n, multiply_total_time);
memset(temp_2_new, 0, n * n * sizeof(hipDoubleComplex));
matrix_Scale_New(Tpowers[6], temp_2_new, make_cuDoubleComplex(c[6], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_new, n);
memset(temp_2_new, 0, n * n * sizeof(hipDoubleComplex));
matrix_Scale_New(Tpowers[4], temp_2_new, make_cuDoubleComplex(c[4], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_new, n);
memset(temp_2_new, 0, n * n * sizeof(hipDoubleComplex));
matrix_Scale_New(Tpowers[2], temp_2_new, make_cuDoubleComplex(c[2], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_new, n);
memset(temp_2_new, 0, n * n * sizeof(hipDoubleComplex));
set_Identity_New(identity_new, n);
matrix_Scale_New(identity_new, temp_2_new, make_cuDoubleComplex(c[0], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, V_new, n);
// CALCULATE V-U
matrix_Subtract_New(V_new, U_new, V_new, n);
matrix_Scale_New(U_new, U_new, make_cuDoubleComplex(2,0), n, scale_total_time);
memset(temp_2_new, 0, n * n * sizeof(hipDoubleComplex));
clock_t inverse_begin = clock();
InverseOfMatrix_Alternative_Two(V_new, temp_2_new, n, identity_new);
clock_t inverse_end = clock();
double inverse_total_time = (double)(inverse_end - inverse_begin) / CLOCKS_PER_SEC;
memset(temp_new, 0, n * n * sizeof(hipDoubleComplex));
matrix_Multiply(handle, temp_2_new, U_new, temp_new, d_A, d_B, d_C, n, multiply_total_time);
// CALCULATE F:
matrixAdd_New(temp_new, identity_new, temp_new, n);
clock_t VU_END = clock();
double VU_time_spent = (double)(VU_END - VU_BEGIN) / CLOCKS_PER_SEC; // End recording expm execution
clock_t SQUARE_BEGIN = clock();
// SQUARING PHASE:
for (int k = 0; k < s; k++) {
matrix_Multiply(handle, temp_new, temp_new, temp_2_new, d_A, d_B, d_C, n, multiply_total_time);
memcpy(temp_new, temp_2_new, n * n * sizeof(hipDoubleComplex));
memset(temp_2_new, 0, n * n * sizeof(hipDoubleComplex));// Is this needed?
}
// PERFORMANCE OUTPUT:
clock_t SQUARE_END = clock();
double SQUARE_time_spent = (double)(SQUARE_END - SQUARE_BEGIN) / CLOCKS_PER_SEC; // End recording expm execution
// PERFORMANCE OUTPUT:
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; // End recording expm execution
printf("\n----------------------- MATRIX OPERATIONS PERCENTAGE BREAKDOWN -----------------------\n");
printf("\n TOTAL TIME ELAPSED: %lf seconds \n", time_spent);
printf("\n INVERSE: %lf%% \n", (inverse_total_time/time_spent)*100);
printf("\n SCALE: %lf%% \n", (scale_total_time[0]/time_spent)*100);
printf("\n MULTIPLY: %lf%% \n", (multiply_total_time[0]/time_spent)*100);
printf("\n V-U TIME SPENT: %lf%% \n", (VU_time_spent/time_spent)*100);
printf("\n SQUARING TIME SPENT: %lf%% \n", (SQUARE_time_spent/time_spent)*100);
// memcpy(expo, temp_new, n * n * sizeof(hipDoubleComplex)); // Copy to system propegator memory
}
// Free host memory
free(identity_new);
free(U_new);
free(V_new);
free(temp_new);
free(temp_2_new);
free(scale_total_time);
free(multiply_total_time);
// NOTE(review): A, the Tpowers pointer array and its 11 matrices are never
// freed, and the hipBLAS handle is never destroyed.
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return 0;
}
// Resources
// http://www.netlib.org/utk/people/JackDongarra/PAPERS/Factor_Inversion_Million_Matrices-iccs17.pdf
// http://mathforcollege.com/nm/mws/che/04sle/mws_che_sle_spe_luinverse.pdf | 53b04493339a02e58c2f18fe66af4d636be80188.cu | // Batch matrix multiplication:
#include <cstdio>
#include <cstdlib>
#include <string>
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <memory.h>
#include <math.h>
#include <stdbool.h>
#include <time.h>
#include <cuda_runtime.h>
#include <cblas.h>
#include <cublas_v2.h>
#include <cusolverDn.h>
#include <cuComplex.h>
#include "expm.h"
// *** Matrix exponential program acting on matrices of type : [Double Complex] - 01/03/2019 ***
/*
Perform matrox-matrix multiplication for a batch of matrices
Batch instance all have uniform size
Input matrices inputtes as an array of pointers
*/
// int match_mul(cublasHandle_t handle, int n, cuDoubleComplex** Tpowers){
// const cuDoubleComplex alf = make_cuDoubleComplex(1, 0);
// const cuDoubleComplex bet = make_cuDoubleComplex(0, 0);
// const cuDoubleComplex *alpha = &alf;
// const cuDoubleComplex *beta = &bet;
// cublasZgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, *alpha, );
// }
// int match_mul(cublasHandle_t handle, int n){
// printf("HERE WE ARE\n");
// int i,j,k, index;
// cuDoubleComplex* h_A = (cuDoubleComplex*) malloc(sizeof(cuDoubleComplex) * n * n * 3);
// cuDoubleComplex* h_B = (cuDoubleComplex*) malloc(sizeof(cuDoubleComplex) * n * n * 3);
// cuDoubleComplex* h_C = (cuDoubleComplex*) malloc(sizeof(cuDoubleComplex) * n * n * 3);
// printf("HERE WE ARE 2\n");
// for(k=0; k<3; k++) {
// for(j=0; j<n; j++) {
// for(i=0; i<n; i++) {
// index = i*n + j + k*n*n;
// h_A[index] = make_cuDoubleComplex(index, index);
// h_B[index] = make_cuDoubleComplex(index, index);
// h_C[index] = make_cuDoubleComplex(0, 0);
// }
// }
// }
// printf("HERE WE ARE 3\n");
// //float *d_A, *d_B, *d_C;
// cuDoubleComplex *d_A;
// cudaMalloc(&d_A, sizeof(cuDoubleComplex) * n * n * 3);
// cuDoubleComplex *d_B;
// cudaMalloc(&d_B, sizeof(cuDoubleComplex) * n * n * 3);
// cuDoubleComplex *d_C;
// cudaMalloc(&d_C, sizeof(cuDoubleComplex) * n * n * 3);
// printf("HERE WE ARE 4\n");
// cudaMemcpy(d_A, h_A, sizeof(cuDoubleComplex) * n * n * 3, cudaMemcpyHostToDevice);
// cudaMemcpy(d_B, h_B, sizeof(cuDoubleComplex) * n * n * 3, cudaMemcpyHostToDevice);
// cudaMemcpy(d_C, h_C, sizeof(cuDoubleComplex) * n * n * 3, cudaMemcpyHostToDevice);
// printf("HERE WE ARE 4\n");
// float time_cuda_event;
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop) ;
// cudaEventRecord(start, 0);
// const cuDoubleComplex alf = make_cuDoubleComplex(1, 0);
// const cuDoubleComplex bet = make_cuDoubleComplex(0, 0);
// const cuDoubleComplex *alpha = &alf;
// const cuDoubleComplex *beta = &bet;
// printf("HERE WE ARE 5\n");
// cublasZgemmStridedBatched(handle,
// CUBLAS_OP_N,
// CUBLAS_OP_N,
// n, n, n,
// alpha,
// (const cuDoubleComplex*)d_A, n,
// n*n,
// (const cuDoubleComplex*)d_B, n,
// n*n,
// beta,
// d_C, n,
// n*n,
// 3);
// ( cudaEventRecord(stop, 0) );
// ( cudaEventSynchronize(stop) );
// ( cudaEventElapsedTime(&time_cuda_event, start, stop) );
// printf("Time : %3.1f ms \n", time_cuda_event);
// cudaMemcpy(h_C,d_C,sizeof(cuDoubleComplex) * n * n * 3,cudaMemcpyDeviceToHost);
// // Destroy the handle
// ( cudaEventRecord(stop, 0) );
// ( cudaEventSynchronize(stop) );
// ( cudaEventElapsedTime(&time_cuda_event, start, stop) );
// printf("HERE WE ARE 2\n");
// for(k=0; k<3; k++) {
// printf("\n\n\n");
// for(j=0; j<n; j++) {
// for(i=0; i<n; i++) {
// index = i*n + j + k*n*n;
// printf(" %lf ", h_C[index].x);
// printf("+ %lf i", h_C[index].y);
// }
// printf("\n");
// }
// }
// printf("\nTime : %3.1f ms \n", time_cuda_event);
// cudaFree(d_A);
// cudaFree(d_B);
// cudaFree(d_C);
// free(h_A);
// free(h_B);
// free(h_C);
// return 0;
// }
// Pretty-prints an n x n complex matrix, one bracketed row per line,
// with each entry rendered as "re + imi" to 15 decimal places.
void matrix_complex_print(cuDoubleComplex* A, int network_size){
for (int row = 0; row < network_size; row++){
printf("[");
for (int col = 0; col < network_size; col++){
cuDoubleComplex entry = A[(row * network_size) + col];
printf(" %.15lf ", entry.x);
printf("+");
printf(" %.15lfi ", entry.y);
}
printf("]");
printf("\n");
}
}
// Multiplies the two matrices ALREADY resident in device buffers d_A and d_B
// with cuBLAS Zgemm and copies the product from d_C back into host buffer C,
// accumulating elapsed wall-clock time into multiply_total_time[0].
// NOTE(review): the host-side parameters A and B are never read here — callers
// are expected to have staged the operands into d_A/d_B beforehand; confirm at
// the call sites in get_Matrix_Powers_New.
// NOTE(review): operands are passed to cublasZgemm as (d_B, d_A); with
// cuBLAS's column-major convention this appears intended to yield the
// row-major product A*B — verify.
// NOTE(review): neither the cublasStatus_t nor the cudaMemcpy result is
// checked.
void new_matrix_multiply(cublasHandle_t &handle, cuDoubleComplex *A, cuDoubleComplex *B, cuDoubleComplex *C, cuDoubleComplex *d_A, cuDoubleComplex* d_B, cuDoubleComplex *d_C, int n, double* multiply_total_time){
clock_t multiply_begin = clock();
const cuDoubleComplex alf = make_cuDoubleComplex(1, 0);
const cuDoubleComplex bet = make_cuDoubleComplex(0, 0);
const cuDoubleComplex *alpha = &alf;
const cuDoubleComplex *beta = &bet;
cublasZgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, alpha, d_B, n, d_A, n, beta, d_C, n); // Perform the cublas matrix multiplication
cudaMemcpy(C, d_C, n * n * sizeof(cuDoubleComplex),cudaMemcpyDeviceToHost); // Copy product back to host
clock_t multiply_end = clock();
double time_spent = (double)(multiply_end - multiply_begin) / CLOCKS_PER_SEC;
multiply_total_time[0] = multiply_total_time[0] + time_spent;
}
// Full host-to-host matrix product C = A * B: copies both host operands to the
// device, runs cuBLAS Zgemm, copies the product back, and accumulates the
// elapsed wall-clock time into multiply_total_time[0].
// NOTE(review): operands are passed to cublasZgemm as (d_B, d_A); with
// cuBLAS's column-major convention this appears intended to yield the
// row-major product A*B — verify.
// NOTE(review): no cublasStatus_t / cudaError_t checking is performed, and the
// blocking cudaMemcpy calls here dominate cost when called in a loop.
void matrix_Multiply(cublasHandle_t &handle, cuDoubleComplex *A, cuDoubleComplex *B, cuDoubleComplex *C, cuDoubleComplex *d_A, cuDoubleComplex* d_B, cuDoubleComplex *d_C, int n, double* multiply_total_time){
clock_t multiply_begin = clock();
const cuDoubleComplex alf = make_cuDoubleComplex(1, 0);
const cuDoubleComplex bet = make_cuDoubleComplex(0, 0);
const cuDoubleComplex *alpha = &alf;
const cuDoubleComplex *beta = &bet;
cudaMemcpy(d_A, A, n * n * sizeof(cuDoubleComplex),cudaMemcpyHostToDevice); // Copy first operand to the device
cudaMemcpy(d_B, B, n * n * sizeof(cuDoubleComplex),cudaMemcpyHostToDevice); // Copy second operand to the device
cublasZgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, alpha, d_B, n, d_A, n, beta, d_C, n); // Perform the cublas matrix multiplication
cudaMemcpy(C, d_C, n * n * sizeof(cuDoubleComplex),cudaMemcpyDeviceToHost); // Copy product back to host
clock_t multiply_end = clock();
double time_spent = (double)(multiply_end - multiply_begin) / CLOCKS_PER_SEC;
multiply_total_time[0] = multiply_total_time[0] + time_spent;
}
// Allocates an 11-slot array of n x n host matrices and fills the even powers
// of A needed by expm: Tpowers[1] = A, then Tpowers[2], [4], [6], [8].
// NOTE(review): Tpowers[10] is allocated but NEVER computed here, although
// main() later reads it — uninitialised memory; confirm intent.
// NOTE(review): after the first product, d_B still holds Tpowers[1]; the
// subsequent new_matrix_multiply calls therefore keep multiplying by A rather
// than by the powers the names suggest — verify against the commented-out
// reference sequence at the bottom ("IS IT FASTER & IS IT CORRECT?").
cuDoubleComplex **get_Matrix_Powers_New(cuDoubleComplex *A, cuDoubleComplex* d_A, cuDoubleComplex* d_B, cuDoubleComplex* d_C, cublasHandle_t handle, int n, double* multiply_total_time) {
cuDoubleComplex **Tpowers = (cuDoubleComplex **) malloc(11 * sizeof(cuDoubleComplex *));
for (int i = 0; i < 11; i++) {
Tpowers[i] = (cuDoubleComplex *) malloc(n * n * sizeof(cuDoubleComplex));
}
memcpy(Tpowers[1], A, n * n * sizeof(cuDoubleComplex));
// Is it possible to reduce the number of memroy copies here?
// 1 --- 1
// 2 ---- 2
// 4 --- 6
// 4 ---- 4
cudaMemcpy(d_A, Tpowers[1], n * n * sizeof(cuDoubleComplex),cudaMemcpyHostToDevice); // Copy first operand to the device
cudaMemcpy(d_B, Tpowers[1], n * n * sizeof(cuDoubleComplex),cudaMemcpyHostToDevice); // Copy second operand to the d
new_matrix_multiply(handle, d_A, d_B, Tpowers[2], d_A, d_B, d_C, n, multiply_total_time);
//cudaMemcpy(C, d_C, n * n * sizeof(cuDoubleComplex),cudaMemcpyDeviceToHost); // Copy product back to host
//cuDoubleComplex * tmp = d_A; d_A = d_C; d_C = tmp;
cudaMemcpy(d_A, Tpowers[2], n * n * sizeof(cuDoubleComplex),cudaMemcpyHostToDevice); // Copy first operand to the device
//cudaMemcpy(d_B, Tpowers[2], n * n * sizeof(cuDoubleComplex),cudaMemcpyHostToDevice); // Copy second operand to the d
new_matrix_multiply(handle, d_A, d_A, Tpowers[4], d_A, d_B, d_C, n, multiply_total_time);
//matrix_complex_print(Tpowers[4], n);
cudaMemcpy(d_A, Tpowers[4], n * n * sizeof(cuDoubleComplex),cudaMemcpyHostToDevice); // Copy first operand to the devic
new_matrix_multiply(handle, d_A, d_B, Tpowers[6], d_A, d_B, d_C, n, multiply_total_time);
cudaMemcpy(d_A, Tpowers[4], n * n * sizeof(cuDoubleComplex),cudaMemcpyHostToDevice); // Copy first operand to the devic
new_matrix_multiply(handle, d_B, d_B, Tpowers[8], d_A, d_B, d_C, n, multiply_total_time);
return Tpowers;
// IS IT FASTER & IS IT CORRECT?
// matrix_Multiply(handle, Tpowers[1], Tpowers[1], Tpowers[2], d_A, d_B, d_C, n, multiply_total_time);
// matrix_Multiply(handle, Tpowers[2], Tpowers[2], Tpowers[4], d_A, d_B, d_C, n, multiply_total_time);
// matrix_Multiply(handle, Tpowers[4], Tpowers[2], Tpowers[6], d_A, d_B, d_C, n, multiply_total_time);
// matrix_Multiply(handle, Tpowers[4], Tpowers[4], Tpowers[8], d_A, d_B, d_C, n, multiply_total_time);
}
// ---- Current Work ---
// Computes the inverse of L (n x n) via cuSOLVER LU factorisation: factors L,
// then solves L * X = b where callers pass b = identity, so X = L^{-1}.
// The result is copied back into `inverse`.
// NOTE(review): the string literals after cudaMalloc/cudaMemcpy calls
// (e.g. , "Failed to allocate A!") are discarded comma-expression operands —
// they never print; presumably leftovers from a removed check macro.
// NOTE(review): cusolverDnZgetrs's return value is NOT assigned to `status`,
// so the second status check re-tests the stale Zgetrf result.
// NOTE(review): `handler` is never destroyed (cusolverDnDestroy missing) —
// a resource leak on every call.
void InverseOfMatrix_Alternative_Two(cuDoubleComplex* L, cuDoubleComplex* inverse, int n, cuDoubleComplex* b){ // Calculate matrix inverse through LU factorisation
cusolverStatus_t status; // Link to the cusolver context
cusolverDnHandle_t handler;
status = cusolverDnCreate(&handler);
cuDoubleComplex* A;
int* dLUPivots_ALT;
int* dLUInfo_ALT;
cuDoubleComplex *buffer = NULL;
int bufferSize = 0;
int h_info = 0;
cuDoubleComplex *x;
cudaMalloc(&A, sizeof(cuDoubleComplex)*n*n), "Failed to allocate A!";
cudaMalloc(&x, n * n*sizeof(cuDoubleComplex)), "Failed to allocate x!";
cudaMalloc(&dLUPivots_ALT, n * sizeof(int)), "Failed to allocate dLUPivots!";
cudaMalloc(&dLUInfo_ALT, sizeof(int)), "Failed to allocate dLUInfo!";
cudaMemcpy(A, L, n*n*sizeof(cuDoubleComplex), cudaMemcpyHostToDevice), "Failed to copy to adL!";
cudaMemcpy(x, b, sizeof(cuDoubleComplex)*n*n, cudaMemcpyHostToDevice);
cusolverDnZgetrf_bufferSize(handler, n, n, (cuDoubleComplex*)A, n, &bufferSize);
cudaMalloc(&buffer, sizeof(cuDoubleComplex)*bufferSize);
status = cusolverDnZgetrf(handler, n, n, A, n, buffer, dLUPivots_ALT, dLUInfo_ALT);
if(status!=CUSOLVER_STATUS_SUCCESS){
printf("ERROR!!\n");
}
cudaMemcpy(&h_info, dLUInfo_ALT, sizeof(int), cudaMemcpyDeviceToHost);
if ( h_info != 0 ){
fprintf(stderr, "Error: LU factorization failed\n");
printf("%d\n", h_info );
}
cusolverDnZgetrs(handler, CUBLAS_OP_N, n, n, A, n, dLUPivots_ALT, x, n, dLUInfo_ALT);
cudaDeviceSynchronize();
if(status!=CUSOLVER_STATUS_SUCCESS){
printf("ERROR!!\n");
}
cudaMemcpy(&h_info, dLUInfo_ALT, sizeof(int), cudaMemcpyDeviceToHost);
if ( h_info != 0 ){
fprintf(stderr, "Error: LU factorization failed\n");
}
cudaMemcpy(inverse, x, sizeof(cuDoubleComplex) * n * n, cudaMemcpyDeviceToHost), "Failed to copy to res!";
// Free device memory:
cudaFree(dLUPivots_ALT);
cudaFree(dLUInfo_ALT);
cudaFree(A);
cudaFree(x);
cudaFree(buffer);
}
// Element-wise complex subtraction of two n x n matrices: c = a - b. // PARALLEL CANDIDATE
void matrix_Subtract_New(const cuDoubleComplex *a, const cuDoubleComplex *b, cuDoubleComplex *c, int n) {
const int total = n * n;
for (int idx = 0; idx < total; ++idx) {
c[idx] = cuCsub(a[idx], b[idx]); // Complex subtraction
}
}
// Element-wise complex addition of two n x n matrices: c = a + b. // PARALLEL CANDIDATE
void matrixAdd_New(const cuDoubleComplex *a, const cuDoubleComplex *b, cuDoubleComplex *c, int n) {
const int total = n * n;
for (int idx = 0; idx < total; ++idx) {
c[idx] = cuCadd(a[idx], b[idx]); // Complex addition
}
}
// Writes the n x n complex identity matrix into i_matrix.
// BUG FIX: the original only assigned the real part (.x) and never touched the
// imaginary part (.y); since callers pass a freshly malloc'd buffer, the
// identity carried indeterminate imaginary entries. Both components are now
// set explicitly.
void set_Identity_New(cuDoubleComplex *i_matrix, int n) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
i_matrix[(n * i) + j].x = (i == j) ? 1 : 0;
i_matrix[(n * i) + j].y = 0; // imaginary part must be zeroed too
}
}
}
// Scales every entry of the n x n matrix a by the complex factor `scale`,
// writing the result into `scaled` (may alias a). Accumulates the elapsed
// wall-clock time into scale_total_time[0].
void matrix_Scale_New(cuDoubleComplex *a, cuDoubleComplex *scaled, cuDoubleComplex scale, int n, double* scale_total_time ) {
clock_t t_start = clock();
const int total = n * n;
for (int idx = 0; idx < total; idx++) {
scaled[idx] = cuCmul(a[idx], scale); // Complex multiplication
}
clock_t t_end = clock();
scale_total_time[0] += (double)(t_end - t_start) / CLOCKS_PER_SEC;
}
// Writes the element-wise modulus of a into b: b[k] = |a[k]| + 0i.
void matrix_Absolute_New(cuDoubleComplex *a, cuDoubleComplex *b, int n) {
const int total = n * n;
for (int idx = 0; idx < total; idx++) {
b[idx] = make_cuDoubleComplex(cuCabs(a[idx]), 0);
}
}
// Matrix 1-norm of an n x n complex matrix: the maximum over columns of the
// sum of entry moduli in that column.
double calculate_one_norm_New_complex(const cuDoubleComplex *A, int n) {
double max = -DBL_MAX;
for (int col = 0; col < n; col++) {
double colSum = 0;
for (int row = 0; row < n; row++) {
colSum += cuCabs(A[(n * row) + col]);
}
if (colSum > max) {
max = colSum;
}
}
return max;
}
// COMPUTING OPTIMAL PARAMETERS
// expm parameter-selection helper: estimates the extra scaling needed for
// Pade order m_val from the ratio of 1-norms of p*|A| and A.
// BUG FIX: the original called memcpy(A, temp_new, ...), i.e. it copied the
// SCRATCH buffer over the caller's matrix A (dest/src swapped), clobbering A
// with indeterminate data before the norm was taken. The direction is now
// temp_new <- A, preserving the input.
// NOTE(review): one caller passes m_val = 0, which makes the divisor
// (2 * m_val) zero — the double division then yields +inf rather than
// trapping; confirm that caller meant order 9.
double ell(cuDoubleComplex *A, cuDoubleComplex *temp_new, double coeff, int m_val, int n, double* scale_total_time) {
double norm_one, norm_two, p, alpha, output;
memcpy(temp_new, A, n * n * sizeof(cuDoubleComplex)); // fixed direction: temp_new <- A
matrix_Absolute_New(A, temp_new, n); // temp_new = |A| element-wise
p = pow(coeff, (1.0 / (2 * m_val + 1)));
matrix_Scale_New(temp_new, temp_new, make_cuDoubleComplex(p, 0), n, scale_total_time);
norm_one = calculate_one_norm_New_complex(A, n);
norm_two = calculate_one_norm_New_complex(temp_new, n);
alpha = norm_two / norm_one;
// 2.220446049250313e-16 is double-precision unit roundoff (eps).
output = fmax(ceil(log2((2 * alpha) / 2.220446049250313e-16) / (2 * m_val)), 0);
return output;
}
// Copies the Pade approximant coefficients for order m (3, 5, 7, 9 or 13)
// into the caller-supplied buffer buf. buf must hold at least m + 1 doubles.
// Unsupported m leaves buf untouched.
// BUG FIX: the original assigned the LOCAL pointer parameter
// (buf = coefficients[k]), which never reached the caller, and every case
// fell through (no break) into the m = 13 copy — so callers always received
// the order-13 coefficients. Each case now copies its own row into the
// caller's buffer.
void get_pade_coefficients(double *buf, int m) {
static const double coefficients[5][14] = {
{120, 60, 12, 1},
{30240, 15120, 3360, 420, 30, 1},
{17297280, 8648640, 1995840, 277200, 25200, 1512, 56 ,1},
{17643225600, 8821612800, 2075673600, 302702400, 30270240, 2162160, 110880, 3960, 90, 1},
{64764752532480000, 32382376266240000, 7771770303897600, 1187353796428800, 129060195264000, 10559470521600, 670442572800, 33522128640, 1323241920, 40840800, 960960, 16380, 182, 1}
};
// Number of coefficients (m + 1) stored in each row above.
static const int counts[5] = {4, 6, 8, 10, 14};
int row;
switch (m) {
case 3 : row = 0; break;
case 5 : row = 1; break;
case 7 : row = 2; break;
case 9 : row = 3; break;
case 13 : row = 4; break;
default: return; // unknown order: leave buf unchanged
}
for (int i = 0; i < counts[row]; i++) {
buf[i] = coefficients[row][i];
}
}
// Driver: builds a 1000x1000 complex test matrix and computes its matrix
// exponential via scaling-and-squaring with Pade approximation (structure
// follows Higham's expm algorithm), printing a timing breakdown at the end.
// NOTE(review): scale_total_time / multiply_total_time are malloc'd but never
// zero-initialised, yet the helpers accumulate into them with "+=" — the
// reported totals start from indeterminate values.
int main(){
cublasHandle_t handle;
cublasCreate(&handle);
// VARIABLES TO HOLD TOTAL COMPONENT TIMES:
double* scale_total_time = (double *) malloc(1 * sizeof(double));
double* multiply_total_time = (double *) malloc(1 * sizeof(double));
cuDoubleComplex *A;
int n = 1000;
A = (cuDoubleComplex *) malloc(n * n * sizeof(cuDoubleComplex));
printf("HERE\n");
// Fill the test matrix: A[i][j] = (n*i + j) + (i*j)i.
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
A[(n*i) + j].x = (n*i) + j;
A[(n*i) + j].y = (i*j);
}
}
printf("HERE\n");
clock_t begin = clock(); // Begin recording expm execution
// CUBLAS HANDLE:
// cublasHandle_t handle;
// cublasCreate(&handle);
// Allocate 3 arrays on GPU
cuDoubleComplex *d_A, *d_B, *d_C;
cudaMalloc(&d_A, n * n * sizeof(cuDoubleComplex));
cudaMalloc(&d_B, n * n * sizeof(cuDoubleComplex));
cudaMalloc(&d_C, n * n * sizeof(cuDoubleComplex));
// Pade order-selection thresholds (theta_m) and backward-error coefficients.
// NOTE(review): only indices 1..4 are ever read below — looks like a 1-based
// MATLAB indexing carry-over; confirm index 0 is intentionally unused.
double theta[5] = {1.495585217958292e-002, 2.539398330063230e-001,
9.504178996162932e-001, 2.097847961257068e+000,
5.371920351148152e+000};
double error_coefficients[5] = {1 / 100800.0, 1 / 10059033600.0, 1 / 4487938430976000.0,
1 / 113250775606021113483283660800000000.0,
1 / 113250775606021113483283660800000000.0};
// Allocate temporary arrays to hold temporary matrices used at various stages in the calculation
cuDoubleComplex *identity_new;
cuDoubleComplex *U_new;
cuDoubleComplex *V_new;
cuDoubleComplex *temp_new;
cuDoubleComplex *temp_2_new;
identity_new = (cuDoubleComplex *) malloc(n * n * sizeof(cuDoubleComplex));
U_new = (cuDoubleComplex *) malloc(n * n * sizeof(cuDoubleComplex));
V_new = (cuDoubleComplex *) malloc(n * n * sizeof(cuDoubleComplex));
temp_new = (cuDoubleComplex *) malloc(n * n * sizeof(cuDoubleComplex));
temp_2_new = (cuDoubleComplex *) malloc(n * n * sizeof(cuDoubleComplex));
double d4, d6, d8, d10, eta1, eta3, eta4, eta5, s;
cuDoubleComplex **Tpowers = get_Matrix_Powers_New(A, d_A, d_B, d_C, handle, n, multiply_total_time);
// Estimate 1-norms of matrix powers to pick the Pade order m_val.
d4 = pow(calculate_one_norm_New_complex(Tpowers[4], n), (1.0 / 4));
d6 = pow(calculate_one_norm_New_complex(Tpowers[6], n), (1.0 / 6));
eta1 = fmax(d4, d6);
int m_val = 0;
if (eta1 <= theta[1] && ell(A, temp_new, error_coefficients[1], 3, n, scale_total_time) == 0.0) {
m_val = 3;
}
if (eta1 <= theta[2] && ell(A, temp_new, error_coefficients[2], 5, n, scale_total_time) == 0.0) {
m_val = 5;
}
d8 = pow(calculate_one_norm_New_complex(Tpowers[8], n), (1.0 / 8));
eta3 = fmax(d6, d8);
if (eta3 <= theta[3] && ell(A, temp_new, error_coefficients[3], 7, n, scale_total_time) == 0.0) {
m_val = 7;
}
// NOTE(review): this call passes m_val = 0 to ell(), whose result divides by
// (2 * m_val) and so yields +inf; given the "m_val = 9" assignment below,
// the 0 looks like a typo for 9 — confirm.
if (eta3 <= theta[4] && ell(A, temp_new, error_coefficients[4], 0, n, scale_total_time) == 0.0) {
m_val = 9;
}
// NOTE(review): Tpowers[10] is never written by get_Matrix_Powers_New — this
// reads uninitialised malloc'd memory.
d10 = pow(calculate_one_norm_New_complex(Tpowers[10], n), (1.0 / 10));
eta4 = fmax(d8, d10);
eta5 = fmin(eta3, eta4);
// Number of squarings s so the scaled norm falls below the largest threshold.
s = fmax(ceil(log2(eta5 / theta[4])), 0);
matrix_Scale_New(A, A, make_cuDoubleComplex(1 / pow(2, s), 0), n, scale_total_time);
s = s + ell(A, temp_new, error_coefficients[4], 13, n, scale_total_time);
if (isinf(s)) { // Revert to old estimate
int exp;
double t = frexp(calculate_one_norm_New_complex(A, n) / theta[4], &exp);
s = s - (t == 0.5);
} else {
m_val = 13;
}
if ((int) s != 0) { // Rescale the matrix powers array - Batch potential?
// const cuDoubleComplex alf = make_cuDoubleComplex(1, 0);
// const cuDoubleComplex bet = make_cuDoubleComplex(0, 0);
// const cuDoubleComplex *alpha = &alf;
// const cuDoubleComplex *beta = &bet;
cuDoubleComplex multiplier = make_cuDoubleComplex(0, 0);
multiplier.x = 1.0 / pow(2, (s * 1));
matrix_Scale_New(Tpowers[1], Tpowers[1], multiplier, n, scale_total_time);
multiplier.x = 1.0 / pow(2, (s * 2));
matrix_Scale_New(Tpowers[2], Tpowers[2], multiplier, n, scale_total_time);
multiplier.x = 1.0 / pow(2, (s * 4));
matrix_Scale_New(Tpowers[4], Tpowers[4], multiplier, n, scale_total_time);
multiplier.x = 1.0 / pow(2, (s * 6));
matrix_Scale_New(Tpowers[6], Tpowers[6], multiplier, n, scale_total_time);
}
// PADE APPROXIMATION:
double c[15] = {1};
get_pade_coefficients(c, m_val);
set_Identity_New(identity_new, n);
if (m_val == 3 || m_val == 5 || m_val == 7 || m_val == 9) {
// NOTE(review): sizeof(Tpowers) is the size of a POINTER (typically 8),
// not the array length 11 — strt is platform-dependent; looks like a bug.
int strt = sizeof(Tpowers) + 2;
for (int k = strt; k < m_val - 1; k += 2) {
matrix_Multiply(handle, Tpowers[2], Tpowers[k-2], Tpowers[k], d_A, d_B, d_C, n, multiply_total_time);
}
matrix_Scale_New(identity_new, U_new, make_cuDoubleComplex (c[1], 0), n, scale_total_time);
matrix_Scale_New(identity_new, V_new, make_cuDoubleComplex (c[0], 0), n, scale_total_time);
// NOTE(review): with n = 1000 and m_val <= 9 the condition j > n is
// immediately false, so this loop never executes — bound likely meant j > 1.
for (int j = m_val; j > n; j -= 2) {
matrix_Scale_New(Tpowers[j - 1], temp_new, make_cuDoubleComplex(c[j + 1], 0), n, scale_total_time);
matrixAdd_New(U_new, temp_new, U_new, n);
matrix_Scale_New(Tpowers[j - 1], temp_new, make_cuDoubleComplex(c[j], 0), n, scale_total_time);
matrixAdd_New(V_new, temp_new, V_new, n);
}
matrix_Multiply(handle, U_new, A, temp_new, d_A, d_B, d_C, n, multiply_total_time);
memcpy(U_new, temp_new, n * n * sizeof(cuDoubleComplex));
}
// Order-13 path: evaluate U (odd part) and V (even part) of the Pade
// approximant, then form F = (V - U)^{-1} * (2U) + I and square s times.
if (m_val == 13) {
clock_t VU_BEGIN = clock();
// CALCULATE U:
matrix_Scale_New(Tpowers[6], temp_new, make_cuDoubleComplex(c[13], 0), n, scale_total_time);
memset(temp_2_new, 0, n * n * sizeof(cuDoubleComplex)); // IS THIS NEEDED?
matrixAdd_New(temp_new, temp_2_new, temp_2_new, n);
matrix_Scale_New(Tpowers[4], temp_new, make_cuDoubleComplex(c[11], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_2_new, n);
memset(temp_new, 0, n * n * sizeof(cuDoubleComplex)); // IS THIS NEEDED?
matrix_Scale_New(Tpowers[2], temp_new, make_cuDoubleComplex(c[9], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_2_new, n);
memset(temp_new, 0, n * n * sizeof(cuDoubleComplex));
matrix_Multiply(handle, Tpowers[6], temp_2_new, temp_new, d_A, d_B, d_C, n, multiply_total_time);
matrix_Scale_New(Tpowers[6], temp_2_new, make_cuDoubleComplex(c[7], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_new, n);
matrix_Scale_New(Tpowers[4], temp_2_new, make_cuDoubleComplex(c[5], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_new, n);
matrix_Scale_New(Tpowers[2], temp_2_new, make_cuDoubleComplex(c[3], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_new, n);
set_Identity_New(identity_new, n);
matrix_Scale_New(identity_new, temp_2_new, make_cuDoubleComplex(c[1], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_new, n);
memset(U_new, 0, n * n * sizeof(cuDoubleComplex)); // IS THIS NEEDED?
matrix_Multiply(handle, temp_new, Tpowers[1], U_new, d_A, d_B, d_C, n, multiply_total_time);
// CALCULATE V:
memset(temp_new, 0, n * n * sizeof(cuDoubleComplex));
matrix_Scale_New(Tpowers[6], temp_new, make_cuDoubleComplex(c[12], 0), n, scale_total_time);
memset(temp_2_new, 0, n * n * sizeof(cuDoubleComplex));
matrix_Scale_New(Tpowers[4], temp_2_new, make_cuDoubleComplex(c[10], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_2_new, n);
memset(temp_new, 0, n * n * sizeof(cuDoubleComplex));
matrix_Scale_New(Tpowers[2], temp_new, make_cuDoubleComplex(c[8], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_2_new, n);
memset(temp_new, 0, n * n * sizeof(cuDoubleComplex));
matrix_Multiply(handle, temp_2_new, Tpowers[6], temp_new, d_A, d_B, d_C, n, multiply_total_time);
memset(temp_2_new, 0, n * n * sizeof(cuDoubleComplex));
matrix_Scale_New(Tpowers[6], temp_2_new, make_cuDoubleComplex(c[6], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_new, n);
memset(temp_2_new, 0, n * n * sizeof(cuDoubleComplex));
matrix_Scale_New(Tpowers[4], temp_2_new, make_cuDoubleComplex(c[4], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_new, n);
memset(temp_2_new, 0, n * n * sizeof(cuDoubleComplex));
matrix_Scale_New(Tpowers[2], temp_2_new, make_cuDoubleComplex(c[2], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, temp_new, n);
memset(temp_2_new, 0, n * n * sizeof(cuDoubleComplex));
set_Identity_New(identity_new, n);
matrix_Scale_New(identity_new, temp_2_new, make_cuDoubleComplex(c[0], 0), n, scale_total_time);
matrixAdd_New(temp_new, temp_2_new, V_new, n);
// CALCULATE V-U
matrix_Subtract_New(V_new, U_new, V_new, n);
matrix_Scale_New(U_new, U_new, make_cuDoubleComplex(2,0), n, scale_total_time);
memset(temp_2_new, 0, n * n * sizeof(cuDoubleComplex));
clock_t inverse_begin = clock();
InverseOfMatrix_Alternative_Two(V_new, temp_2_new, n, identity_new);
clock_t inverse_end = clock();
double inverse_total_time = (double)(inverse_end - inverse_begin) / CLOCKS_PER_SEC;
memset(temp_new, 0, n * n * sizeof(cuDoubleComplex));
matrix_Multiply(handle, temp_2_new, U_new, temp_new, d_A, d_B, d_C, n, multiply_total_time);
// CALCULATE F:
matrixAdd_New(temp_new, identity_new, temp_new, n);
clock_t VU_END = clock();
double VU_time_spent = (double)(VU_END - VU_BEGIN) / CLOCKS_PER_SEC; // End recording expm execution
clock_t SQUARE_BEGIN = clock();
// SQUARING PHASE:
for (int k = 0; k < s; k++) {
matrix_Multiply(handle, temp_new, temp_new, temp_2_new, d_A, d_B, d_C, n, multiply_total_time);
memcpy(temp_new, temp_2_new, n * n * sizeof(cuDoubleComplex));
memset(temp_2_new, 0, n * n * sizeof(cuDoubleComplex));// Is this needed?
}
// PERFORMANCE OUTPUT:
clock_t SQUARE_END = clock();
double SQUARE_time_spent = (double)(SQUARE_END - SQUARE_BEGIN) / CLOCKS_PER_SEC; // End recording expm execution
// PERFORMANCE OUTPUT:
clock_t end = clock();
double time_spent = (double)(end - begin) / CLOCKS_PER_SEC; // End recording expm execution
printf("\n----------------------- MATRIX OPERATIONS PERCENTAGE BREAKDOWN -----------------------\n");
printf("\n TOTAL TIME ELAPSED: %lf seconds \n", time_spent);
printf("\n INVERSE: %lf%% \n", (inverse_total_time/time_spent)*100);
printf("\n SCALE: %lf%% \n", (scale_total_time[0]/time_spent)*100);
printf("\n MULTIPLY: %lf%% \n", (multiply_total_time[0]/time_spent)*100);
printf("\n V-U TIME SPENT: %lf%% \n", (VU_time_spent/time_spent)*100);
printf("\n SQUARING TIME SPENT: %lf%% \n", (SQUARE_time_spent/time_spent)*100);
// memcpy(expo, temp_new, n * n * sizeof(cuDoubleComplex)); // Copy to system propegator memory
}
// Free host memory
free(identity_new);
free(U_new);
free(V_new);
free(temp_new);
free(temp_2_new);
free(scale_total_time);
free(multiply_total_time);
// NOTE(review): A, the Tpowers pointer array and its 11 matrices are never
// freed, and the cuBLAS handle is never destroyed.
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
}
// Resources
// http://www.netlib.org/utk/people/JackDongarra/PAPERS/Factor_Inversion_Million_Matrices-iccs17.pdf
// http://mathforcollege.com/nm/mws/che/04sle/mws_che_sle_spe_luinverse.pdf |
ed8a68f8d4e5a818c8769a2e6a6fa1f4050cdc58.hip | // !!! This is a file automatically generated by hipify!!!
/*
nvcc -o matrixMulCUBLAS matrixMulCUBLAS.cpp -I/usr/local/cuda-6.5/samples/common/inc/ -lcublas
*/
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Superconducting (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_string.h>
// Host-side reference matrix product: C = A * B (row-major).
// A is hA x wA, B is wA x wB, C is hA x wB. Partial sums are accumulated
// in double to limit rounding error before the final narrowing store.
void
matrixMulCPU(float *C, const float *A, const float *B, unsigned int hA, unsigned int wA, unsigned int wB)
{
    for (unsigned int row = 0; row < hA; ++row)
    {
        const float *aRow = A + row * wA;
        float *cRow = C + row * wB;
        for (unsigned int col = 0; col < wB; ++col)
        {
            double acc = 0;
            for (unsigned int t = 0; t < wA; ++t)
                acc += (double)aRow[t] * (double)B[t * wB + col];
            cRow[col] = (float)acc;
        }
    }
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
// Tiled shared-memory matrix multiply kernel: C = A * B (row-major).
// Expected launch shape: BLOCK_SIZE x BLOCK_SIZE threads per block, one block
// per BLOCK_SIZE x BLOCK_SIZE tile of C.
// NOTE(review): there is no bounds guard, so wA, wB and the height of A must
// be exact multiples of BLOCK_SIZE or the kernel reads/writes out of range --
// confirm all callers guarantee this (the sample's defaults do).
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Index of the first sub-matrix of A processed by the block
    int aBegin = wA * BLOCK_SIZE * by;
    // Index of the last sub-matrix of A processed by the block
    int aEnd = aBegin + wA - 1;
    // Step size used to iterate through the sub-matrices of A
    int aStep = BLOCK_SIZE;
    // Index of the first sub-matrix of B processed by the block
    int bBegin = BLOCK_SIZE * bx;
    // Step size used to iterate through the sub-matrices of B
    int bStep = BLOCK_SIZE * wB;
    // Csub is used to store the element of the block sub-matrix
    // that is computed by the thread
    float Csub = 0;
    // Loop over all the sub-matrices of A and B
    // required to compute the block sub-matrix
    for (int a = aBegin, b = bBegin;
         a <= aEnd;
         a += aStep, b += bStep)
    {
        // Declaration of the shared memory array As used to
        // store the sub-matrix of A
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        // Declaration of the shared memory array Bs used to
        // store the sub-matrix of B
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        // Load the matrices from device memory
        // to shared memory; each thread loads
        // one element of each matrix
        As[ty][tx] = A[a + wA * ty + tx];
        Bs[ty][tx] = B[b + wB * ty + tx];
        // Synchronize to make sure the matrices are loaded
        __syncthreads();
        // Multiply the two matrices together;
        // each thread computes one element
        // of the block sub-matrix
#pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k)
        {
            Csub += As[ty][k] * Bs[k][tx];
        }
        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }
    // Write the block sub-matrix to device memory;
    // each thread writes one element
    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
}
// Fill the first `size` elements of `data` with the constant `val`.
// A non-positive `size` leaves the buffer untouched.
void constantInit(float *data, int size, float val)
{
    int idx = 0;
    while (idx < size)
    {
        data[idx] = val;
        ++idx;
    }
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
/**
 * Run a simple test of matrix multiplication using CUDA/HIP.
 *
 * Allocates A (dimsA) and B (dimsB) on host and device, times nIter runs of
 * matrixMulCUDA with events, verifies the result against the closed-form
 * expected value (every element of C equals dimsA.x * valB because A is all
 * ones and B is all valB), then times a CPU reference and prints the speedup.
 *
 * Returns EXIT_SUCCESS when the GPU result matches, EXIT_FAILURE otherwise;
 * exits the process on any allocation/copy/event failure.
 *
 * NOTE(review): grid = dims/threads assumes dimsB.x and dimsA.y are exact
 * multiples of block_size; other sizes leave an uncomputed tail -- confirm
 * callers only pass conforming dimensions.
 */
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
    // Allocate host memory for matrices A and B
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = (float *)malloc(mem_size_A);
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = (float *)malloc(mem_size_B);
    // Initialize host memory
    const float valB = 0.01f;
    constantInit(h_A, size_A, 1.0f);
    constantInit(h_B, size_B, valB);
    // Allocate device memory
    float *d_A, *d_B, *d_C;
    // Allocate host matrix C
    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
    float *h_C = (float *) malloc(mem_size_C);
    if (h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrix C!\n");
        exit(EXIT_FAILURE);
    }
    hipError_t error;
    error = hipMalloc((void **) &d_A, mem_size_A);
    if (error != hipSuccess)
    {
        printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = hipMalloc((void **) &d_B, mem_size_B);
    if (error != hipSuccess)
    {
        printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = hipMalloc((void **) &d_C, mem_size_C);
    if (error != hipSuccess)
    {
        printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    // copy host memory to device
    error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
    if (error != hipSuccess)
    {
        printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);
    if (error != hipSuccess)
    {
        printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    // Setup execution parameters
    dim3 threads(block_size, block_size);
    dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
    // Create and start timer
    printf("Computing result using CUDA Kernel...\n");
    // Performs warmup operation using matrixMul CUDA kernel
    if (block_size == 16)
    {
        hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
    }
    else
    {
        hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
    }
    printf("done\n");
    hipDeviceSynchronize();
    // Allocate CUDA events that we'll use for timing
    hipEvent_t start;
    error = hipEventCreate(&start);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    hipEvent_t stop;
    error = hipEventCreate(&stop);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Record the start event
    error = hipEventRecord(start, NULL);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Execute the kernel
    int nIter = 10;
    for (int j = 0; j < nIter; j++)
    {
        if (block_size == 16)
        {
            hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
        }
        else
        {
            hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x);
        }
    }
    // Record the stop event
    error = hipEventRecord(stop, NULL);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Wait for the stop event to complete
    error = hipEventSynchronize(stop);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    float msecTotal = 0.0f;
    error = hipEventElapsedTime(&msecTotal, start, stop);
    if (error != hipSuccess)
    {
        fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Compute and print the performance (2 flops per multiply-add)
    float msecPerMatrixMul = msecTotal / nIter;
    double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
    printf(
        "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
        gigaFlops,
        msecPerMatrixMul,
        flopsPerMatrixMul,
        threads.x * threads.y);
    // Copy result from device to host
    error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);
    if (error != hipSuccess)
    {
        printf("hipMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    printf("Checking computed result for correctness: ");
    bool correct = true;
    for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
    {
        if (fabs(h_C[i] - (dimsA.x * valB)) > 1e-5)
        {
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-5\n", i, h_C[i], dimsA.x*valB);
            correct = false;
        }
    }
    printf("%s\n", correct ? "OK" : "FAIL");
    //Test on CPU
    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    sdkResetTimer(&timer);
    printf("Running on host ...\n");
    sdkStartTimer(&timer);
    for(int k=0; k<nIter; k++) {
        printf("k=%d ... ", k);
        // BUG FIX: hA must be the HEIGHT of A (dimsA.y), not its width.
        // The original passed dimsA.x, which computed the wrong number of
        // rows (and could overflow h_C) whenever A is not square.
        matrixMulCPU(h_C, h_A, h_B, dimsA.y, dimsA.x, dimsB.x);
    }
    sdkStopTimer(&timer);
    double msecTotalCPU = sdkGetTimerValue(&timer);
    float msecPerVecAddCPU = msecTotalCPU / nIter;
    printf("Time spent by CPU: %.3f msec\n", msecPerVecAddCPU);
    printf("Speedup: %.3f\n", msecPerVecAddCPU / msecPerMatrixMul);
    // Clean up memory (events and timer were previously leaked)
    sdkDeleteTimer(&timer);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    free(h_A);
    free(h_B);
    free(h_C);
    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n");
    hipDeviceReset();
    if (correct)
    {
        return EXIT_SUCCESS;
    }
    else
    {
        return EXIT_FAILURE;
    }
}
/**
* Program main
*/
/**
 * Program main.
 *
 * Parses command-line options (-device, -wA/-hA, -wB/-hB), validates that
 * the matrices are conformable (wA == hB), and runs the matrix-multiply
 * benchmark. Exits with the benchmark's status code.
 */
int main(int argc, char **argv)
{
    printf("[Matrix Multiply Using CUDA] - Starting...\n");
    if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
        checkCmdLineFlag(argc, (const char **)argv, "?"))
    {
        printf("Usage -device=n (n >= 0 for deviceID)\n");
        printf("      -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
        printf("      -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
        printf("  Note: Outer matrix dimensions of A & B matrices must be equal.\n");
        exit(EXIT_SUCCESS);
    }
    // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
    int devID = 0;
    hipError_t error;
    if (checkCmdLineFlag(argc, (const char **)argv, "device"))
    {
        devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
        // FIX: the return value of hipSetDevice() was silently ignored.
        error = hipSetDevice(devID);
        if (error != hipSuccess)
        {
            printf("hipSetDevice returned error code %d, line(%d)\n", error, __LINE__);
            exit(EXIT_FAILURE);
        }
    }
    hipDeviceProp_t deviceProp;
    error = hipGetDevice(&devID);
    if (error != hipSuccess)
    {
        printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__);
    }
    error = hipGetDeviceProperties(&deviceProp, devID);
    // FIX: the query status must be checked BEFORE reading deviceProp; the
    // original inspected deviceProp.computeMode first, i.e. it read an
    // uninitialized struct whenever the query failed.
    if (error != hipSuccess)
    {
        printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    if (deviceProp.computeMode == hipComputeModeProhibited)
    {
        fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
        exit(EXIT_SUCCESS);
    }
    printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    // Use a larger block size for Fermi and above
    int block_size = (deviceProp.major < 2) ? 16 : 32;
    dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
    dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
    // width of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
    {
        dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
    }
    // height of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
    {
        dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
    }
    // width of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
    {
        dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
    }
    // height of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
    {
        dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
    }
    if (dimsA.x != dimsB.y)
    {
        printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
               dimsA.x, dimsB.y);
        exit(EXIT_FAILURE);
    }
    printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
    int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
    exit(matrix_result);
}
| ed8a68f8d4e5a818c8769a2e6a6fa1f4050cdc58.cu | /*
nvcc -o matrixMulCUBLAS matrixMulCUBLAS.cpp -I/usr/local/cuda-6.5/samples/common/inc/ -lcublas
*/
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Matrix multiplication: C = A * B.
* Host code.
*
* This sample implements matrix multiplication as described in Chapter 3
* of the programming guide.
* It has been written for clarity of exposition to illustrate various CUDA
* programming principles, not with the goal of providing the most
* performant generic kernel for matrix multiplication.
*
* See also:
* V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
* in Proc. 2008 ACM/IEEE Conf. on Superconducting (SC '08),
* Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_string.h>
void
// Host-side reference matrix product: C = A * B (row-major).
// A is hA x wA, B is wA x wB, C is hA x wB. Partial sums are accumulated
// in double to limit rounding error before the final narrowing store.
matrixMulCPU(float *C, const float *A, const float *B, unsigned int hA, unsigned int wA, unsigned int wB)
{
    for (unsigned int row = 0; row < hA; ++row)
    {
        const float *aRow = A + row * wA;
        float *cRow = C + row * wB;
        for (unsigned int col = 0; col < wB; ++col)
        {
            double acc = 0;
            for (unsigned int t = 0; t < wA; ++t)
                acc += (double)aRow[t] * (double)B[t * wB + col];
            cRow[col] = (float)acc;
        }
    }
}
/**
* Matrix multiplication (CUDA Kernel) on the device: C = A * B
* wA is A's width and wB is B's width
*/
// Tiled shared-memory matrix multiply kernel: C = A * B (row-major).
// Expected launch shape: BLOCK_SIZE x BLOCK_SIZE threads per block, one block
// per BLOCK_SIZE x BLOCK_SIZE tile of C.
// NOTE(review): there is no bounds guard, so wA, wB and the height of A must
// be exact multiples of BLOCK_SIZE or the kernel reads/writes out of range --
// confirm all callers guarantee this (the sample's defaults do).
template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Index of the first sub-matrix of A processed by the block
    int aBegin = wA * BLOCK_SIZE * by;
    // Index of the last sub-matrix of A processed by the block
    int aEnd = aBegin + wA - 1;
    // Step size used to iterate through the sub-matrices of A
    int aStep = BLOCK_SIZE;
    // Index of the first sub-matrix of B processed by the block
    int bBegin = BLOCK_SIZE * bx;
    // Step size used to iterate through the sub-matrices of B
    int bStep = BLOCK_SIZE * wB;
    // Csub is used to store the element of the block sub-matrix
    // that is computed by the thread
    float Csub = 0;
    // Loop over all the sub-matrices of A and B
    // required to compute the block sub-matrix
    for (int a = aBegin, b = bBegin;
         a <= aEnd;
         a += aStep, b += bStep)
    {
        // Declaration of the shared memory array As used to
        // store the sub-matrix of A
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        // Declaration of the shared memory array Bs used to
        // store the sub-matrix of B
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        // Load the matrices from device memory
        // to shared memory; each thread loads
        // one element of each matrix
        As[ty][tx] = A[a + wA * ty + tx];
        Bs[ty][tx] = B[b + wB * ty + tx];
        // Synchronize to make sure the matrices are loaded
        __syncthreads();
        // Multiply the two matrices together;
        // each thread computes one element
        // of the block sub-matrix
#pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k)
        {
            Csub += As[ty][k] * Bs[k][tx];
        }
        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }
    // Write the block sub-matrix to device memory;
    // each thread writes one element
    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
}
// Fill the first `size` elements of `data` with the constant `val`.
// A non-positive `size` leaves the buffer untouched.
void constantInit(float *data, int size, float val)
{
    int idx = 0;
    while (idx < size)
    {
        data[idx] = val;
        ++idx;
    }
}
/**
* Run a simple test of matrix multiplication using CUDA
*/
/**
 * Run a simple test of matrix multiplication using CUDA.
 *
 * Allocates A (dimsA) and B (dimsB) on host and device, times nIter runs of
 * matrixMulCUDA with events, verifies the result against the closed-form
 * expected value (every element of C equals dimsA.x * valB because A is all
 * ones and B is all valB), then times a CPU reference and prints the speedup.
 *
 * Returns EXIT_SUCCESS when the GPU result matches, EXIT_FAILURE otherwise;
 * exits the process on any allocation/copy/event failure.
 *
 * NOTE(review): grid = dims/threads assumes dimsB.x and dimsA.y are exact
 * multiples of block_size; other sizes leave an uncomputed tail -- confirm
 * callers only pass conforming dimensions.
 */
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
    // Allocate host memory for matrices A and B
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = (float *)malloc(mem_size_A);
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = (float *)malloc(mem_size_B);
    // Initialize host memory
    const float valB = 0.01f;
    constantInit(h_A, size_A, 1.0f);
    constantInit(h_B, size_B, valB);
    // Allocate device memory
    float *d_A, *d_B, *d_C;
    // Allocate host matrix C
    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
    float *h_C = (float *) malloc(mem_size_C);
    if (h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrix C!\n");
        exit(EXIT_FAILURE);
    }
    cudaError_t error;
    error = cudaMalloc((void **) &d_A, mem_size_A);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMalloc((void **) &d_B, mem_size_B);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMalloc((void **) &d_C, mem_size_C);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    // copy host memory to device
    error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    // Setup execution parameters
    dim3 threads(block_size, block_size);
    dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y);
    // Create and start timer
    printf("Computing result using CUDA Kernel...\n");
    // Performs warmup operation using matrixMul CUDA kernel
    if (block_size == 16)
    {
        matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
    }
    else
    {
        matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
    }
    printf("done\n");
    cudaDeviceSynchronize();
    // Allocate CUDA events that we'll use for timing
    cudaEvent_t start;
    error = cudaEventCreate(&start);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    cudaEvent_t stop;
    error = cudaEventCreate(&stop);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Record the start event
    error = cudaEventRecord(start, NULL);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Execute the kernel
    int nIter = 10;
    for (int j = 0; j < nIter; j++)
    {
        if (block_size == 16)
        {
            matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
        }
        else
        {
            matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x);
        }
    }
    // Record the stop event
    error = cudaEventRecord(stop, NULL);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Wait for the stop event to complete
    error = cudaEventSynchronize(stop);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    float msecTotal = 0.0f;
    error = cudaEventElapsedTime(&msecTotal, start, stop);
    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // Compute and print the performance (2 flops per multiply-add)
    float msecPerMatrixMul = msecTotal / nIter;
    double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
    printf(
        "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
        gigaFlops,
        msecPerMatrixMul,
        flopsPerMatrixMul,
        threads.x * threads.y);
    // Copy result from device to host
    error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    printf("Checking computed result for correctness: ");
    bool correct = true;
    for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
    {
        if (fabs(h_C[i] - (dimsA.x * valB)) > 1e-5)
        {
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-5\n", i, h_C[i], dimsA.x*valB);
            correct = false;
        }
    }
    printf("%s\n", correct ? "OK" : "FAIL");
    //Test on CPU
    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    sdkResetTimer(&timer);
    printf("Running on host ...\n");
    sdkStartTimer(&timer);
    for(int k=0; k<nIter; k++) {
        printf("k=%d ... ", k);
        // BUG FIX: hA must be the HEIGHT of A (dimsA.y), not its width.
        // The original passed dimsA.x, which computed the wrong number of
        // rows (and could overflow h_C) whenever A is not square.
        matrixMulCPU(h_C, h_A, h_B, dimsA.y, dimsA.x, dimsB.x);
    }
    sdkStopTimer(&timer);
    double msecTotalCPU = sdkGetTimerValue(&timer);
    float msecPerVecAddCPU = msecTotalCPU / nIter;
    printf("Time spent by CPU: %.3f msec\n", msecPerVecAddCPU);
    printf("Speedup: %.3f\n", msecPerVecAddCPU / msecPerMatrixMul);
    // Clean up memory (events and timer were previously leaked)
    sdkDeleteTimer(&timer);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n");
    cudaDeviceReset();
    if (correct)
    {
        return EXIT_SUCCESS;
    }
    else
    {
        return EXIT_FAILURE;
    }
}
/**
* Program main
*/
/**
 * Program main.
 *
 * Parses command-line options (-device, -wA/-hA, -wB/-hB), validates that
 * the matrices are conformable (wA == hB), and runs the matrix-multiply
 * benchmark. Exits with the benchmark's status code.
 */
int main(int argc, char **argv)
{
    printf("[Matrix Multiply Using CUDA] - Starting...\n");
    if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
        checkCmdLineFlag(argc, (const char **)argv, "?"))
    {
        printf("Usage -device=n (n >= 0 for deviceID)\n");
        printf("      -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
        printf("      -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
        printf("  Note: Outer matrix dimensions of A & B matrices must be equal.\n");
        exit(EXIT_SUCCESS);
    }
    // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
    int devID = 0;
    cudaError_t error;
    if (checkCmdLineFlag(argc, (const char **)argv, "device"))
    {
        devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
        // FIX: the return value of cudaSetDevice() was silently ignored.
        error = cudaSetDevice(devID);
        if (error != cudaSuccess)
        {
            printf("cudaSetDevice returned error code %d, line(%d)\n", error, __LINE__);
            exit(EXIT_FAILURE);
        }
    }
    cudaDeviceProp deviceProp;
    error = cudaGetDevice(&devID);
    if (error != cudaSuccess)
    {
        printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
    }
    error = cudaGetDeviceProperties(&deviceProp, devID);
    // FIX: the query status must be checked BEFORE reading deviceProp; the
    // original inspected deviceProp.computeMode first, i.e. it read an
    // uninitialized struct whenever the query failed.
    if (error != cudaSuccess)
    {
        printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    if (deviceProp.computeMode == cudaComputeModeProhibited)
    {
        fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
        exit(EXIT_SUCCESS);
    }
    printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    // Use a larger block size for Fermi and above
    int block_size = (deviceProp.major < 2) ? 16 : 32;
    dim3 dimsA(5*2*block_size, 5*2*block_size, 1);
    dim3 dimsB(5*4*block_size, 5*2*block_size, 1);
    // width of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
    {
        dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
    }
    // height of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
    {
        dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
    }
    // width of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
    {
        dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
    }
    // height of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
    {
        dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
    }
    if (dimsA.x != dimsB.y)
    {
        printf("Error: outer matrix dimensions must be equal. (%d != %d)\n",
               dimsA.x, dimsB.y);
        exit(EXIT_FAILURE);
    }
    printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
    int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
    exit(matrix_result);
}
|
80b7c98e2c04abb0bf3444a47ae903ab6e82ed6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "util.h"
#include <stdio.h>
#include <assert.h>
#include <iostream>
//#include <glog/logging.h>
#include <hip/hip_runtime.h>
#include "hipsparse.h"
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
// Benchmark problem dimensions (match the binary data files loaded in main).
const int batch = 12;    // minibatch size (columns of the output distribution)
const int voc = 50000;   // vocabulary size (rows per hash band)
const int embed = 1001;  // NOTE(review): presumably hidden width 1000 + 1 bias row -- confirm
int W = 1000;            // number of LSH hash bands / codes per batch element
#define LOG(INFO) std::cout  // minimal stand-in for glog's LOG macro
#define CHECK assert         // minimal stand-in for glog's CHECK macro
// Column-major 2-D indexing into a flat array with leading dimension ld.
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
unsigned int * d_starts, * d_lengths;  // NOTE(review): not referenced in this chunk -- confirm usage elsewhere
// input
//50000 12
// Load a binary matrix dump from `fn` (header: two ints {rows, cols},
// followed by rows*cols elements of dType) and upload it to the GPU.
// Returns the device pointer; the caller owns it and must hipFree() it.
// Aborts (via CHECK/assert) on open or short-read failures.
template<typename dType>
dType* read_matrix(const char* fn){
    // FIX: keep side-effecting calls (fopen/fread) OUT of CHECK -- CHECK is
    // assert, so under NDEBUG the original would skip the reads entirely.
    FILE *fp = fopen(fn, "rb");
    CHECK(fp != NULL);
    int meta[2];
    size_t nmeta = fread(meta, sizeof(int), 2, fp);
    CHECK(nmeta == 2);
    int count = meta[0] * meta[1];
    dType* h_data = (dType *)malloc(count*sizeof(dType));
    CHECK(h_data != NULL);
    size_t nread = fread(h_data, sizeof(dType), count, fp);
    CHECK(nread == (size_t)count);
    fclose(fp);  // FIX: file handle was previously leaked
    dType* d_data;
    checkCudaError(hipMalloc(&d_data, count*sizeof(dType)));
    checkCudaError(hipMemcpy(d_data, h_data, count*sizeof(dType),
                   hipMemcpyHostToDevice));
    free(h_data);  // FIX: host staging buffer was previously leaked
    std::cout << fn << " " << meta[0] << " " << meta[1] << "\n";
    return d_data;
}
// First cuckoo-table hash: a 32-bit integer mixing function (Thomas
// Wang-style add/xor/shift rounds). Deterministic; no device state touched.
__device__
unsigned int hash_func_1_gpu(unsigned int a){
    unsigned int h = a;
    h = (h + 0x7ed55d16) + (h << 12);
    h = (h ^ 0xc761c23c) ^ (h >> 19);
    h = (h + 0x165667b1) + (h << 5);
    h = (h + 0xd3a2646c) ^ (h << 9);
    h = (h + 0xfd7046c5) + (h << 3);
    h = (h ^ 0xb55a4f09) ^ (h >> 16);
    return h;
}
// Second cuckoo-table hash: a different 32-bit integer mixer so the two
// tables collide on different keys. Deterministic; no device state touched.
__device__
unsigned int hash_func_2_gpu(unsigned int key){
    const unsigned int kOddConst = 0x27d4eb2d; // a prime or an odd constant
    unsigned int h = key;
    h = (h ^ 61) ^ (h >> 16);
    h += h << 3;
    h ^= h >> 4;
    h *= kOddConst;
    h ^= h >> 15;
    return h;
}
// Copy a column-major `rows` x `cols` device matrix to the host and print the
// sub-block [row_start,row_end) x [col_start,col_end) to stdout, one row per
// line. Debug helper only; the hipMemcpy status is not checked.
template<typename dType>
void print_matrix_gpu(dType *d_matrix,int rows,int cols, int row_start, int row_end, int col_start, int col_end) {
    dType *h_matrix = (dType *)malloc(rows*cols*sizeof(dType));
    hipMemcpy(h_matrix, d_matrix, rows*cols*sizeof(dType), hipMemcpyDeviceToHost);
    for (int r = row_start; r < row_end; ++r) {
        for (int c = col_start; c < col_end; ++c) {
            std::cout << h_matrix[r + c*rows] << " ";
        }
        std::cout << "\n";
    }
    std::cout << "\n";
    free(h_matrix);
}
// <<<1,32>>>
// Child kernel (launched <<<1, 256>>> from cuckoo_lookup_T): for each of the
// `length` entries of hash band `w_index` starting at row `start` of
// d_bands_index (column-major, vocab_size rows), atomically bump the matching
// word's count for batch element `batch_index` in d_outputdist.
__global__ void inc_range(float *d_outputdist, unsigned int * d_bands_index, int start, int length, int w_index, int batch_index, int vocab_size){
    int idx = threadIdx.x;
    while (idx < length) {
        unsigned int word_index = d_bands_index[IDX2C(start + idx, w_index, vocab_size)];
        atomicAdd(&d_outputdist[IDX2C(word_index, batch_index, vocab_size)], 1.0);
        idx += blockDim.x;
    }
}
// d_codes: [W, batch_size]
// d_bands_index: [vocab_sizeW]
// d_outputdist: [vocab_size, batch_size]
// <<<(batch_size,batch), 256>>> : each block is responsible for each batch
// <<<64, 12>>>
// LSH / cuckoo-hash lookup kernel: one block per batch element, threads
// stride over the W hash bands. Each band's code is probed in a two-table
// cuckoo hash (d_key_*/d_value_*/d_length_*, keyed per band) to find the
// slice [start, start+length) of d_bands_index holding candidate word ids;
// each candidate word's count in d_outputdist (column-major,
// vocab_size x batch_size) is atomically incremented.
// Large buckets (length >= 256) are offloaded to the inc_range child kernel
// via dynamic parallelism; small ones are drained inline.
// Launch shape: <<<batch_size, T>>>; requires device-side kernel launch.
template<typename dType>
__global__
void cuckoo_lookup_T(unsigned int *d_codes, dType *d_outputdist,int batch_size, int vocab_size, int W,
                   unsigned int *d_key_1, unsigned int *d_value_1, unsigned int * d_length_1,
                   unsigned int *d_key_2, unsigned int *d_value_2, unsigned int * d_length_2,
                   unsigned int *d_bands_index){
    int batch_index = blockIdx.x;
    for (int w_index = threadIdx.x; w_index < W; w_index += blockDim.x){
        unsigned int code = d_codes[w_index + batch_index * W];
        // Cuckoo lookup: probe table 1 first, fall back to table 2 on a miss.
        unsigned int key1 = hash_func_1_gpu(code) % vocab_size + w_index * vocab_size;
        int start = -1;
        int length = 0;
        if (d_key_1[key1] == code){
            start = d_value_1[key1];
            length = d_length_1[key1];
        } else {
            unsigned int key2 = hash_func_2_gpu(code) % vocab_size + w_index * vocab_size;
            if (d_key_2[key2] == code){
                start = d_value_2[key2];
                length = d_length_2[key2];
            }
        }
        if (length >= 256){
            // Large bucket: fan out to a child kernel on its own stream.
            hipStream_t s;
            hipStreamCreateWithFlags(&s, hipStreamNonBlocking);
            hipLaunchKernelGGL(( inc_range), dim3(1),dim3(256),0,s, d_outputdist,d_bands_index,start,length,w_index,batch_index,vocab_size);
            // BUG FIX: the stream was never released, leaking one device-side
            // stream per large bucket. The runtime defers actual destruction
            // until the child kernel's work completes, so destroying it right
            // after the launch is safe.
            hipStreamDestroy(s);
        } else {
            for (int i = 0 ; i< length; i ++ ){
                unsigned int word_index = d_bands_index[IDX2C(start + i, w_index, vocab_size)];
                atomicAdd(&d_outputdist[IDX2C(word_index, batch_index, vocab_size)], 1.0);
            }
        }
    }
}
// LSH lookup variant of cuckoo_lookup_T: each thread stages the bucket it
// found (band index, slice start, slice length) in shared memory, and the
// 32 entries staged by a warp's own lanes are then drained cooperatively by
// that warp, so long buckets are split across lanes instead of handled by a
// single thread. Launch: <<<batch_size, T>>> with T <= 1024 (maxThreads).
// NOTE(review): there is no __syncwarp()/__syncthreads() between the shared
// stores and the cross-lane reads below; under independent thread scheduling
// (Volta+/equivalent) this is a data race. Also, on a final partial
// iteration (W % blockDim.x != 0) lanes that already exited the loop leave
// STALE entries from the previous iteration in s_*, which surviving lanes
// re-drain -- cuckoo_lookup_T_4 guards this with n_alive_thread, this
// variant does not. Confirm W % blockDim.x == 0 at call sites.
template<typename dType>
__global__
void cuckoo_lookup_T_3(unsigned int *d_codes, dType *d_outputdist,int batch_size, int vocab_size, int W,
                   unsigned int *d_key_1, unsigned int *d_value_1, unsigned int * d_length_1,
                   unsigned int *d_key_2, unsigned int *d_value_2, unsigned int * d_length_2,
                   unsigned int *d_bands_index){
    int batch_index = blockIdx.x;
    const int maxThreads = 1024;
    // Per-thread staged bucket descriptors, indexed by threadIdx.x.
    __shared__ int s_w_index[maxThreads];
    __shared__ int s_start[maxThreads];
    __shared__ int s_length[maxThreads];
    for (int w_index = threadIdx.x; w_index < W; w_index += blockDim.x){
        unsigned int code = d_codes[w_index + batch_index * W];
        //cuckoo lookup;
        unsigned int key1 = hash_func_1_gpu(code) % vocab_size + w_index * vocab_size;
        int start = -1;
        int length = 0;
        if (d_key_1[key1] == code){
            start = d_value_1[key1];
            length = d_length_1[key1];
        } else {
            unsigned int key2 = hash_func_2_gpu(code) % vocab_size + w_index * vocab_size;
            if (d_key_2[key2] == code){
                start = d_value_2[key2];
                length = d_length_2[key2];
            }
        }
        // Publish this thread's bucket for its warp mates to drain.
        s_w_index[threadIdx.x] = w_index;
        s_start[threadIdx.x] = start;
        s_length[threadIdx.x] = length;
        // First lane index of this thread's warp, and how many lanes of the
        // warp are actually populated (the last warp may be partial).
        int i_start = (threadIdx.x / 32) * 32;
        int nalive_thread_in_warp = (blockDim.x - i_start > 32) ? 32 : blockDim.x - i_start;
        // Each lane walks every bucket staged by its warp, taking every
        // nalive_thread_in_warp-th element of that bucket.
        for (int i = i_start; i < i_start + 32 && i < blockDim.x; i++){
            int _length = s_length[i];
            int _w_index = s_w_index[i];
            int _start = s_start[i];
            for (int j = threadIdx.x % 32; j < _length; j += nalive_thread_in_warp){
                unsigned int word_index = d_bands_index[IDX2C(_start + j, _w_index, vocab_size)];
                atomicAdd(&d_outputdist[IDX2C(word_index, batch_index, vocab_size)], 1.0);
            }
        }
    }
}
// LSH lookup variant with block-wide work stealing: bucket descriptors are
// staged in shared memory (as in cuckoo_lookup_T_3), then drained one element
// at a time through atomicSub/atomicAdd on the shared counters, so lanes
// load-balance across buckets of uneven length. A partial final iteration
// (W % blockDim.x != 0) is handled via n_alive_thread.
// Launch: <<<batch_size, T>>> with T <= 1024 (maxThreads).
// NOTE(review): there is a __syncthreads() after the stores, but none at the
// END of each outer iteration -- a fast lane could restage s_* for the next
// w_index while slower lanes are still draining the current batch of
// buckets. Verify with racecheck before relying on this variant.
template<typename dType>
__global__
void cuckoo_lookup_T_4(unsigned int *d_codes, dType *d_outputdist,int batch_size, int vocab_size, int W,
                   unsigned int *d_key_1, unsigned int *d_value_1, unsigned int * d_length_1,
                   unsigned int *d_key_2, unsigned int *d_value_2, unsigned int * d_length_2,
                   unsigned int *d_bands_index){
    int batch_index = blockIdx.x;
    const int maxThreads = 1024;
    // Per-thread staged bucket descriptors; s_start/s_length double as the
    // shared work-stealing cursors below.
    __shared__ int s_w_index[maxThreads];
    __shared__ int s_start[maxThreads];
    __shared__ int s_length[maxThreads];
    for (int w_index = threadIdx.x; w_index < W; w_index += blockDim.x){
        unsigned int code = d_codes[w_index + batch_index * W];
        //cuckoo lookup;
        unsigned int key1 = hash_func_1_gpu(code) % vocab_size + w_index * vocab_size;
        int start = -1;
        int length = 0;
        if (d_key_1[key1] == code){
            start = d_value_1[key1];
            length = d_length_1[key1];
        } else {
            unsigned int key2 = hash_func_2_gpu(code) % vocab_size + w_index * vocab_size;
            if (d_key_2[key2] == code){
                start = d_value_2[key2];
                length = d_length_2[key2];
            }
        }
        s_w_index[threadIdx.x] = w_index;
        s_start[threadIdx.x] = start;
        s_length[threadIdx.x] = length;
        __syncthreads();
        // Number of threads that staged a bucket this iteration (< blockDim.x
        // only on the final, partial iteration).
        int n_alive_thread = (w_index >= W / blockDim.x * blockDim.x ) ? W - W / blockDim.x * blockDim.x : blockDim.x;
        int i_start = (threadIdx.x / 32) * 32;
        int nalive_thread_in_warp = (blockDim.x - i_start > 32) ? 32 : blockDim.x - i_start;
        // Steal elements: decrement the bucket's remaining count; on success
        // claim the next element via the start cursor, otherwise move to the
        // next bucket this lane is responsible for.
        int ii = threadIdx.x % 32;
        while(ii < n_alive_thread){
            int _length = atomicSub(s_length+ii, 1);
            if (_length > 0){
                int _w_index = s_w_index[ii];
                int _start = atomicAdd(s_start+ii, 1);
                unsigned int word_index = d_bands_index[IDX2C(_start, _w_index, vocab_size)];
                atomicAdd(&d_outputdist[IDX2C(word_index, batch_index, vocab_size)], 1.0);
            } else {
                ii += nalive_thread_in_warp;
            }
        }
    }
}
//<<<(batch, (vocab + 10k-1) / 10k), ::min(W,1024)>>>
// Shared-memory histogram variant: launched
// <<<(batch_size, ceil(vocab_size/10000)), min(W,1024)>>>, each block zeroes
// a 10000-entry slice of the vocabulary in shared memory and performs the
// cuckoo probes for its batch element.
// NOTE(review): the accumulation loop and the write-back to d_outputdist are
// both commented out, so as written this kernel only executes the hash
// probes and produces NO output -- it appears to be a benchmarking stub.
// Re-enable both commented sections (and the __syncthreads()) to make it
// functional.
template<typename dType>
__global__
void cuckoo_lookup_T_2(unsigned int *d_codes, dType *d_outputdist,int batch_size, int vocab_size, int W,
                   unsigned int *d_key_1, unsigned int *d_value_1, unsigned int * d_length_1,
                   unsigned int *d_key_2, unsigned int *d_value_2, unsigned int * d_length_2,
                   unsigned int *d_bands_index ){
    // init the shared memory to zero
    const int N = 10000;
    __shared__ int vocab_shared[N];
    int word_index_begin = blockIdx.y * N;
    int word_index_end = (word_index_begin + N > vocab_size) ? vocab_size : word_index_begin + N;
    for (int i = threadIdx.x; i < N; i += blockDim.x){
        vocab_shared[i] = 0;
    }
    __syncthreads();
    int batch_index = blockIdx.x;
    for (int w_index = threadIdx.x; w_index < W; w_index += blockDim.x){
        unsigned int code = d_codes[w_index + batch_index * W];
        //cuckoo lookup;
        unsigned int key1 = hash_func_1_gpu(code) % vocab_size + w_index * vocab_size;
        int start = -1;
        int length = 0;
        if (d_key_1[key1] == code){
            start = d_value_1[key1];
            length = d_length_1[key1];
        } else {
            unsigned int key2 = hash_func_2_gpu(code) % vocab_size + w_index * vocab_size;
            if (d_key_2[key2] == code){
                start = d_value_2[key2];
                length = d_length_2[key2];
            }
        }
        /*
        if(length < 256){
            for (int i =0; i< length; i ++){
                unsigned int word_index = d_bands_index[IDX2C(start + i, w_index, vocab_size)];
                if (word_index >= word_index_begin && word_index < word_index_end){
                    atomicAdd(vocab_shared + (word_index % N), 1);
                }
            }
        }
        */
    }
    //__syncthreads();
    /*
    // copy the value back to global_memory
    for(int i = threadIdx.x; i < word_index_end - word_index_begin; i += blockDim.x){
        int wi = word_index_begin + i;
        d_outputdist[IDX2C(wi,batch_index, vocab_size)] = vocab_shared[i];
    }
    */
}
// Benchmark driver: loads the cuckoo hash tables and input codes, runs the
// lookup kernel once per Test iteration, and prints a slice of the computed
// vs. expected histogram. The //rows cols comment under each load records
// the expected matrix shape as printed by read_matrix.
int main() {
    unsigned int* d_bands_index = read_matrix<unsigned int>("./data/d_bands_index_input.txt.bin");
    //50000 1000
    unsigned int* d_ht_pad_codes = read_matrix<unsigned int>("./data/d_ht_pad_codes_input.txt.bin");
    //1000 12
    unsigned int* d_key1 = read_matrix<unsigned int>("./data/d_key1_input.txt.bin");
    //50000 1000
    unsigned int* d_key2 = read_matrix<unsigned int>("./data/d_key2_input.txt.bin");
    //50000 1000
    unsigned int* d_length1 = read_matrix<unsigned int>("./data/d_length1_input.txt.bin");
    //50000 1000
    unsigned int* d_length2 = read_matrix<unsigned int>("./data/d_length2_input.txt.bin");
    //50000 1000
    float* d_dist_input = read_matrix<float>("./data/d_outputdist_lookup_input.txt.bin");
    //50000 12
    unsigned int* d_value1 = read_matrix<unsigned int>("./data/d_value1_input.txt.bin");
    //50000 1000
    unsigned int* d_value2 = read_matrix<unsigned int>("./data/d_value2_input.txt.bin");
    //50000 1000
    // output (reference result to compare against)
    float* d_dist_output = read_matrix<float>("./data/d_outputdist_input.txt.bin");
    checkCudaError(hipMalloc(&d_starts, 1024 * batch*sizeof(unsigned int)));
    checkCudaError(hipMalloc(&d_lengths, 1024 * batch*sizeof(unsigned int)));
    { //dense2array
        float msecTotal = 0.0f;
        int Test = 1;
        // NOTE: these locals shadow the file-scope d_starts/d_lengths above.
        int *d_starts, *d_lengths;
        // Fix: check the allocations (they were unchecked).
        checkCudaError(hipMalloc((void **)&d_starts, W * batch * sizeof(int)));
        checkCudaError(hipMalloc((void **)&d_lengths, W * batch * sizeof(int)));
        hipEvent_t start, stop;
        checkCudaError(hipEventCreate(&start));
        checkCudaError(hipEventCreate(&stop));
        checkCudaError(hipEventRecord(start, NULL));
        for (int i = 0; i < Test; i++) {
            checkCudaError(hipMemset(d_dist_input, 0, voc * batch* sizeof(float))); //0.02ms
            hipLaunchKernelGGL(( cuckoo_lookup_T_3), dim3(batch), dim3(::min(1024,W)), 0, 0, d_ht_pad_codes, d_dist_input, batch, voc, W, d_key1, d_value1, d_length1, d_key2, d_value2, d_length2, d_bands_index); // 0.9 ms
            checkCudaError(hipDeviceSynchronize());
            //cuckoo_lookup_T<<<batch, ::min(1024,W)>>>(d_ht_pad_codes, d_dist_input, batch, voc, W, d_key1, d_value1, d_length1, d_key2, d_value2, d_length2, d_bands_index); // 0.9 ms
            //const int N = 10000;
            //cuckoo_lookup_T_2<<<dim3(batch, (voc+N-1)/N ), ::min(1024,W)>>>(d_ht_pad_codes, d_dist_input, batch, voc, W, d_key1, d_value1, d_length1, d_key2, d_value2, d_length2, d_bands_index); // 0.9 ms
        }
        checkCudaError(hipEventRecord(stop, NULL));
        checkCudaError(hipEventSynchronize(stop));
        checkCudaError(hipEventElapsedTime(&msecTotal, start, stop));
        msecTotal /= Test;
        LOG(INFO) << "cuckoo_lookup Time= " << msecTotal << " msec, ";
        checkCudaError(hipGetLastError());
        int offset = 0;
        std::cout << "d_dist_input\n";
        print_matrix_gpu(d_dist_input, voc, batch, offset, offset+10, 0, batch);
        std::cout << "d_dist_output\n";
        print_matrix_gpu(d_dist_output, voc, batch, offset, offset+10, 0, batch);
        /*
        std::cout << "d_bands_index\n";
        print_matrix_gpu(d_bands_index, voc, W, offset, offset+10, 0, batch);
        std::cout << "d_ht_pad_codes\n";
        print_matrix_gpu(d_ht_pad_codes, W, batch, offset, offset+10, 0, batch);
        std::cout << "d_key1\n";
        print_matrix_gpu(d_key1, voc, W, offset, offset+10, 0, batch);
        std::cout << "d_value1\n";
        print_matrix_gpu(d_value1, voc, W, offset, offset+10, 0, batch);
        std::cout << "d_length1\n";
        print_matrix_gpu(d_length1, voc, W, offset, offset+10, 0, batch);
        std::cout << "d_key2\n";
        print_matrix_gpu(d_key2, voc, W, offset, offset+10, 0, batch);
        std::cout << "d_value2\n";
        print_matrix_gpu(d_value2, voc, W, offset, offset+10, 0, batch);
        std::cout << "d_length2\n";
        print_matrix_gpu(d_length2, voc, W, offset, offset+10, 0, batch);
        */
        // Fix: destroy the timing events and free the local scratch buffers
        // (previously leaked; the locals shadow the globals, so the globals
        // stay valid).
        checkCudaError(hipEventDestroy(start));
        checkCudaError(hipEventDestroy(stop));
        checkCudaError(hipFree(d_starts));
        checkCudaError(hipFree(d_lengths));
    }
    // Input matrices and the global scratch buffers are reclaimed at process
    // exit (this is a one-shot benchmark binary).
    return 0;
}
| 80b7c98e2c04abb0bf3444a47ae903ab6e82ed6c.cu | #include "util.h"
#include <stdio.h>
#include <assert.h>
#include <iostream>
//#include <glog/logging.h>
#include <cuda_runtime.h>
#include "cusparse.h"
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
// Benchmark problem dimensions.
const int batch = 12; // batch size (columns of the output distribution)
const int voc = 50000; // vocabulary size (rows of the output distribution)
const int embed = 1001; // embedding dimension (not used in this benchmark)
int W = 1000; // number of hash bands / codes per batch element
// Minimal stand-ins for glog-style logging and checking.
#define LOG(INFO) std::cout
#define CHECK assert
// Column-major 2-D indexing: element (i, j) of a matrix with leading dim ld.
#define IDX2C(i,j,ld) (((j)*(ld))+(i))
// Scratch buffers, allocated in main().
unsigned int * d_starts, * d_lengths;
// input
//50000 12
// Reads a binary matrix file whose layout is two int32 dimensions (rows,
// cols) followed by rows*cols elements of dType, uploads the payload to the
// GPU, logs "<file> rows cols", and returns the device pointer.
// The caller owns the returned device allocation.
template<typename dType>
dType* read_matrix(const char* fn){
    FILE *fp = NULL;
    CHECK(fp = fopen(fn, "rb"));
    int meta[2];
    CHECK(fread(meta, sizeof(int), 2, fp) == 2);
    int count = meta[0] * meta[1];
    dType* h_data = (dType *)malloc(count*sizeof(dType));
    CHECK(h_data != NULL);
    dType* d_data;
    CHECK(fread(h_data, sizeof(dType), count, fp) == count);
    checkCudaError(cudaMalloc(&d_data, count*sizeof(dType)));
    checkCudaError(cudaMemcpy(d_data, h_data, count*sizeof(dType),
        cudaMemcpyHostToDevice));
    // Fix: release the host staging buffer and close the file -- both were
    // leaked on every call.
    free(h_data);
    fclose(fp);
    std::cout << fn << " " << meta[0] << " " << meta[1] << "\n";
    return d_data;
}
// First cuckoo hash: a 32-bit integer bit-mixing function (the constants and
// shift pattern match Bob Jenkins' well-known 6-shift integer hash --
// presumably taken from there; verify against the original host-side tables).
__device__
unsigned int hash_func_1_gpu(unsigned int a){
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
// Second cuckoo hash: another 32-bit integer mix (Thomas Wang style --
// xor-shift/add rounds with one odd-constant multiply). Must stay identical
// to the host-side function used to build the tables.
__device__
unsigned int hash_func_2_gpu(unsigned int key){
unsigned int c2=0x27d4eb2d; // a prime or an odd constant
key = (key ^ 61) ^ (key >> 16);
key = key + (key << 3);
key = key ^ (key >> 4);
key = key * c2;
key = key ^ (key >> 15);
return key;
}
// Copies a column-major (rows x cols) device matrix to the host and prints
// the sub-block [row_start, row_end) x [col_start, col_end) to stdout,
// one row per line, followed by a blank line.
template<typename dType>
void print_matrix_gpu(dType *d_matrix,int rows,int cols, int row_start, int row_end, int col_start, int col_end) {
    const size_t bytes = (size_t)rows * cols * sizeof(dType);
    dType *h_matrix = (dType *)malloc(bytes);
    cudaMemcpy(h_matrix, d_matrix, bytes, cudaMemcpyDeviceToHost);
    for (int r = row_start; r < row_end; ++r) {
        for (int c = col_start; c < col_end; ++c) {
            std::cout << h_matrix[IDX2C(r, c, rows)] << " ";
        }
        std::cout << "\n";
    }
    std::cout << "\n";
    free(h_matrix);
}
// Child kernel (launched via dynamic parallelism from cuckoo_lookup_T):
// scatters `length` histogram increments for band `w_index` of one batch
// column. Launched with a single block; threads stride over the run.
__global__ void inc_range(float *d_outputdist, unsigned int * d_bands_index, int start, int length, int w_index, int batch_index, int vocab_size){
    for (int off = threadIdx.x; off < length; off += blockDim.x) {
        const unsigned int word = d_bands_index[IDX2C(start + off, w_index, vocab_size)];
        atomicAdd(&d_outputdist[IDX2C(word, batch_index, vocab_size)], 1.0f);
    }
}
// Band-count histogram via cuckoo lookup.
//   d_codes:       [W, batch_size] hash codes, column-major
//   d_bands_index: [vocab_size, W] word ids grouped by band bucket
//   d_outputdist:  [vocab_size, batch_size] histogram, atomically incremented
// Launch: <<<batch_size, T>>>; each block handles one batch element, each
// thread strides over the W bands. Runs of >= 256 words are expanded by a
// child kernel (dynamic parallelism: requires relocatable device code and
// SM 3.5+ -- TODO confirm build flags).
template<typename dType>
__global__
void cuckoo_lookup_T(unsigned int *d_codes, dType *d_outputdist,int batch_size, int vocab_size, int W,
unsigned int *d_key_1, unsigned int *d_value_1, unsigned int * d_length_1,
unsigned int *d_key_2, unsigned int *d_value_2, unsigned int * d_length_2,
unsigned int *d_bands_index){
    int batch_index = blockIdx.x;
    for (int w_index = threadIdx.x; w_index < W; w_index += blockDim.x){
        unsigned int code = d_codes[w_index + batch_index * W];
        // cuckoo lookup: try table 1, fall back to table 2; length stays 0
        // (no work) when the code is in neither table.
        unsigned int key1 = hash_func_1_gpu(code) % vocab_size + w_index * vocab_size;
        int start = -1;
        int length = 0;
        if (d_key_1[key1] == code){
            start = d_value_1[key1];
            length = d_length_1[key1];
        } else {
            unsigned int key2 = hash_func_2_gpu(code) % vocab_size + w_index * vocab_size;
            if (d_key_2[key2] == code){
                start = d_value_2[key2];
                length = d_length_2[key2];
            }
        }
        //printf("%d %d %d\n", threadIdx.x, batch_index, length);
        if (length >= 256){
            // Offload long runs to a child grid on a private stream.
            cudaStream_t s;
            cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking);
            inc_range<<<1,256,0,s>>>(d_outputdist,d_bands_index,start,length,w_index,batch_index,vocab_size);
            // Fix: release the stream handle. Device-side stream destruction
            // is deferred until the child grid completes, so this is safe;
            // previously every long run leaked one stream.
            cudaStreamDestroy(s);
        } else {
            for (int i = 0 ; i< length; i ++ ){
                unsigned int word_index = d_bands_index[IDX2C(start + i, w_index, vocab_size)];
                atomicAdd(&d_outputdist[IDX2C(word_index, batch_index, vocab_size)], 1.0);
            }
        }
    }
}
// Warp-cooperative histogram variant: each thread publishes its run into
// shared memory, then every warp jointly expands the 32 runs produced by its
// own lanes (lane j handles elements j, j+stride, ... of each run).
// NOTE(review): s_start/s_length/s_w_index are written and then read across
// lanes with no __syncwarp() in between; this relies on implicit
// warp-synchronous execution, which is unsafe under Volta+ independent
// thread scheduling -- TODO confirm / add __syncwarp().
// NOTE(review): on the final partial pass (W % blockDim.x != 0) lanes that
// have exited the outer loop leave stale entries in s_*, and the inner loop
// still scans all 32 slots (it is bounded only by blockDim.x), so stale runs
// appear to be counted again -- compare the n_alive_thread guard in
// cuckoo_lookup_T_4; verify against the reference output.
template<typename dType>
__global__
void cuckoo_lookup_T_3(unsigned int *d_codes, dType *d_outputdist,int batch_size, int vocab_size, int W,
unsigned int *d_key_1, unsigned int *d_value_1, unsigned int * d_length_1,
unsigned int *d_key_2, unsigned int *d_value_2, unsigned int * d_length_2,
unsigned int *d_bands_index){
int batch_index = blockIdx.x;
const int maxThreads = 1024;
__shared__ int s_w_index[maxThreads];
__shared__ int s_start[maxThreads];
__shared__ int s_length[maxThreads];
for (int w_index = threadIdx.x; w_index < W; w_index += blockDim.x){
unsigned int code = d_codes[w_index + batch_index * W];
//cuckoo lookup;
unsigned int key1 = hash_func_1_gpu(code) % vocab_size + w_index * vocab_size;
int start = -1;
int length = 0;
if (d_key_1[key1] == code){
start = d_value_1[key1];
length = d_length_1[key1];
} else {
unsigned int key2 = hash_func_2_gpu(code) % vocab_size + w_index * vocab_size;
if (d_key_2[key2] == code){
start = d_value_2[key2];
length = d_length_2[key2];
}
}
s_w_index[threadIdx.x] = w_index;
s_start[threadIdx.x] = start;
s_length[threadIdx.x] = length;
int i_start = (threadIdx.x / 32) * 32; // first slot of this thread's warp
// Live lanes in this (possibly partial, last) warp of the block.
int nalive_thread_in_warp = (blockDim.x - i_start > 32) ? 32 : blockDim.x - i_start;
for (int i = i_start; i < i_start + 32 && i < blockDim.x; i++){
int _length = s_length[i];
int _w_index = s_w_index[i];
int _start = s_start[i];
// All live lanes of the warp cooperate on run i.
for (int j = threadIdx.x % 32; j < _length; j += nalive_thread_in_warp){
unsigned int word_index = d_bands_index[IDX2C(_start + j, _w_index, vocab_size)];
atomicAdd(&d_outputdist[IDX2C(word_index, batch_index, vocab_size)], 1.0);
}
}
}
}
// Block-cooperative histogram variant: one block per batch element.
// Phase 1: each thread does the cuckoo lookup for its band and publishes the
// resulting (w_index, start, length) run into shared memory.
// Phase 2: all threads of the block cooperatively drain the published runs,
// claiming one element at a time with atomicSub on s_length.
// NOTE(review): the w_index loop overwrites s_* on every pass but has no
// trailing __syncthreads(), so a fast warp can clobber entries another warp
// is still draining -- TODO confirm / add a barrier at the end of the loop.
template<typename dType>
__global__
void cuckoo_lookup_T_4(unsigned int *d_codes, dType *d_outputdist,int batch_size, int vocab_size, int W,
unsigned int *d_key_1, unsigned int *d_value_1, unsigned int * d_length_1,
unsigned int *d_key_2, unsigned int *d_value_2, unsigned int * d_length_2,
unsigned int *d_bands_index){
int batch_index = blockIdx.x;
const int maxThreads = 1024;
// One published (w_index, start, length) run per thread of the block.
__shared__ int s_w_index[maxThreads];
__shared__ int s_start[maxThreads];
__shared__ int s_length[maxThreads];
for (int w_index = threadIdx.x; w_index < W; w_index += blockDim.x){
unsigned int code = d_codes[w_index + batch_index * W];
//cuckoo lookup;
// Each band w_index owns a vocab_size-sized region in both hash tables.
unsigned int key1 = hash_func_1_gpu(code) % vocab_size + w_index * vocab_size;
int start = -1;
int length = 0; // stays 0 (no work) when the code is in neither table
if (d_key_1[key1] == code){
start = d_value_1[key1];
length = d_length_1[key1];
} else {
unsigned int key2 = hash_func_2_gpu(code) % vocab_size + w_index * vocab_size;
if (d_key_2[key2] == code){
start = d_value_2[key2];
length = d_length_2[key2];
}
}
s_w_index[threadIdx.x] = w_index;
s_start[threadIdx.x] = start;
s_length[threadIdx.x] = length;
__syncthreads();
// Number of threads that executed this pass (handles the tail when
// W is not a multiple of blockDim.x).
int n_alive_thread = (w_index >= W / blockDim.x * blockDim.x ) ? W - W / blockDim.x * blockDim.x : blockDim.x;
int i_start = (threadIdx.x / 32) * 32;
// Live lanes in this (possibly partial, last) warp of the block.
int nalive_thread_in_warp = (blockDim.x - i_start > 32) ? 32 : blockDim.x - i_start;
int ii = threadIdx.x % 32;
while(ii < n_alive_thread){
// Claim one element of run ii; a non-positive result means it is drained.
int _length = atomicSub(s_length+ii, 1);
if (_length > 0){
int _w_index = s_w_index[ii];
int _start = atomicAdd(s_start+ii, 1);
unsigned int word_index = d_bands_index[IDX2C(_start, _w_index, vocab_size)];
atomicAdd(&d_outputdist[IDX2C(word_index, batch_index, vocab_size)], 1.0);
} else {
ii += nalive_thread_in_warp; // move on to the next run this lane serves
}
}
}
}
//<<<(batch, (vocab + 10k-1) / 10k), std::min(W,1024)>>>
// Shared-memory histogram variant: grid is (batch, vocab tiles of N=10000).
// The intent is for each block to accumulate counts for its vocab tile in
// shared memory and flush once at the end, avoiding global atomics.
// NOTE(review): the accumulation and write-back sections are commented out,
// so as written the kernel only performs the cuckoo lookups (a benchmark
// stub) and produces no output.
template<typename dType>
__global__
void cuckoo_lookup_T_2(unsigned int *d_codes, dType *d_outputdist,int batch_size, int vocab_size, int W,
unsigned int *d_key_1, unsigned int *d_value_1, unsigned int * d_length_1,
unsigned int *d_key_2, unsigned int *d_value_2, unsigned int * d_length_2,
unsigned int *d_bands_index ){
// init the shared memory to zero
const int N = 10000; // vocab tile width; 10000 ints = 40 KB of shared memory
__shared__ int vocab_shared[N];
int word_index_begin = blockIdx.y * N;
int word_index_end = (word_index_begin + N > vocab_size) ? vocab_size : word_index_begin + N;
for (int i = threadIdx.x; i < N; i += blockDim.x){
vocab_shared[i] = 0;
}
__syncthreads();
int batch_index = blockIdx.x;
for (int w_index = threadIdx.x; w_index < W; w_index += blockDim.x){
unsigned int code = d_codes[w_index + batch_index * W];
//cuckoo lookup;
unsigned int key1 = hash_func_1_gpu(code) % vocab_size + w_index * vocab_size;
int start = -1;
int length = 0;
if (d_key_1[key1] == code){
start = d_value_1[key1];
length = d_length_1[key1];
} else {
unsigned int key2 = hash_func_2_gpu(code) % vocab_size + w_index * vocab_size;
if (d_key_2[key2] == code){
start = d_value_2[key2];
length = d_length_2[key2];
}
}
/*
if(length < 256){
for (int i =0; i< length; i ++){
unsigned int word_index = d_bands_index[IDX2C(start + i, w_index, vocab_size)];
if (word_index >= word_index_begin && word_index < word_index_end){
atomicAdd(vocab_shared + (word_index % N), 1);
}
}
}
*/
}
//__syncthreads();
/*
// copy the value back to global_memory
for(int i = threadIdx.x; i < word_index_end - word_index_begin; i += blockDim.x){
int wi = word_index_begin + i;
d_outputdist[IDX2C(wi,batch_index, vocab_size)] = vocab_shared[i];
}
*/
}
// Benchmark driver: loads the cuckoo hash tables and input codes, runs the
// lookup kernel once per Test iteration, and prints a slice of the computed
// vs. expected histogram. The //rows cols comment under each load records
// the expected matrix shape as printed by read_matrix.
int main() {
    unsigned int* d_bands_index = read_matrix<unsigned int>("./data/d_bands_index_input.txt.bin");
    //50000 1000
    unsigned int* d_ht_pad_codes = read_matrix<unsigned int>("./data/d_ht_pad_codes_input.txt.bin");
    //1000 12
    unsigned int* d_key1 = read_matrix<unsigned int>("./data/d_key1_input.txt.bin");
    //50000 1000
    unsigned int* d_key2 = read_matrix<unsigned int>("./data/d_key2_input.txt.bin");
    //50000 1000
    unsigned int* d_length1 = read_matrix<unsigned int>("./data/d_length1_input.txt.bin");
    //50000 1000
    unsigned int* d_length2 = read_matrix<unsigned int>("./data/d_length2_input.txt.bin");
    //50000 1000
    float* d_dist_input = read_matrix<float>("./data/d_outputdist_lookup_input.txt.bin");
    //50000 12
    unsigned int* d_value1 = read_matrix<unsigned int>("./data/d_value1_input.txt.bin");
    //50000 1000
    unsigned int* d_value2 = read_matrix<unsigned int>("./data/d_value2_input.txt.bin");
    //50000 1000
    // output (reference result to compare against)
    float* d_dist_output = read_matrix<float>("./data/d_outputdist_input.txt.bin");
    checkCudaError(cudaMalloc(&d_starts, 1024 * batch*sizeof(unsigned int)));
    checkCudaError(cudaMalloc(&d_lengths, 1024 * batch*sizeof(unsigned int)));
    { //dense2array
        float msecTotal = 0.0f;
        int Test = 1;
        // NOTE: these locals shadow the file-scope d_starts/d_lengths above.
        int *d_starts, *d_lengths;
        // Fix: check the allocations (they were unchecked).
        checkCudaError(cudaMalloc((void **)&d_starts, W * batch * sizeof(int)));
        checkCudaError(cudaMalloc((void **)&d_lengths, W * batch * sizeof(int)));
        cudaEvent_t start, stop;
        checkCudaError(cudaEventCreate(&start));
        checkCudaError(cudaEventCreate(&stop));
        checkCudaError(cudaEventRecord(start, NULL));
        for (int i = 0; i < Test; i++) {
            checkCudaError(cudaMemset(d_dist_input, 0, voc * batch* sizeof(float))); //0.02ms
            cuckoo_lookup_T_3<<<batch, std::min(1024,W)>>>(d_ht_pad_codes, d_dist_input, batch, voc, W, d_key1, d_value1, d_length1, d_key2, d_value2, d_length2, d_bands_index); // 0.9 ms
            checkCudaError(cudaDeviceSynchronize());
            //cuckoo_lookup_T<<<batch, std::min(1024,W)>>>(d_ht_pad_codes, d_dist_input, batch, voc, W, d_key1, d_value1, d_length1, d_key2, d_value2, d_length2, d_bands_index); // 0.9 ms
            //const int N = 10000;
            //cuckoo_lookup_T_2<<<dim3(batch, (voc+N-1)/N ), std::min(1024,W)>>>(d_ht_pad_codes, d_dist_input, batch, voc, W, d_key1, d_value1, d_length1, d_key2, d_value2, d_length2, d_bands_index); // 0.9 ms
        }
        checkCudaError(cudaEventRecord(stop, NULL));
        checkCudaError(cudaEventSynchronize(stop));
        checkCudaError(cudaEventElapsedTime(&msecTotal, start, stop));
        msecTotal /= Test;
        LOG(INFO) << "cuckoo_lookup Time= " << msecTotal << " msec, ";
        checkCudaError(cudaGetLastError());
        int offset = 0;
        std::cout << "d_dist_input\n";
        print_matrix_gpu(d_dist_input, voc, batch, offset, offset+10, 0, batch);
        std::cout << "d_dist_output\n";
        print_matrix_gpu(d_dist_output, voc, batch, offset, offset+10, 0, batch);
        /*
        std::cout << "d_bands_index\n";
        print_matrix_gpu(d_bands_index, voc, W, offset, offset+10, 0, batch);
        std::cout << "d_ht_pad_codes\n";
        print_matrix_gpu(d_ht_pad_codes, W, batch, offset, offset+10, 0, batch);
        std::cout << "d_key1\n";
        print_matrix_gpu(d_key1, voc, W, offset, offset+10, 0, batch);
        std::cout << "d_value1\n";
        print_matrix_gpu(d_value1, voc, W, offset, offset+10, 0, batch);
        std::cout << "d_length1\n";
        print_matrix_gpu(d_length1, voc, W, offset, offset+10, 0, batch);
        std::cout << "d_key2\n";
        print_matrix_gpu(d_key2, voc, W, offset, offset+10, 0, batch);
        std::cout << "d_value2\n";
        print_matrix_gpu(d_value2, voc, W, offset, offset+10, 0, batch);
        std::cout << "d_length2\n";
        print_matrix_gpu(d_length2, voc, W, offset, offset+10, 0, batch);
        */
        // Fix: destroy the timing events and free the local scratch buffers
        // (previously leaked; the locals shadow the globals, so the globals
        // stay valid).
        checkCudaError(cudaEventDestroy(start));
        checkCudaError(cudaEventDestroy(stop));
        checkCudaError(cudaFree(d_starts));
        checkCudaError(cudaFree(d_lengths));
    }
    // Input matrices and the global scratch buffers are reclaimed at process
    // exit (this is a one-shot benchmark binary).
    return 0;
}
|
407ee35c6df36d1aaf9010c247234bbe3e0f13b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <float.h>
#include <math.h>
#include <tuple>
#include "rasterize_coarse/bitmask.cuh"
#include "rasterize_points/rasterization_utils.cuh"
#include "utils/float_math.cuh"
#include "utils/geometry_utils.cuh" // For kEpsilon -- gross
// Computes a screen-space axis-aligned bounding box for every face, expanded
// on x/y by sqrt(blur_radius), and flags faces whose minimum z is below
// kEpsilon so later stages can skip them.
//   face_verts: (F, 3, 3) packed xyz triples, one row of 9 floats per face
//   bboxes:     (4, F) rows are xmin, xmax, ymin, ymax
//   skip_face:  (F,)
__global__ void TriangleBoundingBoxKernel(
    const float* face_verts, // (F, 3, 3)
    const int F,
    const float blur_radius,
    float* bboxes, // (4, F)
    bool* skip_face) { // (F,)
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  const int num_threads = blockDim.x * gridDim.x;
  const float expand = sqrtf(blur_radius);
  // Grid-stride loop: one face per iteration.
  for (int f = tid; f < F; f += num_threads) {
    const float* v = face_verts + f * 9; // v[3 * k + c]: coord c of vertex k
    const float x0 = v[0], y0 = v[1], z0 = v[2];
    const float x1 = v[3], y1 = v[4], z1 = v[5];
    const float x2 = v[6], y2 = v[7], z2 = v[8];
    bboxes[0 * F + f] = FloatMin3(x0, x1, x2) - expand;
    bboxes[1 * F + f] = FloatMax3(x0, x1, x2) + expand;
    bboxes[2 * F + f] = FloatMin3(y0, y1, y2) - expand;
    bboxes[3 * F + f] = FloatMax3(y0, y1, y2) + expand;
    skip_face[f] = FloatMin3(z0, z1, z2) < kEpsilon;
  }
}
// Computes a screen-space axis-aligned bounding box for every point (its
// center expanded by the per-point radius) and flags points behind the
// camera (z < 0).
//   points: (P, 3); radius: (P,); bboxes: (4, P) rows xmin/xmax/ymin/ymax
__global__ void PointBoundingBoxKernel(
    const float* points, // (P, 3)
    const float* radius, // (P,)
    const int P,
    float* bboxes, // (4, P)
    bool* skip_points) {
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  const int stride = blockDim.x * gridDim.x;
  // Grid-stride loop: one point per iteration.
  for (int p = tid; p < P; p += stride) {
    const float cx = points[p * 3 + 0];
    const float cy = points[p * 3 + 1];
    const float cz = points[p * 3 + 2];
    const float r = radius[p];
    bboxes[0 * P + p] = cx - r;
    bboxes[1 * P + p] = cx + r;
    bboxes[2 * P + p] = cy - r;
    bboxes[3 * P + p] = cy + r;
    // TODO: change to kEpsilon to match triangles?
    skip_points[p] = cz < 0;
  }
}
// Coarse-phase rasterizer: bins candidate elements (faces or points) into
// image tiles ("bins") of bin_size x bin_size pixels. Each block iterates
// over chunks of chunk_size elements for one batch element, marks bin
// membership in a shared-memory bitmask, then appends the chunk's hits to
// the global per-bin lists.
//   bboxes:        (4, E) per-element xmin/xmax/ymin/ymax in NDC
//   should_skip:   (E,) elements to ignore entirely
//   elems_per_bin: (N, num_bins_y, num_bins_x) output counts
//   bin_elems:     (N, num_bins_y, num_bins_x, M) output element indices
// Dynamic shared memory: num_bins_y * num_bins_x * chunk_size / 8 bytes.
__global__ void RasterizeCoarseCudaKernel(
const float* bboxes, // (4, E) (xmin, xmax, ymin, ymax)
const bool* should_skip, // (E,)
const int64_t* elem_first_idxs,
const int64_t* elems_per_batch,
const int N,
const int E,
const int H,
const int W,
const int bin_size,
const int chunk_size,
const int max_elem_per_bin,
int* elems_per_bin,
int* bin_elems) {
extern __shared__ char sbuf[];
const int M = max_elem_per_bin;
// Integer divide round up
const int num_bins_x = 1 + (W - 1) / bin_size;
const int num_bins_y = 1 + (H - 1) / bin_size;
// NDC range depends on the ratio of W/H
// The shorter side from (H, W) is given an NDC range of 2.0 and
// the other side is scaled by the ratio of H:W.
const float NDC_x_half_range = NonSquareNdcRange(W, H) / 2.0f;
const float NDC_y_half_range = NonSquareNdcRange(H, W) / 2.0f;
// Size of half a pixel in NDC units is the NDC half range
// divided by the corresponding image dimension
const float half_pix_x = NDC_x_half_range / W;
const float half_pix_y = NDC_y_half_range / H;
// This is a boolean array of shape (num_bins_y, num_bins_x, chunk_size)
// stored in shared memory that will track whether each elem in the chunk
// falls into each bin of the image.
BitMask binmask((unsigned int*)sbuf, num_bins_y, num_bins_x, chunk_size);
// Have each block handle a chunk of elements
const int chunks_per_batch = 1 + (E - 1) / chunk_size;
const int num_chunks = N * chunks_per_batch;
for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) {
const int batch_idx = chunk / chunks_per_batch; // batch index
const int chunk_idx = chunk % chunks_per_batch;
const int elem_chunk_start_idx = chunk_idx * chunk_size;
binmask.block_clear();
const int64_t elem_start_idx = elem_first_idxs[batch_idx];
const int64_t elem_stop_idx = elem_start_idx + elems_per_batch[batch_idx];
// Have each thread handle a different face within the chunk
for (int e = threadIdx.x; e < chunk_size; e += blockDim.x) {
const int e_idx = elem_chunk_start_idx + e;
// Check that we are still within the same element of the batch
if (e_idx >= elem_stop_idx || e_idx < elem_start_idx) {
continue;
}
if (should_skip[e_idx]) {
continue;
}
const float xmin = bboxes[0 * E + e_idx];
const float xmax = bboxes[1 * E + e_idx];
const float ymin = bboxes[2 * E + e_idx];
const float ymax = bboxes[3 * E + e_idx];
// Brute-force search over all bins; TODO(T54294966) something smarter.
for (int by = 0; by < num_bins_y; ++by) {
// Y coordinate of the top and bottom of the bin.
// PixToNdc gives the location of the center of each pixel, so we
// need to add/subtract a half pixel to get the true extent of the bin.
// Reverse ordering of Y axis so that +Y is upwards in the image.
const float bin_y_min =
PixToNonSquareNdc(by * bin_size, H, W) - half_pix_y;
const float bin_y_max =
PixToNonSquareNdc((by + 1) * bin_size - 1, H, W) + half_pix_y;
const bool y_overlap = (ymin <= bin_y_max) && (bin_y_min < ymax);
for (int bx = 0; bx < num_bins_x; ++bx) {
// X coordinate of the left and right of the bin.
// Reverse ordering of x axis so that +X is left.
const float bin_x_max =
PixToNonSquareNdc((bx + 1) * bin_size - 1, W, H) + half_pix_x;
const float bin_x_min =
PixToNonSquareNdc(bx * bin_size, W, H) - half_pix_x;
const bool x_overlap = (xmin <= bin_x_max) && (bin_x_min < xmax);
if (y_overlap && x_overlap) {
binmask.set(by, bx, e);
}
}
}
}
__syncthreads();
// Now we have processed every elem in the current chunk. We need to
// count the number of elems in each bin so we can write the indices
// out to global memory. We have each thread handle a different bin.
for (int byx = threadIdx.x; byx < num_bins_y * num_bins_x;
byx += blockDim.x) {
const int by = byx / num_bins_x;
const int bx = byx % num_bins_x;
const int count = binmask.count(by, bx);
const int elems_per_bin_idx =
batch_idx * num_bins_y * num_bins_x + by * num_bins_x + bx;
// This atomically increments the (global) number of elems found
// in the current bin, and gets the previous value of the counter;
// this effectively allocates space in the bin_faces array for the
// elems in the current chunk that fall into this bin.
const int start = atomicAdd(elems_per_bin + elems_per_bin_idx, count);
// Now loop over the binmask and write the active bits for this bin
// out to bin_faces.
// NOTE(review): next_idx is not clamped to M, so a bin receiving more
// than max_elem_per_bin elements appears to write past its slot
// capacity -- see the TODO below.
int next_idx = batch_idx * num_bins_y * num_bins_x * M +
by * num_bins_x * M + bx * M + start;
for (int e = 0; e < chunk_size; ++e) {
if (binmask.get(by, bx, e)) {
// TODO(T54296346) find the correct method for handling errors in
// CUDA. Throw an error if num_faces_per_bin > max_faces_per_bin.
// Either decrease bin size or increase max_faces_per_bin
bin_elems[next_idx] = elem_chunk_start_idx + e;
next_idx++;
}
}
}
__syncthreads();
}
}
// Host wrapper for the coarse rasterization kernel, shared by the mesh and
// point entry points. Validates the bin grid size, allocates the per-bin
// count and index tensors, and launches RasterizeCoarseCudaKernel.
// Returns bin_elems: (N, num_bins_y, num_bins_x, max_elems_per_bin) int32,
// padded with -1.
at::Tensor RasterizeCoarseCuda(
const at::Tensor& bboxes,
const at::Tensor& should_skip,
const at::Tensor& elem_first_idxs,
const at::Tensor& elems_per_batch,
const std::tuple<int, int> image_size,
const int bin_size,
const int max_elems_per_bin) {
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(bboxes.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int H = std::get<0>(image_size);
const int W = std::get<1>(image_size);
const int E = bboxes.size(1);
const int N = elems_per_batch.size(0);
const int M = max_elems_per_bin;
// Integer divide round up
const int num_bins_y = 1 + (H - 1) / bin_size;
const int num_bins_x = 1 + (W - 1) / bin_size;
// The shared-memory bitmask bounds how many bins the kernel can handle.
if (num_bins_y >= kMaxItemsPerBin || num_bins_x >= kMaxItemsPerBin) {
std::stringstream ss;
ss << "In RasterizeCoarseCuda got num_bins_y: " << num_bins_y
<< ", num_bins_x: " << num_bins_x << ", "
<< "; that's too many!";
AT_ERROR(ss.str());
}
auto opts = elems_per_batch.options().dtype(at::kInt);
at::Tensor elems_per_bin = at::zeros({N, num_bins_y, num_bins_x}, opts);
at::Tensor bin_elems = at::full({N, num_bins_y, num_bins_x, M}, -1, opts);
// Nothing to rasterize: return the empty result without launching.
if (bin_elems.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return bin_elems;
}
// One bitmask covers chunk_size elements; shared_size is its byte count.
const int chunk_size = 512;
const size_t shared_size = num_bins_y * num_bins_x * chunk_size / 8;
const size_t blocks = 64;
const size_t threads = 512;
hipLaunchKernelGGL(( RasterizeCoarseCudaKernel), dim3(blocks), dim3(threads), shared_size, stream,
bboxes.contiguous().data_ptr<float>(),
should_skip.contiguous().data_ptr<bool>(),
elem_first_idxs.contiguous().data_ptr<int64_t>(),
elems_per_batch.contiguous().data_ptr<int64_t>(),
N,
E,
H,
W,
bin_size,
chunk_size,
M,
elems_per_bin.data_ptr<int32_t>(),
bin_elems.data_ptr<int32_t>());
AT_CUDA_CHECK(hipGetLastError());
return bin_elems;
}
// Coarse rasterization entry point for meshes: computes per-face bounding
// boxes (expanded by sqrt(blur_radius)) and skip flags with
// TriangleBoundingBoxKernel, then bins the faces into image tiles via
// RasterizeCoarseCuda. Returns the (N, bins_y, bins_x, max_faces_per_bin)
// bin_elems tensor.
at::Tensor RasterizeMeshesCoarseCuda(
const at::Tensor& face_verts,
const at::Tensor& mesh_to_face_first_idx,
const at::Tensor& num_faces_per_mesh,
const std::tuple<int, int> image_size,
const float blur_radius,
const int bin_size,
const int max_faces_per_bin) {
TORCH_CHECK(
face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
face_verts.size(2) == 3,
"face_verts must have dimensions (num_faces, 3, 3)");
// Check inputs are on the same device
at::TensorArg face_verts_t{face_verts, "face_verts", 1},
mesh_to_face_first_idx_t{
mesh_to_face_first_idx, "mesh_to_face_first_idx", 2},
num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3};
at::CheckedFrom c = "RasterizeMeshesCoarseCuda";
at::checkAllSameGPU(
c, {face_verts_t, mesh_to_face_first_idx_t, num_faces_per_mesh_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(face_verts.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Allocate tensors for bboxes and should_skip
const int F = face_verts.size(0);
auto float_opts = face_verts.options().dtype(at::kFloat);
auto bool_opts = face_verts.options().dtype(at::kBool);
at::Tensor bboxes = at::empty({4, F}, float_opts);
at::Tensor should_skip = at::empty({F}, bool_opts);
// Launch kernel to compute triangle bboxes
const size_t blocks = 128;
const size_t threads = 256;
hipLaunchKernelGGL(( TriangleBoundingBoxKernel), dim3(blocks), dim3(threads), 0, stream,
face_verts.contiguous().data_ptr<float>(),
F,
blur_radius,
bboxes.contiguous().data_ptr<float>(),
should_skip.contiguous().data_ptr<bool>());
AT_CUDA_CHECK(hipGetLastError());
return RasterizeCoarseCuda(
bboxes,
should_skip,
mesh_to_face_first_idx,
num_faces_per_mesh,
image_size,
bin_size,
max_faces_per_bin);
}
// Coarse rasterization entry point for point clouds: computes per-point
// radius-expanded bounding boxes and skip flags with PointBoundingBoxKernel,
// then bins the points into image tiles via RasterizeCoarseCuda. Returns
// the (N, bins_y, bins_x, max_points_per_bin) bin_elems tensor.
at::Tensor RasterizePointsCoarseCuda(
const at::Tensor& points, // (P, 3)
const at::Tensor& cloud_to_packed_first_idx, // (N,)
const at::Tensor& num_points_per_cloud, // (N,)
const std::tuple<int, int> image_size,
const at::Tensor& radius,
const int bin_size,
const int max_points_per_bin) {
TORCH_CHECK(
points.ndimension() == 2 && points.size(1) == 3,
"points must have dimensions (num_points, 3)");
// Check inputs are on the same device
at::TensorArg points_t{points, "points", 1},
cloud_to_packed_first_idx_t{
cloud_to_packed_first_idx, "cloud_to_packed_first_idx", 2},
num_points_per_cloud_t{num_points_per_cloud, "num_points_per_cloud", 3};
at::CheckedFrom c = "RasterizePointsCoarseCuda";
at::checkAllSameGPU(
c, {points_t, cloud_to_packed_first_idx_t, num_points_per_cloud_t});
// Set the device for the kernel launch based on the device of the input
at::hip::HIPGuardMasqueradingAsCUDA device_guard(points.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Allocate tensors for bboxes and should_skip
const int P = points.size(0);
auto float_opts = points.options().dtype(at::kFloat);
auto bool_opts = points.options().dtype(at::kBool);
at::Tensor bboxes = at::empty({4, P}, float_opts);
at::Tensor should_skip = at::empty({P}, bool_opts);
// Launch kernel to compute point bboxes
const size_t blocks = 128;
const size_t threads = 256;
hipLaunchKernelGGL(( PointBoundingBoxKernel), dim3(blocks), dim3(threads), 0, stream,
points.contiguous().data_ptr<float>(),
radius.contiguous().data_ptr<float>(),
P,
bboxes.contiguous().data_ptr<float>(),
should_skip.contiguous().data_ptr<bool>());
AT_CUDA_CHECK(hipGetLastError());
return RasterizeCoarseCuda(
bboxes,
should_skip,
cloud_to_packed_first_idx,
num_points_per_cloud,
image_size,
bin_size,
max_points_per_bin);
}
| 407ee35c6df36d1aaf9010c247234bbe3e0f13b6.cu | /*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <float.h>
#include <math.h>
#include <tuple>
#include "rasterize_coarse/bitmask.cuh"
#include "rasterize_points/rasterization_utils.cuh"
#include "utils/float_math.cuh"
#include "utils/geometry_utils.cuh" // For kEpsilon -- gross
// Computes a screen-space axis-aligned bounding box for every face, expanded
// on x/y by sqrt(blur_radius), and flags faces whose minimum z is below
// kEpsilon so later stages can skip them.
//   face_verts: (F, 3, 3) packed xyz triples, one row of 9 floats per face
//   bboxes:     (4, F) rows are xmin, xmax, ymin, ymax
//   skip_face:  (F,)
__global__ void TriangleBoundingBoxKernel(
    const float* face_verts, // (F, 3, 3)
    const int F,
    const float blur_radius,
    float* bboxes, // (4, F)
    bool* skip_face) { // (F,)
  const int tid = blockIdx.x * blockDim.x + threadIdx.x;
  const int num_threads = blockDim.x * gridDim.x;
  const float expand = sqrtf(blur_radius);
  // Grid-stride loop: one face per iteration.
  for (int f = tid; f < F; f += num_threads) {
    const float* v = face_verts + f * 9; // v[3 * k + c]: coord c of vertex k
    const float x0 = v[0], y0 = v[1], z0 = v[2];
    const float x1 = v[3], y1 = v[4], z1 = v[5];
    const float x2 = v[6], y2 = v[7], z2 = v[8];
    bboxes[0 * F + f] = FloatMin3(x0, x1, x2) - expand;
    bboxes[1 * F + f] = FloatMax3(x0, x1, x2) + expand;
    bboxes[2 * F + f] = FloatMin3(y0, y1, y2) - expand;
    bboxes[3 * F + f] = FloatMax3(y0, y1, y2) + expand;
    skip_face[f] = FloatMin3(z0, z1, z2) < kEpsilon;
  }
}
// Compute a screen-space axis-aligned bounding box for every point,
// using that point's per-point blur radius as the half-extent.
//
// Launch: 1D grid / 1D block; a grid-stride loop covers all P points.
//
// Outputs (transposed layout, matching TriangleBoundingBoxKernel):
//  - bboxes: (4, P), rows are (xmin, xmax, ymin, ymax).
//  - skip_points: (P,) true for points behind the camera (z < 0).
__global__ void PointBoundingBoxKernel(
    const float* points, // (P, 3)
    const float* radius, // (P,)
    const int P,
    float* bboxes, // (4, P)
    bool* skip_points) {
  const int first = blockIdx.x * blockDim.x + threadIdx.x;
  const int stride = blockDim.x * gridDim.x;
  for (int idx = first; idx < P; idx += stride) {
    const float cx = points[idx * 3 + 0];
    const float cy = points[idx * 3 + 1];
    const float cz = points[idx * 3 + 2];
    const float rad = radius[idx];
    bboxes[0 * P + idx] = cx - rad;
    bboxes[1 * P + idx] = cx + rad;
    bboxes[2 * P + idx] = cy - rad;
    bboxes[3 * P + idx] = cy + rad;
    // TODO: change to kEpsilon to match triangles?
    skip_points[idx] = cz < 0;
  }
}
// Coarse rasterization kernel: assigns each element ("elem" = a face or a
// point, represented here only by its NDC bounding box) to every image bin
// (tile of bin_size x bin_size pixels) that its bbox overlaps.
//
// Launch expectations:
//  - 1D grid / 1D block; each block processes whole chunks of elems via a
//    grid-stride loop over (batch, chunk) pairs.
//  - Dynamic shared memory: num_bins_y * num_bins_x * chunk_size / 8 bytes,
//    used as a BitMask with one bit per (bin_y, bin_x, elem-in-chunk).
//
// Outputs:
//  - elems_per_bin: (N, num_bins_y, num_bins_x) running counts per bin
//    (must be zero-initialized by the caller; updated with atomicAdd).
//  - bin_elems: (N, num_bins_y, num_bins_x, M) packed elem indices; slots
//    past the count keep the caller's fill value.
__global__ void RasterizeCoarseCudaKernel(
    const float* bboxes, // (4, E) (xmin, xmax, ymin, ymax)
    const bool* should_skip, // (E,)
    const int64_t* elem_first_idxs,
    const int64_t* elems_per_batch,
    const int N,
    const int E,
    const int H,
    const int W,
    const int bin_size,
    const int chunk_size,
    const int max_elem_per_bin,
    int* elems_per_bin,
    int* bin_elems) {
  extern __shared__ char sbuf[];
  const int M = max_elem_per_bin;
  // Integer divide round up
  const int num_bins_x = 1 + (W - 1) / bin_size;
  const int num_bins_y = 1 + (H - 1) / bin_size;
  // NDC range depends on the ratio of W/H
  // The shorter side from (H, W) is given an NDC range of 2.0 and
  // the other side is scaled by the ratio of H:W.
  const float NDC_x_half_range = NonSquareNdcRange(W, H) / 2.0f;
  const float NDC_y_half_range = NonSquareNdcRange(H, W) / 2.0f;
  // Size of half a pixel in NDC units is the NDC half range
  // divided by the corresponding image dimension
  const float half_pix_x = NDC_x_half_range / W;
  const float half_pix_y = NDC_y_half_range / H;
  // This is a boolean array of shape (num_bins_y, num_bins_x, chunk_size)
  // stored in shared memory that will track whether each elem in the chunk
  // falls into each bin of the image.
  BitMask binmask((unsigned int*)sbuf, num_bins_y, num_bins_x, chunk_size);
  // Have each block handle a chunk of elements
  const int chunks_per_batch = 1 + (E - 1) / chunk_size;
  const int num_chunks = N * chunks_per_batch;
  for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) {
    const int batch_idx = chunk / chunks_per_batch; // batch index
    const int chunk_idx = chunk % chunks_per_batch;
    const int elem_chunk_start_idx = chunk_idx * chunk_size;
    // Reset the shared bitmask for this chunk (cooperative, block-wide).
    binmask.block_clear();
    const int64_t elem_start_idx = elem_first_idxs[batch_idx];
    const int64_t elem_stop_idx = elem_start_idx + elems_per_batch[batch_idx];
    // Have each thread handle a different face within the chunk
    for (int e = threadIdx.x; e < chunk_size; e += blockDim.x) {
      const int e_idx = elem_chunk_start_idx + e;
      // Check that we are still within the same element of the batch
      if (e_idx >= elem_stop_idx || e_idx < elem_start_idx) {
        continue;
      }
      if (should_skip[e_idx]) {
        continue;
      }
      const float xmin = bboxes[0 * E + e_idx];
      const float xmax = bboxes[1 * E + e_idx];
      const float ymin = bboxes[2 * E + e_idx];
      const float ymax = bboxes[3 * E + e_idx];
      // Brute-force search over all bins; TODO(T54294966) something smarter.
      for (int by = 0; by < num_bins_y; ++by) {
        // Y coordinate of the top and bottom of the bin.
        // PixToNdc gives the location of the center of each pixel, so we
        // need to add/subtract a half pixel to get the true extent of the bin.
        // Reverse ordering of Y axis so that +Y is upwards in the image.
        const float bin_y_min =
            PixToNonSquareNdc(by * bin_size, H, W) - half_pix_y;
        const float bin_y_max =
            PixToNonSquareNdc((by + 1) * bin_size - 1, H, W) + half_pix_y;
        const bool y_overlap = (ymin <= bin_y_max) && (bin_y_min < ymax);
        for (int bx = 0; bx < num_bins_x; ++bx) {
          // X coordinate of the left and right of the bin.
          // Reverse ordering of x axis so that +X is left.
          const float bin_x_max =
              PixToNonSquareNdc((bx + 1) * bin_size - 1, W, H) + half_pix_x;
          const float bin_x_min =
              PixToNonSquareNdc(bx * bin_size, W, H) - half_pix_x;
          const bool x_overlap = (xmin <= bin_x_max) && (bin_x_min < xmax);
          if (y_overlap && x_overlap) {
            binmask.set(by, bx, e);
          }
        }
      }
    }
    // Barrier: all bitmask writes above must be visible block-wide before
    // the per-bin counting phase below reads them.
    __syncthreads();
    // Now we have processed every elem in the current chunk. We need to
    // count the number of elems in each bin so we can write the indices
    // out to global memory. We have each thread handle a different bin.
    for (int byx = threadIdx.x; byx < num_bins_y * num_bins_x;
         byx += blockDim.x) {
      const int by = byx / num_bins_x;
      const int bx = byx % num_bins_x;
      const int count = binmask.count(by, bx);
      const int elems_per_bin_idx =
          batch_idx * num_bins_y * num_bins_x + by * num_bins_x + bx;
      // This atomically increments the (global) number of elems found
      // in the current bin, and gets the previous value of the counter;
      // this effectively allocates space in the bin_faces array for the
      // elems in the current chunk that fall into this bin.
      const int start = atomicAdd(elems_per_bin + elems_per_bin_idx, count);
      // Now loop over the binmask and write the active bits for this bin
      // out to bin_faces.
      int next_idx = batch_idx * num_bins_y * num_bins_x * M +
          by * num_bins_x * M + bx * M + start;
      for (int e = 0; e < chunk_size; ++e) {
        if (binmask.get(by, bx, e)) {
          // TODO(T54296346) find the correct method for handling errors in
          // CUDA. Throw an error if num_faces_per_bin > max_faces_per_bin.
          // Either decrease bin size or increase max_faces_per_bin
          bin_elems[next_idx] = elem_chunk_start_idx + e;
          next_idx++;
        }
      }
    }
    // Barrier: all reads of the bitmask must complete before the next
    // loop iteration clears it via block_clear().
    __syncthreads();
  }
}
// Host-side driver for coarse rasterization: bins precomputed element
// bounding boxes (faces or points) into image tiles of bin_size pixels.
//
// Returns an int32 tensor of shape (N, num_bins_y, num_bins_x, M) holding
// packed element indices per bin; unused slots are -1.
at::Tensor RasterizeCoarseCuda(
    const at::Tensor& bboxes,
    const at::Tensor& should_skip,
    const at::Tensor& elem_first_idxs,
    const at::Tensor& elems_per_batch,
    const std::tuple<int, int> image_size,
    const int bin_size,
    const int max_elems_per_bin) {
  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(bboxes.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  const int image_h = std::get<0>(image_size);
  const int image_w = std::get<1>(image_size);
  const int num_elems = bboxes.size(1);
  const int num_batches = elems_per_batch.size(0);
  const int bin_capacity = max_elems_per_bin;

  // Ceil-divide the image into bins along each axis.
  const int num_bins_y = 1 + (image_h - 1) / bin_size;
  const int num_bins_x = 1 + (image_w - 1) / bin_size;
  if (num_bins_y >= kMaxItemsPerBin || num_bins_x >= kMaxItemsPerBin) {
    std::stringstream ss;
    ss << "In RasterizeCoarseCuda got num_bins_y: " << num_bins_y
       << ", num_bins_x: " << num_bins_x << ", "
       << "; that's too many!";
    AT_ERROR(ss.str());
  }

  auto opts = elems_per_batch.options().dtype(at::kInt);
  at::Tensor elems_per_bin =
      at::zeros({num_batches, num_bins_y, num_bins_x}, opts);
  at::Tensor bin_elems =
      at::full({num_batches, num_bins_y, num_bins_x, bin_capacity}, -1, opts);
  if (bin_elems.numel() == 0) {
    // Nothing to rasterize; surface any pending CUDA error and bail out.
    AT_CUDA_CHECK(cudaGetLastError());
    return bin_elems;
  }

  // Shared memory holds one bit per (bin_y, bin_x, elem-in-chunk) triple.
  const int chunk_size = 512;
  const size_t shared_size = num_bins_y * num_bins_x * chunk_size / 8;
  const size_t blocks = 64;
  const size_t threads = 512;

  RasterizeCoarseCudaKernel<<<blocks, threads, shared_size, stream>>>(
      bboxes.contiguous().data_ptr<float>(),
      should_skip.contiguous().data_ptr<bool>(),
      elem_first_idxs.contiguous().data_ptr<int64_t>(),
      elems_per_batch.contiguous().data_ptr<int64_t>(),
      num_batches,
      num_elems,
      image_h,
      image_w,
      bin_size,
      chunk_size,
      bin_capacity,
      elems_per_bin.data_ptr<int32_t>(),
      bin_elems.data_ptr<int32_t>());
  AT_CUDA_CHECK(cudaGetLastError());
  return bin_elems;
}
// CUDA entry point for coarse rasterization of mesh faces: computes a
// screen-space bounding box per face (padded by sqrt(blur_radius)), then
// delegates the binning work to RasterizeCoarseCuda.
at::Tensor RasterizeMeshesCoarseCuda(
    const at::Tensor& face_verts,
    const at::Tensor& mesh_to_face_first_idx,
    const at::Tensor& num_faces_per_mesh,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int bin_size,
    const int max_faces_per_bin) {
  TORCH_CHECK(
      face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
          face_verts.size(2) == 3,
      "face_verts must have dimensions (num_faces, 3, 3)");

  // All inputs must live on the same GPU.
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      mesh_to_face_first_idx_t{
          mesh_to_face_first_idx, "mesh_to_face_first_idx", 2},
      num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3};
  at::CheckedFrom check_name = "RasterizeMeshesCoarseCuda";
  at::checkAllSameGPU(
      check_name,
      {face_verts_t, mesh_to_face_first_idx_t, num_faces_per_mesh_t});

  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(face_verts.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  // Allocate outputs for the bbox kernel.
  const int num_faces = face_verts.size(0);
  auto opts_float = face_verts.options().dtype(at::kFloat);
  auto opts_bool = face_verts.options().dtype(at::kBool);
  at::Tensor bboxes = at::empty({4, num_faces}, opts_float);
  at::Tensor should_skip = at::empty({num_faces}, opts_bool);

  // Fill bboxes / should_skip on the GPU.
  constexpr size_t kBlocks = 128;
  constexpr size_t kThreads = 256;
  TriangleBoundingBoxKernel<<<kBlocks, kThreads, 0, stream>>>(
      face_verts.contiguous().data_ptr<float>(),
      num_faces,
      blur_radius,
      bboxes.contiguous().data_ptr<float>(),
      should_skip.contiguous().data_ptr<bool>());
  AT_CUDA_CHECK(cudaGetLastError());

  return RasterizeCoarseCuda(
      bboxes,
      should_skip,
      mesh_to_face_first_idx,
      num_faces_per_mesh,
      image_size,
      bin_size,
      max_faces_per_bin);
}
// CUDA entry point for coarse rasterization of point clouds: computes a
// screen-space bounding box per point from its per-point radius, then
// delegates the binning work to RasterizeCoarseCuda.
at::Tensor RasterizePointsCoarseCuda(
    const at::Tensor& points, // (P, 3)
    const at::Tensor& cloud_to_packed_first_idx, // (N,)
    const at::Tensor& num_points_per_cloud, // (N,)
    const std::tuple<int, int> image_size,
    const at::Tensor& radius,
    const int bin_size,
    const int max_points_per_bin) {
  TORCH_CHECK(
      points.ndimension() == 2 && points.size(1) == 3,
      "points must have dimensions (num_points, 3)");

  // All inputs must live on the same GPU.
  at::TensorArg points_t{points, "points", 1},
      cloud_to_packed_first_idx_t{
          cloud_to_packed_first_idx, "cloud_to_packed_first_idx", 2},
      num_points_per_cloud_t{num_points_per_cloud, "num_points_per_cloud", 3};
  at::CheckedFrom check_name = "RasterizePointsCoarseCuda";
  at::checkAllSameGPU(
      check_name,
      {points_t, cloud_to_packed_first_idx_t, num_points_per_cloud_t});

  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(points.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  // Allocate outputs for the bbox kernel.
  const int num_points = points.size(0);
  auto opts_float = points.options().dtype(at::kFloat);
  auto opts_bool = points.options().dtype(at::kBool);
  at::Tensor bboxes = at::empty({4, num_points}, opts_float);
  at::Tensor should_skip = at::empty({num_points}, opts_bool);

  // Fill bboxes / should_skip on the GPU.
  constexpr size_t kBlocks = 128;
  constexpr size_t kThreads = 256;
  PointBoundingBoxKernel<<<kBlocks, kThreads, 0, stream>>>(
      points.contiguous().data_ptr<float>(),
      radius.contiguous().data_ptr<float>(),
      num_points,
      bboxes.contiguous().data_ptr<float>(),
      should_skip.contiguous().data_ptr<bool>());
  AT_CUDA_CHECK(cudaGetLastError());

  return RasterizeCoarseCuda(
      bboxes,
      should_skip,
      cloud_to_packed_first_idx,
      num_points_per_cloud,
      image_size,
      bin_size,
      max_points_per_bin);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.