hip_filename stringlengths 5 84 | hip_content stringlengths 79 9.69M | cuda_filename stringlengths 4 83 | cuda_content stringlengths 19 9.69M |
|---|---|---|---|
connectivity.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "scc_matrix.cuh"
#include "weak_cc_hip.cuh"
#include <thrust/sequence.h>
#include <algorithms.hpp>
#include <cstdint>
#include <graph.hpp>
#include <iostream>
#include <type_traits>
#include "utilities/error.hpp"
#include "utilities/graph_utils.cuh"
#include "topology/topology.cuh"
namespace cugraph {
namespace detail {
/**
* @brief Compute connected components.
* The weak version (for undirected graphs, only) was imported from cuML.
* This implementation comes from [1] and solves component labeling problem in
* parallel on CSR-indexes based upon the vertex degree and adjacency graph.
*
* [1] Hawick, K.A et al, 2010. "Parallel graph component labelling with GPUs and CUDA"
*
* The strong version (for directed or undirected graphs) is based on:
* [2] Gilbert, J. et al, 2011. "Graph Algorithms in the Language of Linear Algebra"
*
* C = I | A | A^2 |...| A^k
* where matrix multiplication is via semi-ring:
* (combine, reduce) == (&, |) (bitwise ops)
* Then: X = C & transpose(C); and finally, apply get_labels(X);
*
*
* @tparam IndexT the numeric type of non-floating point elements
* @tparam TPB_X the threads to use per block when configuring the kernel
* @param graph input graph; assumed undirected for weakly CC [in]
* @param connectivity_type CUGRAPH_WEAK or CUGRAPH_STRONG [in]
* @param stream the cuda stream [in]
*/
template <typename VT, typename ET, typename WT, int TPB_X = 32>
std::enable_if_t<std::is_signed<VT>::value> connected_components_impl(
GraphCSRView<VT, ET, WT> const &graph,
cugraph_cc_t connectivity_type,
VT *labels,
hipStream_t stream)
{
using ByteT = unsigned char; // minimum addressable unit
CUGRAPH_EXPECTS(graph.offsets != nullptr, "Invalid input argument: graph.offsets is nullptr");
CUGRAPH_EXPECTS(graph.indices != nullptr, "Invalid input argument: graph.indices is nullptr");
VT nrows = graph.number_of_vertices;
if (connectivity_type == cugraph_cc_t::CUGRAPH_WEAK) {
MLCommon::Sparse::weak_cc_entry<VT, ET, TPB_X>(labels,
graph.offsets,
graph.indices,
graph.number_of_edges,
graph.number_of_vertices,
stream);
} else {
SCC_Data<ByteT, VT> sccd(nrows, graph.offsets, graph.indices);
auto num_iters = sccd.run_scc(labels);
}
}
} // namespace detail
template <typename VT, typename ET, typename WT>
void connected_components(GraphCSRView<VT, ET, WT> const &graph,
cugraph_cc_t connectivity_type,
VT *labels)
{
hipStream_t stream{nullptr};
CUGRAPH_EXPECTS(labels != nullptr, "Invalid input argument: labels parameter is NULL");
return detail::connected_components_impl<VT, ET, WT>(graph, connectivity_type, labels, stream);
}
template void connected_components<int32_t, int32_t, float>(
GraphCSRView<int32_t, int32_t, float> const &, cugraph_cc_t, int32_t *);
template void connected_components<int64_t, int64_t, float>(
GraphCSRView<int64_t, int64_t, float> const &, cugraph_cc_t, int64_t *);
} // namespace cugraph
| connectivity.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "scc_matrix.cuh"
#include "weak_cc.cuh"
#include <thrust/sequence.h>
#include <algorithms.hpp>
#include <cstdint>
#include <graph.hpp>
#include <iostream>
#include <type_traits>
#include "utilities/error.hpp"
#include "utilities/graph_utils.cuh"
#include "topology/topology.cuh"
namespace cugraph {
namespace detail {
/**
* @brief Compute connected components.
* The weak version (for undirected graphs, only) was imported from cuML.
* This implementation comes from [1] and solves component labeling problem in
* parallel on CSR-indexes based upon the vertex degree and adjacency graph.
*
* [1] Hawick, K.A et al, 2010. "Parallel graph component labelling with GPUs and CUDA"
*
* The strong version (for directed or undirected graphs) is based on:
* [2] Gilbert, J. et al, 2011. "Graph Algorithms in the Language of Linear Algebra"
*
* C = I | A | A^2 |...| A^k
* where matrix multiplication is via semi-ring:
* (combine, reduce) == (&, |) (bitwise ops)
* Then: X = C & transpose(C); and finally, apply get_labels(X);
*
*
* @tparam IndexT the numeric type of non-floating point elements
* @tparam TPB_X the threads to use per block when configuring the kernel
* @param graph input graph; assumed undirected for weakly CC [in]
* @param connectivity_type CUGRAPH_WEAK or CUGRAPH_STRONG [in]
* @param stream the cuda stream [in]
*/
template <typename VT, typename ET, typename WT, int TPB_X = 32>
std::enable_if_t<std::is_signed<VT>::value> connected_components_impl(
GraphCSRView<VT, ET, WT> const &graph,
cugraph_cc_t connectivity_type,
VT *labels,
cudaStream_t stream)
{
using ByteT = unsigned char; // minimum addressable unit
CUGRAPH_EXPECTS(graph.offsets != nullptr, "Invalid input argument: graph.offsets is nullptr");
CUGRAPH_EXPECTS(graph.indices != nullptr, "Invalid input argument: graph.indices is nullptr");
VT nrows = graph.number_of_vertices;
if (connectivity_type == cugraph_cc_t::CUGRAPH_WEAK) {
MLCommon::Sparse::weak_cc_entry<VT, ET, TPB_X>(labels,
graph.offsets,
graph.indices,
graph.number_of_edges,
graph.number_of_vertices,
stream);
} else {
SCC_Data<ByteT, VT> sccd(nrows, graph.offsets, graph.indices);
auto num_iters = sccd.run_scc(labels);
}
}
} // namespace detail
template <typename VT, typename ET, typename WT>
void connected_components(GraphCSRView<VT, ET, WT> const &graph,
cugraph_cc_t connectivity_type,
VT *labels)
{
cudaStream_t stream{nullptr};
CUGRAPH_EXPECTS(labels != nullptr, "Invalid input argument: labels parameter is NULL");
return detail::connected_components_impl<VT, ET, WT>(graph, connectivity_type, labels, stream);
}
template void connected_components<int32_t, int32_t, float>(
GraphCSRView<int32_t, int32_t, float> const &, cugraph_cc_t, int32_t *);
template void connected_components<int64_t, int64_t, float>(
GraphCSRView<int64_t, int64_t, float> const &, cugraph_cc_t, int64_t *);
} // namespace cugraph
|
1c1630d3ce2e07a42debeec18bac0a1e454aa708.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
__global__ void popc_kernel(const int n, const float* a,
const float* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popc(static_cast<uint32_t>(a[index]) ^
static_cast<uint32_t>(b[index]));
}
}
__global__ void popcll_kernel(const int n, const double* a,
const double* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popcll(static_cast<uint64_t>(a[index]) ^
static_cast<uint64_t>(b[index]));
}
}
template <>
uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x,
const float* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( popc_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
(uint32_t) 0, thrust::plus<uint32_t>());
}
template <>
uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x,
const double* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( popcll_kernel), dim3(CAFFE_GET_BLOCKS(n)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
/* NOLINT_NEXT_LINE(build/include_what_you_use) */
(uint32_t) 0, thrust::plus<uint32_t>());
}
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(round, y[index] = __int2float_rn(__float2int_rn(x[index])));
} // namespace caffe
| 1c1630d3ce2e07a42debeec18bac0a1e454aa708.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = signbit(x[index]));
__global__ void popc_kernel(const int n, const float* a,
const float* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popc(static_cast<uint32_t>(a[index]) ^
static_cast<uint32_t>(b[index]));
}
}
__global__ void popcll_kernel(const int n, const double* a,
const double* b, uint8_t* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = __popcll(static_cast<uint64_t>(a[index]) ^
static_cast<uint64_t>(b[index]));
}
}
template <>
uint32_t caffe_gpu_hamming_distance<float>(const int n, const float* x,
const float* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
popc_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
(uint32_t) 0, thrust::plus<uint32_t>());
}
template <>
uint32_t caffe_gpu_hamming_distance<double>(const int n, const double* x,
const double* y) {
// TODO: Fix caffe_gpu_hamming_distance (see failing unit test
// TestHammingDistanceGPU in test_math_functions.cpp).
NOT_IMPLEMENTED;
thrust::device_vector<uint8_t> popcounts(n);
// NOLINT_NEXT_LINE(whitespace/operators)
popcll_kernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
n, x, y, thrust::raw_pointer_cast(popcounts.data()));
return thrust::reduce(popcounts.begin(), popcounts.end(),
/* NOLINT_NEXT_LINE(build/include_what_you_use) */
(uint32_t) 0, thrust::plus<uint32_t>());
}
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(round, y[index] = __int2float_rn(__float2int_rn(x[index])));
} // namespace caffe
|
77d06cbf8c2c22792b1c84483b44bca46c143335.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <cfloat>
#include "caffe_cuda.h"
#include "region_common.hpp"
namespace {
__device__ void stack_push(double* parent_p_data, int* parent_argmax_data, int* g_data,
int& stack_size,
double p, int argmax, int g) {
parent_p_data[stack_size] = p;
parent_argmax_data[stack_size] = argmax;
g_data[stack_size] = g;
stack_size++;
}
__device__ void stack_pop(const double* parent_p_data, const int* parent_argmax_data, const int* g_data,
int& stack_size,
double& p, int& argmax, int& g) {
assert(stack_size > 0);
stack_size--;
p = parent_p_data[stack_size];
argmax = parent_argmax_data[stack_size];
g = g_data[stack_size];
}
template <typename scalar_t>
__device__ void predict_tree_stack(
int outer_num, int channels, int inner_num,
bool append_max,
float threshold,
const int* group_offset_data, const int* group_size_data, const int* child_data, const int* child_size_data,
double* parent_p_data, int* parent_argmax_data, int* g_data,
const scalar_t* obj_data, const scalar_t* prob_data,
int max_stack_size, int n, int s, int g,
scalar_t* top_data,
bool output_tree_path) {
int stack_size = 0;
const int top_channels = append_max ? (channels + 1) : channels;
scalar_t obj = obj_data ? obj_data[n * inner_num + s] : 1;
double root_p = output_tree_path ? obj : 1.0;
threshold = output_tree_path ? (threshold * obj) : threshold;
stack_push(parent_p_data, parent_argmax_data, g_data,
stack_size,
root_p, -1, g);
while (stack_size) {
assert(stack_size <= max_stack_size);
double parent_p;
int parent_argmax;
int g;
stack_pop(parent_p_data, parent_argmax_data, g_data,
stack_size,
parent_p, parent_argmax, g);
double p = parent_p;
int argmax = 0;
{
scalar_t maxval = -FLT_MAX;
auto offset = group_offset_data[g];
argmax = offset;
auto size = group_size_data[g];
for (int j = 0; j < size; ++j) {
scalar_t prob = prob_data[(n * channels + offset + j) * inner_num + s];
if (prob > maxval) {
argmax = offset + j;
maxval = prob;
}
}
p *= maxval;
}
if (p > threshold) {
if (output_tree_path) {
top_data[(n * top_channels + argmax) * inner_num + s] = static_cast<scalar_t>(p);
}
g = child_data[argmax]; // initial child group
if (g >= 0) {
// if there is any child, descend further
int sg_count = child_size_data[argmax] + 1;
for (int sg = 0; sg < sg_count; ++sg) {
stack_push(parent_p_data, parent_argmax_data, g_data,
stack_size,
p, argmax, g + sg);
}
continue;
}
} else {
argmax = parent_argmax;
if (argmax < 0)
continue;
p = parent_p;
}
scalar_t node_p = 0;
if (!output_tree_path) {
node_p = obj_data ? obj : static_cast<scalar_t>(p);
top_data[(n * top_channels + argmax) * inner_num + s] = node_p;
}
if (append_max) {
int max_idx = (n * top_channels + channels) * inner_num + s;
if (output_tree_path) {
// in this case, we use the obj as the max value, which will be
// used as the indicator for class-independent NMS. or the
// maximum value will always be the ones in the root.
// gradually, we might remove the support of append_max since
// it is more like a legacy strategy
top_data[max_idx] = obj;
} else {
if (node_p > top_data[max_idx]) {
top_data[max_idx] = node_p;
}
}
}
}
}
template <typename scalar_t>
__global__ void kernel_smt_prediction(
int outer_num, int channels, int inner_num, int root_size,
bool append_max,
float threshold,
const int* group_offset_data, const int* group_size_data, const int* child_data, const int* child_size_data,
double* parent_p_data, int* parent_argmax_data, int* g_data,
const scalar_t* obj_data, const scalar_t* prob_data,
int max_stack_size,
scalar_t* top_data,
bool output_tree_path) {
CUDA_KERNEL_LOOP(index, outer_num * root_size * inner_num) {
const int s = index % inner_num;
const int g = (index / inner_num) % root_size;
const int n = (index / inner_num) / root_size;
predict_tree_stack(outer_num, channels, inner_num,
append_max,
threshold,
group_offset_data, group_size_data, child_data, child_size_data,
&parent_p_data[index * max_stack_size], &parent_argmax_data[index * max_stack_size], &g_data[index * max_stack_size],
obj_data, prob_data,
max_stack_size, n, s, g,
top_data,
output_tree_path);
}
}
} // namespace
std::vector<at::Tensor> smtpred_cuda_forward(
at::Tensor conf, at::Tensor obj,
at::Tensor group_offset, at::Tensor group_size, at::Tensor child, at::Tensor child_size,
float threshold, bool output_tree_path, bool append_max,
int root_size, int stack_size,
int outer_num, int inner_num
) {
root_size++;
// Intermediate variables
auto stack_parent_p = at::empty({outer_num, root_size, inner_num, stack_size}, at::CUDA(at::kDouble));
auto stack_parent_argmax = at::empty({outer_num, root_size, inner_num, stack_size}, at::CUDA(at::kInt));
auto stack_g = at::empty({outer_num, root_size, inner_num, stack_size}, at::CUDA(at::kInt));
auto shape = conf.sizes().vec();
int channels = shape[1];
if (append_max)
shape[1] = channels + 1;
auto top = at::zeros(shape, conf.type());
AT_DISPATCH_FLOATING_TYPES(conf.type(), "smtpred_cuda_forward::kernel_smt_prediction", ([&] {
scalar_t* obj_data = nullptr;
if (obj.numel())
obj_data = obj.data<scalar_t>();
hipLaunchKernelGGL(( kernel_smt_prediction<scalar_t>), dim3(GET_BLOCKS(outer_num * root_size * inner_num)), dim3(CUDA_NUM_THREADS), 0, 0,
outer_num, channels, inner_num, root_size,
append_max,
threshold,
group_offset.data<int>(), group_size.data<int>(), child.data<int>(), child_size.data<int>(),
stack_parent_p.data<double>(), stack_parent_argmax.data<int>(), stack_g.data<int>(),
obj_data, conf.data<scalar_t>(),
stack_size,
top.data<scalar_t>(),
output_tree_path);
}));
return {top};
}
| 77d06cbf8c2c22792b1c84483b44bca46c143335.cu | #include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <cfloat>
#include "caffe_cuda.h"
#include "region_common.hpp"
namespace {
__device__ void stack_push(double* parent_p_data, int* parent_argmax_data, int* g_data,
int& stack_size,
double p, int argmax, int g) {
parent_p_data[stack_size] = p;
parent_argmax_data[stack_size] = argmax;
g_data[stack_size] = g;
stack_size++;
}
__device__ void stack_pop(const double* parent_p_data, const int* parent_argmax_data, const int* g_data,
int& stack_size,
double& p, int& argmax, int& g) {
assert(stack_size > 0);
stack_size--;
p = parent_p_data[stack_size];
argmax = parent_argmax_data[stack_size];
g = g_data[stack_size];
}
template <typename scalar_t>
__device__ void predict_tree_stack(
int outer_num, int channels, int inner_num,
bool append_max,
float threshold,
const int* group_offset_data, const int* group_size_data, const int* child_data, const int* child_size_data,
double* parent_p_data, int* parent_argmax_data, int* g_data,
const scalar_t* obj_data, const scalar_t* prob_data,
int max_stack_size, int n, int s, int g,
scalar_t* top_data,
bool output_tree_path) {
int stack_size = 0;
const int top_channels = append_max ? (channels + 1) : channels;
scalar_t obj = obj_data ? obj_data[n * inner_num + s] : 1;
double root_p = output_tree_path ? obj : 1.0;
threshold = output_tree_path ? (threshold * obj) : threshold;
stack_push(parent_p_data, parent_argmax_data, g_data,
stack_size,
root_p, -1, g);
while (stack_size) {
assert(stack_size <= max_stack_size);
double parent_p;
int parent_argmax;
int g;
stack_pop(parent_p_data, parent_argmax_data, g_data,
stack_size,
parent_p, parent_argmax, g);
double p = parent_p;
int argmax = 0;
{
scalar_t maxval = -FLT_MAX;
auto offset = group_offset_data[g];
argmax = offset;
auto size = group_size_data[g];
for (int j = 0; j < size; ++j) {
scalar_t prob = prob_data[(n * channels + offset + j) * inner_num + s];
if (prob > maxval) {
argmax = offset + j;
maxval = prob;
}
}
p *= maxval;
}
if (p > threshold) {
if (output_tree_path) {
top_data[(n * top_channels + argmax) * inner_num + s] = static_cast<scalar_t>(p);
}
g = child_data[argmax]; // initial child group
if (g >= 0) {
// if there is any child, descend further
int sg_count = child_size_data[argmax] + 1;
for (int sg = 0; sg < sg_count; ++sg) {
stack_push(parent_p_data, parent_argmax_data, g_data,
stack_size,
p, argmax, g + sg);
}
continue;
}
} else {
argmax = parent_argmax;
if (argmax < 0)
continue;
p = parent_p;
}
scalar_t node_p = 0;
if (!output_tree_path) {
node_p = obj_data ? obj : static_cast<scalar_t>(p);
top_data[(n * top_channels + argmax) * inner_num + s] = node_p;
}
if (append_max) {
int max_idx = (n * top_channels + channels) * inner_num + s;
if (output_tree_path) {
// in this case, we use the obj as the max value, which will be
// used as the indicator for class-independent NMS. or the
// maximum value will always be the ones in the root.
// gradually, we might remove the support of append_max since
// it is more like a legacy strategy
top_data[max_idx] = obj;
} else {
if (node_p > top_data[max_idx]) {
top_data[max_idx] = node_p;
}
}
}
}
}
template <typename scalar_t>
__global__ void kernel_smt_prediction(
int outer_num, int channels, int inner_num, int root_size,
bool append_max,
float threshold,
const int* group_offset_data, const int* group_size_data, const int* child_data, const int* child_size_data,
double* parent_p_data, int* parent_argmax_data, int* g_data,
const scalar_t* obj_data, const scalar_t* prob_data,
int max_stack_size,
scalar_t* top_data,
bool output_tree_path) {
CUDA_KERNEL_LOOP(index, outer_num * root_size * inner_num) {
const int s = index % inner_num;
const int g = (index / inner_num) % root_size;
const int n = (index / inner_num) / root_size;
predict_tree_stack(outer_num, channels, inner_num,
append_max,
threshold,
group_offset_data, group_size_data, child_data, child_size_data,
&parent_p_data[index * max_stack_size], &parent_argmax_data[index * max_stack_size], &g_data[index * max_stack_size],
obj_data, prob_data,
max_stack_size, n, s, g,
top_data,
output_tree_path);
}
}
} // namespace
std::vector<at::Tensor> smtpred_cuda_forward(
at::Tensor conf, at::Tensor obj,
at::Tensor group_offset, at::Tensor group_size, at::Tensor child, at::Tensor child_size,
float threshold, bool output_tree_path, bool append_max,
int root_size, int stack_size,
int outer_num, int inner_num
) {
root_size++;
// Intermediate variables
auto stack_parent_p = at::empty({outer_num, root_size, inner_num, stack_size}, at::CUDA(at::kDouble));
auto stack_parent_argmax = at::empty({outer_num, root_size, inner_num, stack_size}, at::CUDA(at::kInt));
auto stack_g = at::empty({outer_num, root_size, inner_num, stack_size}, at::CUDA(at::kInt));
auto shape = conf.sizes().vec();
int channels = shape[1];
if (append_max)
shape[1] = channels + 1;
auto top = at::zeros(shape, conf.type());
AT_DISPATCH_FLOATING_TYPES(conf.type(), "smtpred_cuda_forward::kernel_smt_prediction", ([&] {
scalar_t* obj_data = nullptr;
if (obj.numel())
obj_data = obj.data<scalar_t>();
kernel_smt_prediction<scalar_t><<<GET_BLOCKS(outer_num * root_size * inner_num), CUDA_NUM_THREADS>>>(
outer_num, channels, inner_num, root_size,
append_max,
threshold,
group_offset.data<int>(), group_size.data<int>(), child.data<int>(), child_size.data<int>(),
stack_parent_p.data<double>(), stack_parent_argmax.data<int>(), stack_g.data<int>(),
obj_data, conf.data<scalar_t>(),
stack_size,
top.data<scalar_t>(),
output_tree_path);
}));
return {top};
}
|
c10fd9d1c0197ea41b6921a63d213158e4eb749c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include "support.h"
#include "kernel.hip"
int main(int argc, char* argv[])
{
Timer timer;
// Initialize host variables ----------------------------------------------
#if TEST_MODE
printf("\n***Running in test mode***\n"); fflush(stdout);
#endif
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
Matrix M_h, N_h, P_h; // M: filter, N: input image, P: output image
Matrix N_d, P_d;
unsigned imageHeight, imageWidth;
hipError_t cuda_ret;
//dim3 dim_grid, dim_block;
/* Read image dimensions */
if (argc == 1) {
imageHeight = 600;
imageWidth = 1000;
} else if (argc == 2) {
imageHeight = atoi(argv[1]);
imageWidth = atoi(argv[1]);
} else if (argc == 3) {
imageHeight = atoi(argv[1]);
imageWidth = atoi(argv[2]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./convolution # Image is 600 x 1000"
"\n Usage: ./convolution <m> # Image is m x m"
"\n Usage: ./convolution <m> <n> # Image is m x n"
"\n");
exit(0);
}
/* Allocate host memory */
M_h = allocateMatrix(FILTER_SIZE, FILTER_SIZE);
N_h = allocateMatrix(imageHeight, imageWidth);
P_h = allocateMatrix(imageHeight, imageWidth);
/* Initialize filter and images */
initMatrix(M_h);
initMatrix(N_h);
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" Image: %u x %u\n", imageHeight, imageWidth);
printf(" Mask: %u x %u\n", FILTER_SIZE, FILTER_SIZE);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
N_d = allocateDeviceMatrix(imageHeight, imageWidth);
P_d = allocateDeviceMatrix(imageHeight, imageWidth);
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
/* Copy image to device global memory */
copyToDeviceMatrix(N_d, N_h);
/* Copy mask to device constant memory */
// INSERT CODE HERE
hipMemcpyToSymbol(M_c, M_h.elements, FILTER_SIZE*FILTER_SIZE*sizeof(float));
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
// INSERT CODE HERE
// Use OUTPUT_TILE_SIZE and INPUT_TILE_SIZE defined in support.h
int BLOCK_SIZE = INPUT_TILE_SIZE;
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 dim_grid(((unsigned int) ceil(N_d.width/(float)OUTPUT_TILE_SIZE)),
((unsigned int) ceil(N_d.height/(float)OUTPUT_TILE_SIZE)),
1);
hipLaunchKernelGGL(( convolution), dim3(dim_grid), dim3(dim_block), 0, 0, N_d, P_d);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch/execute kernel");
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
copyFromDeviceMatrix(P_h, P_d);
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
#if TEST_MODE
printf("\nResult:\n");
for(int row = 0; row < P_h.height; ++row) {
for(int col = 0; col < P_h.width; ++col) {
printf("%.2f ", P_h.elements[row*P_h.width + col]);
}
printf("\n");
}
#endif
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(M_h, N_h, P_h);
// Free memory ------------------------------------------------------------
freeMatrix(M_h);
freeMatrix(N_h);
freeMatrix(P_h);
freeDeviceMatrix(N_d);
freeDeviceMatrix(P_d);
return 0;
}
| c10fd9d1c0197ea41b6921a63d213158e4eb749c.cu | /******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include "support.h"
#include "kernel.cu"
int main(int argc, char* argv[])
{
Timer timer;
// Initialize host variables ----------------------------------------------
#if TEST_MODE
printf("\n***Running in test mode***\n"); fflush(stdout);
#endif
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
Matrix M_h, N_h, P_h; // M: filter, N: input image, P: output image
Matrix N_d, P_d;
unsigned imageHeight, imageWidth;
cudaError_t cuda_ret;
//dim3 dim_grid, dim_block;
/* Read image dimensions */
if (argc == 1) {
imageHeight = 600;
imageWidth = 1000;
} else if (argc == 2) {
imageHeight = atoi(argv[1]);
imageWidth = atoi(argv[1]);
} else if (argc == 3) {
imageHeight = atoi(argv[1]);
imageWidth = atoi(argv[2]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./convolution # Image is 600 x 1000"
"\n Usage: ./convolution <m> # Image is m x m"
"\n Usage: ./convolution <m> <n> # Image is m x n"
"\n");
exit(0);
}
/* Allocate host memory */
M_h = allocateMatrix(FILTER_SIZE, FILTER_SIZE);
N_h = allocateMatrix(imageHeight, imageWidth);
P_h = allocateMatrix(imageHeight, imageWidth);
/* Initialize filter and images */
initMatrix(M_h);
initMatrix(N_h);
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" Image: %u x %u\n", imageHeight, imageWidth);
printf(" Mask: %u x %u\n", FILTER_SIZE, FILTER_SIZE);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
N_d = allocateDeviceMatrix(imageHeight, imageWidth);
P_d = allocateDeviceMatrix(imageHeight, imageWidth);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
/* Copy image to device global memory */
copyToDeviceMatrix(N_d, N_h);
/* Copy mask to device constant memory */
// INSERT CODE HERE
cudaMemcpyToSymbol(M_c, M_h.elements, FILTER_SIZE*FILTER_SIZE*sizeof(float));
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
// INSERT CODE HERE
// Use OUTPUT_TILE_SIZE and INPUT_TILE_SIZE defined in support.h
int BLOCK_SIZE = INPUT_TILE_SIZE;
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 dim_grid(((unsigned int) ceil(N_d.width/(float)OUTPUT_TILE_SIZE)),
((unsigned int) ceil(N_d.height/(float)OUTPUT_TILE_SIZE)),
1);
convolution<<<dim_grid, dim_block>>>(N_d, P_d);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch/execute kernel");
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
copyFromDeviceMatrix(P_h, P_d);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
#if TEST_MODE
printf("\nResult:\n");
for(int row = 0; row < P_h.height; ++row) {
for(int col = 0; col < P_h.width; ++col) {
printf("%.2f ", P_h.elements[row*P_h.width + col]);
}
printf("\n");
}
#endif
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(M_h, N_h, P_h);
// Free memory ------------------------------------------------------------
freeMatrix(M_h);
freeMatrix(N_h);
freeMatrix(P_h);
freeDeviceMatrix(N_d);
freeDeviceMatrix(P_d);
return 0;
}
|
f7e68c35d2ecdb71cbf50b659f03cfe9624b404b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
//#include <stdio.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
//#define ITERATIONS 40
//#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
//bool noprompt = false;
//unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
//void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1;
float Value2;
float Value3;
float Value;
float I1=A[i];
float I2=B[i];
#pragma unroll 100
// Excessive Addition access
for(unsigned k=0; k<N;k++) {
Value1=I1*I2;
Value3=I1*I2;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
// Value1=I1*I2;
// Value3=Value1*I1;
// Value2=Value3*Value1;
// Value3*=Value2;
// Value1*=Value2;
// Value3*=Value1;
}
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
__syncthreads();
}
int main(int argc, char** argv)
{
int iterations;
if(argc!=2) {
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
}
printf("Power Microbenchmarks with iterations %d\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( hipMalloc((void**)&d_A, size) );
checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
hipEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(hipEventRecord(start));
hipLaunchKernelGGL(( PowerKernal2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_B, d_C, iterations);
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
checkCudaErrors(hipEventElapsedTime(&elapsedTime, start, stop));
printf("execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
hipDeviceSynchronize();
/*CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif*/
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
hipFree(d_A);
if (d_B)
hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / RAND_MAX;
}
}
| f7e68c35d2ecdb71cbf50b659f03cfe9624b404b.cu | #include <stdio.h>
#include <stdlib.h>
//#include <cutil.h>
// Includes
//#include <stdio.h>
// includes, project
//#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 640
//#define ITERATIONS 40
//#include "../include/ContAcq-IntClk.h"
// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
//bool noprompt = false;
//unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(float*, int);
//void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
float Value1;
float Value2;
float Value3;
float Value;
float I1=A[i];
float I2=B[i];
#pragma unroll 100
// Excessive Addition access
for(unsigned k=0; k<N;k++) {
Value1=I1*I2;
Value3=I1*I2;
Value1*=Value2;
Value1*=Value2;
Value2=Value3*Value1;
Value1=Value2*Value3;
// Value1=I1*I2;
// Value3=Value1*I1;
// Value2=Value3*Value1;
// Value3*=Value2;
// Value1*=Value2;
// Value3*=Value1;
}
__syncthreads();
Value=Value1;
C[i]=Value*Value2;
__syncthreads();
}
int main(int argc, char** argv)
{
int iterations;
if(argc!=2) {
fprintf(stderr,"usage: %s #iterations\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
}
printf("Power Microbenchmarks with iterations %d\n",iterations);
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
if (h_A == 0) CleanupResources();
h_B = (float*)malloc(size);
if (h_B == 0) CleanupResources();
h_C = (float*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
RandomInit(h_B, N);
// Allocate vectors in device memory
printf("before\n");
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
printf("after\n");
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
cudaEvent_t start, stop;
float elapsedTime = 0;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
checkCudaErrors(cudaEventRecord(start));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
printf("execution time = %.2f s\n", elapsedTime/1000);
getLastCudaError("kernel launch failure");
cudaThreadSynchronize();
/*CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif*/
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_B)
cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_B)
free(h_B);
if (h_C)
free(h_C);
}
// Allocates an array with random float entries.
void RandomInit(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / RAND_MAX;
}
}
|
36fa4ac3fa26ea019ac45794ffeb65d8a3da3616.hip | // !!! This is a file automatically generated by hipify!!!
#include <wb.h>
//@@ The purpose of this code is to become familiar with the submission
//@@ process. Do not worry if you do not understand all the details of
//@@ the code.
int main(int argc, char **argv) {
int deviceCount;
wbArg_read(argc, argv);
hipGetDeviceCount(&deviceCount);
wbTime_start(GPU, "Getting GPU Data."); //@@ start a timer
for (int dev = 0; dev < deviceCount; dev++) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
wbLog(TRACE, "No CUDA GPU has been detected");
return -1;
} else if (deviceCount == 1) {
//@@ WbLog is a provided logging API (similar to Log4J).
//@@ The logging function wbLog takes a level which is either
//@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG, or TRACE and a
//@@ message to be printed.
wbLog(TRACE, "There is 1 device supporting CUDA");
} else {
wbLog(TRACE, "There are ", deviceCount,
" devices supporting CUDA");
}
}
wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name);
wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".",
deviceProp.minor);
wbLog(TRACE, " Maximum global memory size: ",
deviceProp.totalGlobalMem);
wbLog(TRACE, " Maximum constant memory size: ",
deviceProp.totalConstMem);
wbLog(TRACE, " Maximum shared memory size per block: ",
deviceProp.sharedMemPerBlock);
wbLog(TRACE, " Maximum block dimensions: ",
deviceProp.maxThreadsDim[0], " x ", deviceProp.maxThreadsDim[1],
" x ", deviceProp.maxThreadsDim[2]);
wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0],
" x ", deviceProp.maxGridSize[1], " x ",
deviceProp.maxGridSize[2]);
wbLog(TRACE, " Warp size: ", deviceProp.warpSize);
}
wbTime_stop(GPU, "Getting GPU Data."); //@@ stop the timer
return 0;
}
| 36fa4ac3fa26ea019ac45794ffeb65d8a3da3616.cu | #include <wb.h>
//@@ The purpose of this code is to become familiar with the submission
//@@ process. Do not worry if you do not understand all the details of
//@@ the code.
int main(int argc, char **argv) {
int deviceCount;
wbArg_read(argc, argv);
cudaGetDeviceCount(&deviceCount);
wbTime_start(GPU, "Getting GPU Data."); //@@ start a timer
for (int dev = 0; dev < deviceCount; dev++) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
wbLog(TRACE, "No CUDA GPU has been detected");
return -1;
} else if (deviceCount == 1) {
//@@ WbLog is a provided logging API (similar to Log4J).
//@@ The logging function wbLog takes a level which is either
//@@ OFF, FATAL, ERROR, WARN, INFO, DEBUG, or TRACE and a
//@@ message to be printed.
wbLog(TRACE, "There is 1 device supporting CUDA");
} else {
wbLog(TRACE, "There are ", deviceCount,
" devices supporting CUDA");
}
}
wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name);
wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".",
deviceProp.minor);
wbLog(TRACE, " Maximum global memory size: ",
deviceProp.totalGlobalMem);
wbLog(TRACE, " Maximum constant memory size: ",
deviceProp.totalConstMem);
wbLog(TRACE, " Maximum shared memory size per block: ",
deviceProp.sharedMemPerBlock);
wbLog(TRACE, " Maximum block dimensions: ",
deviceProp.maxThreadsDim[0], " x ", deviceProp.maxThreadsDim[1],
" x ", deviceProp.maxThreadsDim[2]);
wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0],
" x ", deviceProp.maxGridSize[1], " x ",
deviceProp.maxGridSize[2]);
wbLog(TRACE, " Warp size: ", deviceProp.warpSize);
}
wbTime_stop(GPU, "Getting GPU Data."); //@@ stop the timer
return 0;
}
|
f567d1e06404c0c6c815d9a2b3d8982904a9c373.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sparse_fully_connected_1x1_layer_updater_cuda.h"
#include <hip/hip_runtime.h>
#include "util_cuda.h"
#include "neural_network_cusparse_exception.h"
#include "neural_network_cublas_exception.h"
#include "neural_network_cuda_exception.h"
#include "../sparse_convolution_layer.h"
#define OUTPUT_ELEM_COUNT_BLOCK_SIZE 4
namespace nnforge
{
namespace cuda
{
__global__ void copy_bias_sparse_upd_kernel(
const float * __restrict biases,
float * __restrict output,
int output_neuron_count,
int entry_count)
{
int output_neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
if ((output_neuron_id < output_neuron_count) && (entry_id < entry_count))
{
float bias = biases[output_neuron_id];
float * current_output = output + (int)(entry_id * output_neuron_count + output_neuron_id);
#pragma unroll
for(int i = 0; i < 4; ++i)
{
if (entry_id < entry_count)
*current_output = bias;
current_output += output_neuron_count;
entry_id++;
}
}
}
__global__ void sparse_fully_connected_update_biases_upd_kernel(
float * __restrict gradient_biases,
const float * __restrict output_errors,
int block_size,
int output_elem_count_per_entry,
int entry_count,
int block_count)
{
int output_neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int block_id = blockIdx.y * blockDim.y + threadIdx.y;
if ((output_neuron_id < output_elem_count_per_entry) && (block_id < block_count))
{
int base_entry_id = block_size * block_id;
int iteration_count = min(entry_count - base_entry_id, block_size);
const float * current_error = output_errors + (base_entry_id * output_elem_count_per_entry + output_neuron_id);
float sum = 0.0F;
for(int i = 0; i < iteration_count; ++i)
{
sum += *current_error;
current_error += output_elem_count_per_entry;
}
atomicAdd(gradient_biases + output_neuron_id, sum);
}
}
#define OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE 4
__global__ void sparse_fully_connected_backprop_upd_kernel(
const float * __restrict output_errors,
float * __restrict input_errors,
const float * __restrict weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int entry_count,
int entry32_block_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = (thread_id_x >> 5) * OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int max_valid_lane = min(OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE, end_column_index - base_nnz_index);
bool valid[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i)
valid[i] = (i < max_valid_lane);
int column_ids[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
float w[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i)
{
int index = valid[i] ? base_nnz_index + i : (end_column_index - 1);
column_ids[i] = __load_nc(column_indices + index);
w[i] = __load_nc(weights + index);
}
int base_entry_id = ((blockIdx.z * blockDim.z + threadIdx.z) << 5) * entry32_block_size;
if (base_entry_id >= entry_count)
return;
int lane_id = thread_id_x & 31;
int current_entry_id = base_entry_id + lane_id;
const float * base_output_errors = output_errors + row_id * entry_count;
for(int i = 0; i < entry32_block_size; ++i, current_entry_id += 32)
{
if (current_entry_id < entry_count)
{
float output_error = __load_nc(base_output_errors + current_entry_id);
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i)
if (valid[i])
atomicAdd(input_errors + column_ids[i] * entry_count + current_entry_id, output_error * w[i]);
}
}
}
#define OUTPUT_ELEM_COUNT_BLOCK_SIZE 4
extern __shared__ float arr_sh[];
template<bool single_entry_pass>
__global__ void sparse_fully_connected_update_weights_kernel(
const float * __restrict output_errors,
const float * __restrict input_neurons,
float * __restrict gradient_weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int entry_count,
int entry32_block_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = (thread_id_x >> 5) * OUTPUT_ELEM_COUNT_BLOCK_SIZE;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int max_valid_lane = min(OUTPUT_ELEM_COUNT_BLOCK_SIZE, end_column_index - base_nnz_index);
bool valid[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
valid[i] = (i < max_valid_lane);
int column_ids[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
column_ids[i] = __load_nc(column_indices + (valid[i] ? base_nnz_index + i : (end_column_index - 1)));
int base_entry_id = ((blockIdx.z * blockDim.z + threadIdx.z) << 5) * entry32_block_size;
if (base_entry_id >= entry_count)
return;
int lane_id = thread_id_x & 31;
int current_entry_id = base_entry_id + lane_id;
float sums[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] = 0.0F;
const float * base_output_errors = output_errors + row_id * entry_count;
for(int i = 0; i < entry32_block_size; ++i, current_entry_id += 32)
{
if (current_entry_id < entry_count)
{
float output_error = __load_nc(base_output_errors + current_entry_id);
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] += __load_nc(input_neurons + column_ids[i] * entry_count + current_entry_id) * output_error;
}
}
#if __CUDA_ARCH__ < 300
int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x;
int warp_id = thread_id >> 5;
volatile float * arr = arr_sh;
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + i * 32 + lane_id] = sums[i];
#endif
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
#if __CUDA_ARCH__ < 300
if (lane_id < tx)
{
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + i * 32 + lane_id] += arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + i * 32 + lane_id + tx];
}
#else
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] += __shfl_xor(sums[i], tx);
#endif
}
#if __CUDA_ARCH__ < 300
if (lane_id < max_valid_lane)
sums[0] = arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + lane_id * 32];
#else
#pragma unroll
for(int i = 1; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
if (lane_id == i)
sums[0] = sums[i];
#endif
if (lane_id < max_valid_lane)
{
if (single_entry_pass)
{
gradient_weights[base_nnz_index + lane_id] += sums[0];
}
else
{
atomicAdd(gradient_weights + base_nnz_index + lane_id, sums[0]);
}
}
}
sparse_fully_connected_1x1_layer_updater_cuda::sparse_fully_connected_1x1_layer_updater_cuda()
{
}
sparse_fully_connected_1x1_layer_updater_cuda::~sparse_fully_connected_1x1_layer_updater_cuda()
{
}
void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_test(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_entry,
(entry_count + 4 - 1) / 4,
1);
hipLaunchKernelGGL(( copy_bias_sparse_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*data[1],
*output_neurons_buffer,
output_elem_count_per_entry,
entry_count);
cusparse_safe_call(hipsparseSetStream(cuda_config->get_cusparse_handle(), stream_id));
float alpha = 1.0F;
float beta = 1.0F;
hipsparseMatDescr_t mat_descr;
cusparse_safe_call(hipsparseCreateMatDescr(&mat_descr));
cusparse_safe_call(hipsparseScsrmm(
cuda_config->get_cusparse_handle(),
HIPSPARSE_OPERATION_NON_TRANSPOSE,
output_elem_count_per_entry,
entry_count,
input_elem_count_per_entry,
feature_map_connection_count,
&alpha,
mat_descr,
*data[0],
*data_custom[1],
*data_custom[0],
(const float *)(*input_neurons_buffer) + input_elem_count_per_entry * offset_input_entry_id,
input_elem_count_per_entry,
&beta,
*output_neurons_buffer,
output_elem_count_per_entry));
}
void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_backprop(
hipStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
// Too slow
/*
cusparse_safe_call(hipsparseSetStream(cuda_config->get_cusparse_handle(), stream_id));
float alpha = 1.0F;
float beta = 0.0F;
hipsparseMatDescr_t mat_descr;
cusparse_safe_call(hipsparseCreateMatDescr(&mat_descr));
cusparse_safe_call(hipsparseScsrmm(
cuda_config->get_cusparse_handle(),
HIPSPARSE_OPERATION_TRANSPOSE,
output_elem_count_per_entry,
entry_count,
input_elem_count_per_entry,
feature_map_connection_count,
&alpha,
mat_descr,
*data[0],
*data_custom[1],
*data_custom[0],
*output_errors_buffer,
output_elem_count_per_entry,
&beta,
*input_errors_buffer,
input_elem_count_per_entry));
*/
cuda_util::set_with_value(
*cuda_config,
*additional_buffers[0],
0.0F,
input_elem_count_per_entry * entry_count,
stream_id);
std::pair<int, int> entry32_block_size_and_count = get_entry32_backprop_block_size_and_count(entry_count);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * ((max_column_index_count_per_row + OUTPUT_ELEM_COUNT_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BLOCK_SIZE),
output_elem_count_per_entry,
entry32_block_size_and_count.second,
32);
hipLaunchKernelGGL(( sparse_fully_connected_backprop_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*additional_buffers[1],
*additional_buffers[0],
*data[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
entry_count,
entry32_block_size_and_count.first);
cublas_safe_call(hipblasSetStream(cuda_config->get_cublas_handle(), stream_id));
// transpose input
{
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(hipblasSgeam(
cuda_config->get_cublas_handle(),
HIPBLAS_OP_T,
HIPBLAS_OP_N,
input_elem_count_per_entry,
entry_count,
&alpha,
*additional_buffers[0],
entry_count,
&beta,
*input_errors_buffer,
input_elem_count_per_entry,
*input_errors_buffer,
input_elem_count_per_entry));
}
}
void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_update_weights(
unsigned int offset_input_entry_id,
hipStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& gradient,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
// Update weights
{
cublas_safe_call(hipblasSetStream(cuda_config->get_cublas_handle(), stream_id));
// transpose input
{
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(hipblasSgeam(
cuda_config->get_cublas_handle(),
HIPBLAS_OP_T,
HIPBLAS_OP_N,
entry_count,
input_elem_count_per_entry,
&alpha,
(const float *)(*input_neurons_buffer) + input_elem_count_per_entry * offset_input_entry_id,
input_elem_count_per_entry,
&beta,
*additional_buffers[0],
entry_count,
*additional_buffers[0],
entry_count));
}
// transpose output
{
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(hipblasSgeam(
cuda_config->get_cublas_handle(),
HIPBLAS_OP_T,
HIPBLAS_OP_N,
entry_count,
output_elem_count_per_entry,
&alpha,
*output_errors_buffer,
output_elem_count_per_entry,
&beta,
*additional_buffers[1],
entry_count,
*additional_buffers[1],
entry_count));
}
std::pair<int, int> entry32_block_size_and_count = get_entry32_update_block_size_and_count(entry_count);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * ((max_column_index_count_per_row + OUTPUT_ELEM_COUNT_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BLOCK_SIZE),
output_elem_count_per_entry,
entry32_block_size_and_count.second,
32);
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
int smem_size = (cuda_config->get_compute_capability() < 300) ? threadblock_size * OUTPUT_ELEM_COUNT_BLOCK_SIZE * sizeof(float) : 0;
if (entry32_block_size_and_count.second > 1)
{
hipLaunchKernelGGL(( sparse_fully_connected_update_weights_kernel<false>), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id,
*additional_buffers[1],
*additional_buffers[0],
*gradient[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
entry_count,
entry32_block_size_and_count.first);
}
else
{
hipLaunchKernelGGL(( sparse_fully_connected_update_weights_kernel<true>), dim3(kernel_dims.first), dim3(kernel_dims.second), smem_size, stream_id,
*additional_buffers[1],
*additional_buffers[0],
*gradient[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
entry_count,
entry32_block_size_and_count.first);
}
}
// Update biases
{
int block_size = get_block_size(entry_count);
int block_count = (entry_count + block_size - 1) / block_size;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_entry,
block_count,
1);
hipLaunchKernelGGL(( sparse_fully_connected_update_biases_upd_kernel), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id,
*gradient[1],
*output_errors_buffer,
block_size,
output_elem_count_per_entry,
entry_count,
block_count);
}
}
bool sparse_fully_connected_1x1_layer_updater_cuda::is_in_place_backprop() const
{
return false;
}
int sparse_fully_connected_1x1_layer_updater_cuda::get_block_size(int entry_count)
{
int block_size = std::min<int>(std::max<int>(static_cast<int>(sqrtf(static_cast<float>(entry_count))), 1), entry_count);
return block_size;
}
void sparse_fully_connected_1x1_layer_updater_cuda::updater_configured()
{
nnforge_shared_ptr<const sparse_convolution_layer> layer_derived = nnforge_dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema);
feature_map_connection_count = layer_derived->feature_map_connection_count;
int input_data_single_update_32block_entry_size = input_elem_count_per_entry * 32 * sizeof(float);
max_entry32_update_block_size = ::max(1, cuda_config->l2_cache_size / 2 / input_data_single_update_32block_entry_size);
int input_data_single_backprop_32block_entry_size = input_elem_count_per_entry * 32 * sizeof(float);
max_entry32_backprop_block_size = ::max(1, cuda_config->l2_cache_size / 2 / input_data_single_backprop_32block_entry_size);
}
std::vector<size_t> sparse_fully_connected_1x1_layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(input_elem_count_per_entry * sizeof(float));
res.push_back(output_elem_count_per_entry * sizeof(float));
return res;
}
void sparse_fully_connected_1x1_layer_updater_cuda::notify_data_custom(const_layer_data_custom_smart_ptr host_data_custom)
{
max_column_index_count_per_row = 0;
const std::vector<int>& row_indices = host_data_custom->at(1);
for(int i = 0; i < row_indices.size() - 1; ++i)
max_column_index_count_per_row = ::max(max_column_index_count_per_row, row_indices[i + 1] - row_indices[i]);
}
std::pair<int, int> sparse_fully_connected_1x1_layer_updater_cuda::get_entry32_update_block_size_and_count(unsigned int entry_count) const
{
int candidate_block_size = (entry_count + 32 - 1) / 32;
if (candidate_block_size <= max_entry32_update_block_size)
return std::make_pair(candidate_block_size, 1);
int candidate_block_count2 = (candidate_block_size + max_entry32_update_block_size - 1) / max_entry32_update_block_size;
int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2;
return std::make_pair(candidate_block_size2, candidate_block_count2);
}
std::pair<int, int> sparse_fully_connected_1x1_layer_updater_cuda::get_entry32_backprop_block_size_and_count(unsigned int entry_count) const
{
int candidate_block_size = (entry_count + 32 - 1) / 32;
if (candidate_block_size <= max_entry32_backprop_block_size)
return std::make_pair(candidate_block_size, 1);
int candidate_block_count2 = (candidate_block_size + max_entry32_backprop_block_size - 1) / max_entry32_backprop_block_size;
int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2;
return std::make_pair(candidate_block_size2, candidate_block_count2);
}
}
}
| f567d1e06404c0c6c815d9a2b3d8982904a9c373.cu | /*
* Copyright 2011-2014 Maxim Milakov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "sparse_fully_connected_1x1_layer_updater_cuda.h"
#include <cuda_runtime.h>
#include "util_cuda.h"
#include "neural_network_cusparse_exception.h"
#include "neural_network_cublas_exception.h"
#include "neural_network_cuda_exception.h"
#include "../sparse_convolution_layer.h"
#define OUTPUT_ELEM_COUNT_BLOCK_SIZE 4
namespace nnforge
{
namespace cuda
{
__global__ void copy_bias_sparse_upd_kernel(
const float * __restrict biases,
float * __restrict output,
int output_neuron_count,
int entry_count)
{
int output_neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int entry_id = (blockIdx.y * blockDim.y + threadIdx.y) * 4;
if ((output_neuron_id < output_neuron_count) && (entry_id < entry_count))
{
float bias = biases[output_neuron_id];
float * current_output = output + (int)(entry_id * output_neuron_count + output_neuron_id);
#pragma unroll
for(int i = 0; i < 4; ++i)
{
if (entry_id < entry_count)
*current_output = bias;
current_output += output_neuron_count;
entry_id++;
}
}
}
__global__ void sparse_fully_connected_update_biases_upd_kernel(
float * __restrict gradient_biases,
const float * __restrict output_errors,
int block_size,
int output_elem_count_per_entry,
int entry_count,
int block_count)
{
int output_neuron_id = blockIdx.x * blockDim.x + threadIdx.x;
int block_id = blockIdx.y * blockDim.y + threadIdx.y;
if ((output_neuron_id < output_elem_count_per_entry) && (block_id < block_count))
{
int base_entry_id = block_size * block_id;
int iteration_count = min(entry_count - base_entry_id, block_size);
const float * current_error = output_errors + (base_entry_id * output_elem_count_per_entry + output_neuron_id);
float sum = 0.0F;
for(int i = 0; i < iteration_count; ++i)
{
sum += *current_error;
current_error += output_elem_count_per_entry;
}
atomicAdd(gradient_biases + output_neuron_id, sum);
}
}
#define OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE 4
__global__ void sparse_fully_connected_backprop_upd_kernel(
const float * __restrict output_errors,
float * __restrict input_errors,
const float * __restrict weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int entry_count,
int entry32_block_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = (thread_id_x >> 5) * OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int max_valid_lane = min(OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE, end_column_index - base_nnz_index);
bool valid[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i)
valid[i] = (i < max_valid_lane);
int column_ids[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
float w[OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i)
{
int index = valid[i] ? base_nnz_index + i : (end_column_index - 1);
column_ids[i] = __load_nc(column_indices + index);
w[i] = __load_nc(weights + index);
}
int base_entry_id = ((blockIdx.z * blockDim.z + threadIdx.z) << 5) * entry32_block_size;
if (base_entry_id >= entry_count)
return;
int lane_id = thread_id_x & 31;
int current_entry_id = base_entry_id + lane_id;
const float * base_output_errors = output_errors + row_id * entry_count;
for(int i = 0; i < entry32_block_size; ++i, current_entry_id += 32)
{
if (current_entry_id < entry_count)
{
float output_error = __load_nc(base_output_errors + current_entry_id);
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BACKPROP_BLOCK_SIZE; ++i)
if (valid[i])
atomicAdd(input_errors + column_ids[i] * entry_count + current_entry_id, output_error * w[i]);
}
}
}
#define OUTPUT_ELEM_COUNT_BLOCK_SIZE 4
extern __shared__ float arr_sh[];
template<bool single_entry_pass>
__global__ void sparse_fully_connected_update_weights_kernel(
const float * __restrict output_errors,
const float * __restrict input_neurons,
float * __restrict gradient_weights,
const int * __restrict column_indices,
const int * __restrict row_ptrs,
int output_elem_count_per_entry,
int entry_count,
int entry32_block_size)
{
int row_id = blockIdx.y * blockDim.y + threadIdx.y;
if (row_id >= output_elem_count_per_entry)
return;
int start_column_index = __load_nc(row_ptrs + row_id);
int end_column_index = __load_nc(row_ptrs + row_id + 1);
int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
int base_column_index_offset = (thread_id_x >> 5) * OUTPUT_ELEM_COUNT_BLOCK_SIZE;
int base_nnz_index = start_column_index + base_column_index_offset;
if (base_nnz_index >= end_column_index)
return;
int max_valid_lane = min(OUTPUT_ELEM_COUNT_BLOCK_SIZE, end_column_index - base_nnz_index);
bool valid[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
valid[i] = (i < max_valid_lane);
int column_ids[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
column_ids[i] = __load_nc(column_indices + (valid[i] ? base_nnz_index + i : (end_column_index - 1)));
int base_entry_id = ((blockIdx.z * blockDim.z + threadIdx.z) << 5) * entry32_block_size;
if (base_entry_id >= entry_count)
return;
int lane_id = thread_id_x & 31;
int current_entry_id = base_entry_id + lane_id;
float sums[OUTPUT_ELEM_COUNT_BLOCK_SIZE];
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] = 0.0F;
const float * base_output_errors = output_errors + row_id * entry_count;
for(int i = 0; i < entry32_block_size; ++i, current_entry_id += 32)
{
if (current_entry_id < entry_count)
{
float output_error = __load_nc(base_output_errors + current_entry_id);
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] += __load_nc(input_neurons + column_ids[i] * entry_count + current_entry_id) * output_error;
}
}
#if __CUDA_ARCH__ < 300
int thread_id = blockDim.x * (threadIdx.z * blockDim.y + threadIdx.y) + threadIdx.x;
int warp_id = thread_id >> 5;
volatile float * arr = arr_sh;
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + i * 32 + lane_id] = sums[i];
#endif
#pragma unroll
for(int tx = 16; tx > 0; tx >>= 1)
{
#if __CUDA_ARCH__ < 300
if (lane_id < tx)
{
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + i * 32 + lane_id] += arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + i * 32 + lane_id + tx];
}
#else
#pragma unroll
for(int i = 0; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
sums[i] += __shfl_xor(sums[i], tx);
#endif
}
#if __CUDA_ARCH__ < 300
if (lane_id < max_valid_lane)
sums[0] = arr[warp_id * (32 * OUTPUT_ELEM_COUNT_BLOCK_SIZE) + lane_id * 32];
#else
#pragma unroll
for(int i = 1; i < OUTPUT_ELEM_COUNT_BLOCK_SIZE; ++i)
if (lane_id == i)
sums[0] = sums[i];
#endif
if (lane_id < max_valid_lane)
{
if (single_entry_pass)
{
gradient_weights[base_nnz_index + lane_id] += sums[0];
}
else
{
atomicAdd(gradient_weights + base_nnz_index + lane_id, sums[0]);
}
}
}
sparse_fully_connected_1x1_layer_updater_cuda::sparse_fully_connected_1x1_layer_updater_cuda()
{
}
sparse_fully_connected_1x1_layer_updater_cuda::~sparse_fully_connected_1x1_layer_updater_cuda()
{
}
void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_test(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_entry,
(entry_count + 4 - 1) / 4,
1);
copy_bias_sparse_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*data[1],
*output_neurons_buffer,
output_elem_count_per_entry,
entry_count);
cusparse_safe_call(cusparseSetStream(cuda_config->get_cusparse_handle(), stream_id));
float alpha = 1.0F;
float beta = 1.0F;
cusparseMatDescr_t mat_descr;
cusparse_safe_call(cusparseCreateMatDescr(&mat_descr));
cusparse_safe_call(cusparseScsrmm(
cuda_config->get_cusparse_handle(),
CUSPARSE_OPERATION_NON_TRANSPOSE,
output_elem_count_per_entry,
entry_count,
input_elem_count_per_entry,
feature_map_connection_count,
&alpha,
mat_descr,
*data[0],
*data_custom[1],
*data_custom[0],
(const float *)(*input_neurons_buffer) + input_elem_count_per_entry * offset_input_entry_id,
input_elem_count_per_entry,
&beta,
*output_neurons_buffer,
output_elem_count_per_entry));
}
void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_backprop(
cudaStream_t stream_id,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const_cuda_linear_buffer_device_smart_ptr output_neurons_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
cuda_linear_buffer_device_smart_ptr input_errors_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
// Too slow
/*
cusparse_safe_call(cusparseSetStream(cuda_config->get_cusparse_handle(), stream_id));
float alpha = 1.0F;
float beta = 0.0F;
cusparseMatDescr_t mat_descr;
cusparse_safe_call(cusparseCreateMatDescr(&mat_descr));
cusparse_safe_call(cusparseScsrmm(
cuda_config->get_cusparse_handle(),
CUSPARSE_OPERATION_TRANSPOSE,
output_elem_count_per_entry,
entry_count,
input_elem_count_per_entry,
feature_map_connection_count,
&alpha,
mat_descr,
*data[0],
*data_custom[1],
*data_custom[0],
*output_errors_buffer,
output_elem_count_per_entry,
&beta,
*input_errors_buffer,
input_elem_count_per_entry));
*/
cuda_util::set_with_value(
*cuda_config,
*additional_buffers[0],
0.0F,
input_elem_count_per_entry * entry_count,
stream_id);
std::pair<int, int> entry32_block_size_and_count = get_entry32_backprop_block_size_and_count(entry_count);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * ((max_column_index_count_per_row + OUTPUT_ELEM_COUNT_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BLOCK_SIZE),
output_elem_count_per_entry,
entry32_block_size_and_count.second,
32);
sparse_fully_connected_backprop_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*additional_buffers[1],
*additional_buffers[0],
*data[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
entry_count,
entry32_block_size_and_count.first);
cublas_safe_call(cublasSetStream(cuda_config->get_cublas_handle(), stream_id));
// transpose input
{
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(cublasSgeam(
cuda_config->get_cublas_handle(),
CUBLAS_OP_T,
CUBLAS_OP_N,
input_elem_count_per_entry,
entry_count,
&alpha,
*additional_buffers[0],
entry_count,
&beta,
*input_errors_buffer,
input_elem_count_per_entry,
*input_errors_buffer,
input_elem_count_per_entry));
}
}
void sparse_fully_connected_1x1_layer_updater_cuda::enqueue_update_weights(
unsigned int offset_input_entry_id,
cudaStream_t stream_id,
const std::vector<cuda_linear_buffer_device_smart_ptr>& gradient,
const std::vector<cuda_linear_buffer_device_smart_ptr>& data_custom,
const std::vector<const_cuda_linear_buffer_device_smart_ptr>& schema_data,
cuda_linear_buffer_device_smart_ptr output_errors_buffer,
const_cuda_linear_buffer_device_smart_ptr input_neurons_buffer,
const std::vector<cuda_linear_buffer_device_smart_ptr>& additional_buffers,
std::vector<cuda_memobject_smart_ptr>& dynamic_memobjects,
unsigned int entry_count)
{
// Update weights
{
cublas_safe_call(cublasSetStream(cuda_config->get_cublas_handle(), stream_id));
// transpose input
{
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(cublasSgeam(
cuda_config->get_cublas_handle(),
CUBLAS_OP_T,
CUBLAS_OP_N,
entry_count,
input_elem_count_per_entry,
&alpha,
(const float *)(*input_neurons_buffer) + input_elem_count_per_entry * offset_input_entry_id,
input_elem_count_per_entry,
&beta,
*additional_buffers[0],
entry_count,
*additional_buffers[0],
entry_count));
}
// transpose output
{
float alpha = 1.0F;
float beta = 0.0F;
cublas_safe_call(cublasSgeam(
cuda_config->get_cublas_handle(),
CUBLAS_OP_T,
CUBLAS_OP_N,
entry_count,
output_elem_count_per_entry,
&alpha,
*output_errors_buffer,
output_elem_count_per_entry,
&beta,
*additional_buffers[1],
entry_count,
*additional_buffers[1],
entry_count));
}
std::pair<int, int> entry32_block_size_and_count = get_entry32_update_block_size_and_count(entry_count);
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
32 * ((max_column_index_count_per_row + OUTPUT_ELEM_COUNT_BLOCK_SIZE - 1) / OUTPUT_ELEM_COUNT_BLOCK_SIZE),
output_elem_count_per_entry,
entry32_block_size_and_count.second,
32);
int threadblock_size = kernel_dims.second.x * kernel_dims.second.y * kernel_dims.second.z;
int smem_size = (cuda_config->get_compute_capability() < 300) ? threadblock_size * OUTPUT_ELEM_COUNT_BLOCK_SIZE * sizeof(float) : 0;
if (entry32_block_size_and_count.second > 1)
{
sparse_fully_connected_update_weights_kernel<false><<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>(
*additional_buffers[1],
*additional_buffers[0],
*gradient[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
entry_count,
entry32_block_size_and_count.first);
}
else
{
sparse_fully_connected_update_weights_kernel<true><<<kernel_dims.first, kernel_dims.second, smem_size, stream_id>>>(
*additional_buffers[1],
*additional_buffers[0],
*gradient[0],
*data_custom[0],
*data_custom[1],
output_elem_count_per_entry,
entry_count,
entry32_block_size_and_count.first);
}
}
// Update biases
{
int block_size = get_block_size(entry_count);
int block_count = (entry_count + block_size - 1) / block_size;
std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access(
*cuda_config,
output_elem_count_per_entry,
block_count,
1);
sparse_fully_connected_update_biases_upd_kernel<<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>(
*gradient[1],
*output_errors_buffer,
block_size,
output_elem_count_per_entry,
entry_count,
block_count);
}
}
bool sparse_fully_connected_1x1_layer_updater_cuda::is_in_place_backprop() const
{
return false;
}
int sparse_fully_connected_1x1_layer_updater_cuda::get_block_size(int entry_count)
{
int block_size = std::min<int>(std::max<int>(static_cast<int>(sqrtf(static_cast<float>(entry_count))), 1), entry_count);
return block_size;
}
void sparse_fully_connected_1x1_layer_updater_cuda::updater_configured()
{
nnforge_shared_ptr<const sparse_convolution_layer> layer_derived = nnforge_dynamic_pointer_cast<const sparse_convolution_layer>(layer_schema);
feature_map_connection_count = layer_derived->feature_map_connection_count;
int input_data_single_update_32block_entry_size = input_elem_count_per_entry * 32 * sizeof(float);
max_entry32_update_block_size = std::max(1, cuda_config->l2_cache_size / 2 / input_data_single_update_32block_entry_size);
int input_data_single_backprop_32block_entry_size = input_elem_count_per_entry * 32 * sizeof(float);
max_entry32_backprop_block_size = std::max(1, cuda_config->l2_cache_size / 2 / input_data_single_backprop_32block_entry_size);
}
std::vector<size_t> sparse_fully_connected_1x1_layer_updater_cuda::get_sizes_of_additional_buffers_per_entry() const
{
std::vector<size_t> res;
res.push_back(input_elem_count_per_entry * sizeof(float));
res.push_back(output_elem_count_per_entry * sizeof(float));
return res;
}
void sparse_fully_connected_1x1_layer_updater_cuda::notify_data_custom(const_layer_data_custom_smart_ptr host_data_custom)
{
max_column_index_count_per_row = 0;
const std::vector<int>& row_indices = host_data_custom->at(1);
for(int i = 0; i < row_indices.size() - 1; ++i)
max_column_index_count_per_row = std::max(max_column_index_count_per_row, row_indices[i + 1] - row_indices[i]);
}
std::pair<int, int> sparse_fully_connected_1x1_layer_updater_cuda::get_entry32_update_block_size_and_count(unsigned int entry_count) const
{
int candidate_block_size = (entry_count + 32 - 1) / 32;
if (candidate_block_size <= max_entry32_update_block_size)
return std::make_pair(candidate_block_size, 1);
int candidate_block_count2 = (candidate_block_size + max_entry32_update_block_size - 1) / max_entry32_update_block_size;
int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2;
return std::make_pair(candidate_block_size2, candidate_block_count2);
}
std::pair<int, int> sparse_fully_connected_1x1_layer_updater_cuda::get_entry32_backprop_block_size_and_count(unsigned int entry_count) const
{
int candidate_block_size = (entry_count + 32 - 1) / 32;
if (candidate_block_size <= max_entry32_backprop_block_size)
return std::make_pair(candidate_block_size, 1);
int candidate_block_count2 = (candidate_block_size + max_entry32_backprop_block_size - 1) / max_entry32_backprop_block_size;
int candidate_block_size2 = (candidate_block_size + candidate_block_count2 - 1) / candidate_block_count2;
return std::make_pair(candidate_block_size2, candidate_block_count2);
}
}
}
|
0d7a37b3a57b235bc6700e18ca3cbe5f691c0850.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "defines.cu"
#include "highlight.cu"
extern "C"
__global__ void cutter_1d_forward(const dtype *input,
const dtype alpha,
const int input_sample_size,
dtype *output,
const dtype beta,
const int output_sample_size,
const int interval_length,
const int limit) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < limit) {
int sample_number = index / interval_length;
int sample_offset = index % interval_length;
dtype x = input[sample_number * input_sample_size + sample_offset] * alpha;
int output_offset = sample_number * output_sample_size + sample_offset;
output[output_offset] = beta ? output[output_offset] * beta + x : x;
}
}
| 0d7a37b3a57b235bc6700e18ca3cbe5f691c0850.cu | #include "defines.cu"
#include "highlight.cu"
extern "C"
__global__ void cutter_1d_forward(const dtype *input,
const dtype alpha,
const int input_sample_size,
dtype *output,
const dtype beta,
const int output_sample_size,
const int interval_length,
const int limit) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < limit) {
int sample_number = index / interval_length;
int sample_offset = index % interval_length;
dtype x = input[sample_number * input_sample_size + sample_offset] * alpha;
int output_offset = sample_number * output_sample_size + sample_offset;
output[output_offset] = beta ? output[output_offset] * beta + x : x;
}
}
|
2cf9c576839518d41f54af86a11908fef9b29a88.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipcub/hipcub.hpp>
#include "caffe2/utils/cub_namespace.cuh"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/rmac_regions_op.h"
#if defined(USE_ROCM)
#include <cfloat>
#endif
#if defined(USE_ROCM)
namespace rocprim {
#else
#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE()
namespace at_cuda_detail {
#endif
namespace cub {
#endif
template <typename KeyT, typename ValueT>
inline __host__ __device__ bool operator<(
const hipcub::KeyValuePair<KeyT, ValueT>& kv1,
const hipcub::KeyValuePair<KeyT, ValueT>& kv2) {
return (kv1.value < kv2.value) ||
(kv1.value == kv2.value && kv2.key < kv1.key);
}
} // namespace cub
#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE()
} // namespace at_cuda_detail
#endif
namespace caffe2 {
namespace {
__global__ void NumRMACRegionsKernel(
const int W,
const int H,
const int min_step,
const int max_step,
const float overlap,
const int scales,
int* num_rois_data) {
// steps(idx) regions for long dimension
typedef hipcub::KeyValuePair<int, float> KeyValuePair; // <step, value>
KeyValuePair kv, min_kv;
min_kv.value = FLT_MAX;
// Local reduction
int minW = min(H, W);
int diff = max(H, W) - minW;
CUDA_1D_KERNEL_LOOP(index, max_step - min_step + 1) {
kv.key = min_step + index;
float b = diff / (1.0 * kv.key);
kv.value = fabsf((minW * minW - minW * b) / (minW * minW) - overlap);
if (kv < min_kv) {
min_kv = kv;
}
}
// Block-wise arg-min reduction to find step
int step;
{
typedef hipcub::BlockReduce<KeyValuePair, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
min_kv = BlockReduce(temp_storage).Reduce(min_kv, hipcub::Min());
__shared__ int step_shared;
if (threadIdx.x == 0) {
step_shared = min_kv.key;
}
__syncthreads();
step = step_shared;
}
// Region overplus per dimension
int Wd = (W > H) ? step : 0;
int Hd = (H > W) ? step : 0;
// Local reduction to compute the total number of rois at all scales
int num_rois = 0;
CUDA_1D_KERNEL_LOOP(index, scales) {
int l = index + 1;
int region_size = 2 * minW / (l + 1);
num_rois += (region_size > 0) ? ((l + Wd) * (l + Hd)) : 0;
}
// Block-wise sum reduction to compute num_rois at all scales
{
typedef hipcub::BlockReduce<int, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
num_rois = BlockReduce(temp_storage).Sum(num_rois);
}
if (threadIdx.x == 0) {
num_rois_data[0] = num_rois;
num_rois_data[1] = Wd;
num_rois_data[2] = Hd;
}
}
__global__ void RMACRegionsKernel(
const int W,
const int H,
const int N,
const int* num_rois_data,
float* output) {
int num_rois = num_rois_data[0];
int Wd = num_rois_data[1];
int Hd = num_rois_data[2];
// Block-wide temp shared storage for intermediate ROI results to avoid
// uncoalesced writes to global mem
__shared__ float output_shared[CAFFE_CUDA_NUM_THREADS * 5];
CUDA_1D_KERNEL_LOOP(index, N) {
int batch_id = index / num_rois;
int roi_id = index % num_rois;
int roi[5];
roi[0] = batch_id;
// Find the scale corresponding to this index and the roi_id relative
// to the scale.
int l = 0;
int num_rois_at_scale = 0;
do {
roi_id -= num_rois_at_scale;
l++;
num_rois_at_scale = (l + Wd) * (l + Hd);
} while (roi_id - num_rois_at_scale >= 0);
int region_size = 2 * min(H, W) / (l + 1);
float bw =
(l + Wd - 1 > 0) ? ((W - region_size) / (1.0 * (l + Wd - 1))) : 0;
float bh =
(l + Hd - 1 > 0) ? ((H - region_size) / (1.0 * (l + Hd - 1))) : 0;
int i = roi_id / (l + Hd);
int j = roi_id % (l + Hd);
roi[1] = bw * i;
roi[2] = bh * j;
// Careful with the borders
if (roi[1] + region_size > W) {
roi[1] -= (roi[1] + region_size - W);
}
if (roi[2] + region_size > H) {
roi[2] -= (roi[2] + region_size - H);
}
roi[3] = roi[1] + region_size - 1;
roi[4] = roi[2] + region_size - 1;
// Writing directly to output (global memory) will result in uncoalesced
// writes. Write output to shared mem first and then write ROI results to
// global output in a coalesced manner.
__syncthreads(); // Since output_shared is reused across loop iterations
for (int i = 0; i < 5; ++i) {
output_shared[threadIdx.x * 5 + i] = roi[i];
}
__syncthreads();
int offset = index - threadIdx.x;
float* output_offset = output + offset * 5;
int num_threads = min(blockDim.x, N - offset); // Active threads in block
for (int i = 0; i < 5; ++i) {
output_offset[num_threads * i + threadIdx.x] =
output_shared[num_threads * i + threadIdx.x];
}
}
}
} // namespace
template <>
bool RMACRegionsOp<CUDAContext>::RunOnDevice() {
const auto& X = Input(0); // Input tensor
// RoIs
if (X.numel() == 0) {
return true;
}
int batch_size = X.dim32(0);
int H = X.dim32(2);
int W = X.dim32(3);
// Compute number of regions
int min_step = 1;
int max_step = 6;
ReinitializeTensor(&num_rois_, {3}, at::dtype<int>().device(CUDA)); // num_rois, Wd, Hd
hipLaunchKernelGGL(( NumRMACRegionsKernel),
dim3(1),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
W,
H,
min_step,
max_step,
overlap_,
scales_,
num_rois_.mutable_data<int>());
C10_HIP_KERNEL_LAUNCH_CHECK();
// Bit awkward, but the size of the output tensor depends on the output of
// NumRMACRegionsKernel (number of RoIs), so need to copy that to CPU
// to Resize() output appropriately.
int num_rois = 0;
context_.CopyBytesToCPU(sizeof(int), num_rois_.data<int>(), &num_rois);
int N = batch_size * num_rois;
auto* output = Output(0, {N, 5}, at::dtype<float>()); // [batch_id x1 y1 x2 y2]
// Compute region coordinates
hipLaunchKernelGGL(( RMACRegionsKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
W, H, N, num_rois_.data<int>(), output->template mutable_data<float>());
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(RMACRegions, RMACRegionsOp<CUDAContext>);
} // namespace caffe2
| 2cf9c576839518d41f54af86a11908fef9b29a88.cu | #include <cub/block/block_reduce.cuh>
#include "caffe2/utils/cub_namespace.cuh"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/rmac_regions_op.h"
#if defined(USE_ROCM)
#include <cfloat>
#endif
#if defined(USE_ROCM)
namespace rocprim {
#else
#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE()
namespace at_cuda_detail {
#endif
namespace cub {
#endif
template <typename KeyT, typename ValueT>
inline __host__ __device__ bool operator<(
const cub::KeyValuePair<KeyT, ValueT>& kv1,
const cub::KeyValuePair<KeyT, ValueT>& kv2) {
return (kv1.value < kv2.value) ||
(kv1.value == kv2.value && kv2.key < kv1.key);
}
} // namespace cub
#if USE_GLOBAL_CUB_WRAPPED_NAMESPACE()
} // namespace at_cuda_detail
#endif
namespace caffe2 {
namespace {
__global__ void NumRMACRegionsKernel(
const int W,
const int H,
const int min_step,
const int max_step,
const float overlap,
const int scales,
int* num_rois_data) {
// steps(idx) regions for long dimension
typedef cub::KeyValuePair<int, float> KeyValuePair; // <step, value>
KeyValuePair kv, min_kv;
min_kv.value = FLT_MAX;
// Local reduction
int minW = min(H, W);
int diff = max(H, W) - minW;
CUDA_1D_KERNEL_LOOP(index, max_step - min_step + 1) {
kv.key = min_step + index;
float b = diff / (1.0 * kv.key);
kv.value = fabsf((minW * minW - minW * b) / (minW * minW) - overlap);
if (kv < min_kv) {
min_kv = kv;
}
}
// Block-wise arg-min reduction to find step
int step;
{
typedef cub::BlockReduce<KeyValuePair, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
min_kv = BlockReduce(temp_storage).Reduce(min_kv, cub::Min());
__shared__ int step_shared;
if (threadIdx.x == 0) {
step_shared = min_kv.key;
}
__syncthreads();
step = step_shared;
}
// Region overplus per dimension
int Wd = (W > H) ? step : 0;
int Hd = (H > W) ? step : 0;
// Local reduction to compute the total number of rois at all scales
int num_rois = 0;
CUDA_1D_KERNEL_LOOP(index, scales) {
int l = index + 1;
int region_size = 2 * minW / (l + 1);
num_rois += (region_size > 0) ? ((l + Wd) * (l + Hd)) : 0;
}
// Block-wise sum reduction to compute num_rois at all scales
{
typedef cub::BlockReduce<int, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
num_rois = BlockReduce(temp_storage).Sum(num_rois);
}
if (threadIdx.x == 0) {
num_rois_data[0] = num_rois;
num_rois_data[1] = Wd;
num_rois_data[2] = Hd;
}
}
__global__ void RMACRegionsKernel(
const int W,
const int H,
const int N,
const int* num_rois_data,
float* output) {
int num_rois = num_rois_data[0];
int Wd = num_rois_data[1];
int Hd = num_rois_data[2];
// Block-wide temp shared storage for intermediate ROI results to avoid
// uncoalesced writes to global mem
__shared__ float output_shared[CAFFE_CUDA_NUM_THREADS * 5];
CUDA_1D_KERNEL_LOOP(index, N) {
int batch_id = index / num_rois;
int roi_id = index % num_rois;
int roi[5];
roi[0] = batch_id;
// Find the scale corresponding to this index and the roi_id relative
// to the scale.
int l = 0;
int num_rois_at_scale = 0;
do {
roi_id -= num_rois_at_scale;
l++;
num_rois_at_scale = (l + Wd) * (l + Hd);
} while (roi_id - num_rois_at_scale >= 0);
int region_size = 2 * min(H, W) / (l + 1);
float bw =
(l + Wd - 1 > 0) ? ((W - region_size) / (1.0 * (l + Wd - 1))) : 0;
float bh =
(l + Hd - 1 > 0) ? ((H - region_size) / (1.0 * (l + Hd - 1))) : 0;
int i = roi_id / (l + Hd);
int j = roi_id % (l + Hd);
roi[1] = bw * i;
roi[2] = bh * j;
// Careful with the borders
if (roi[1] + region_size > W) {
roi[1] -= (roi[1] + region_size - W);
}
if (roi[2] + region_size > H) {
roi[2] -= (roi[2] + region_size - H);
}
roi[3] = roi[1] + region_size - 1;
roi[4] = roi[2] + region_size - 1;
// Writing directly to output (global memory) will result in uncoalesced
// writes. Write output to shared mem first and then write ROI results to
// global output in a coalesced manner.
__syncthreads(); // Since output_shared is reused across loop iterations
for (int i = 0; i < 5; ++i) {
output_shared[threadIdx.x * 5 + i] = roi[i];
}
__syncthreads();
int offset = index - threadIdx.x;
float* output_offset = output + offset * 5;
int num_threads = min(blockDim.x, N - offset); // Active threads in block
for (int i = 0; i < 5; ++i) {
output_offset[num_threads * i + threadIdx.x] =
output_shared[num_threads * i + threadIdx.x];
}
}
}
} // namespace
template <>
bool RMACRegionsOp<CUDAContext>::RunOnDevice() {
const auto& X = Input(0); // Input tensor
// RoIs
if (X.numel() == 0) {
return true;
}
int batch_size = X.dim32(0);
int H = X.dim32(2);
int W = X.dim32(3);
// Compute number of regions
int min_step = 1;
int max_step = 6;
ReinitializeTensor(&num_rois_, {3}, at::dtype<int>().device(CUDA)); // num_rois, Wd, Hd
NumRMACRegionsKernel<<<
1,
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
W,
H,
min_step,
max_step,
overlap_,
scales_,
num_rois_.mutable_data<int>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
// Bit awkward, but the size of the output tensor depends on the output of
// NumRMACRegionsKernel (number of RoIs), so need to copy that to CPU
// to Resize() output appropriately.
int num_rois = 0;
context_.CopyBytesToCPU(sizeof(int), num_rois_.data<int>(), &num_rois);
int N = batch_size * num_rois;
auto* output = Output(0, {N, 5}, at::dtype<float>()); // [batch_id x1 y1 x2 y2]
// Compute region coordinates
RMACRegionsKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
W, H, N, num_rois_.data<int>(), output->template mutable_data<float>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(RMACRegions, RMACRegionsOp<CUDAContext>);
} // namespace caffe2
|
a6a740a4d029cd0df29a88934e26309d06437408.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "bmpfile.h"
/*Mandelbrot values*/
#define RESOLUTION 8700.0
#define XCENTER -0.55
#define YCENTER 0.4
#define MAX_ITER 1000/2
/*Colour Values*/
#define COLOUR_DEPTH 255
#define COLOUR_MAX 100.0
#define GRADIENT_COLOUR_MAX 200.0
#define FILENAME "my_mandelbrot_fractal3.bmp"
#define BLOCKSIZE 16
/**
* Computes the color gradiant
* color: the output vector
* x: the gradiant (beetween 0 and 360)
* min and max: variation of the RGB channels (Move3D 0 -> 1)
* Check wiki for more details on the colour science: en.wikipedia.org/wiki/HSL_and_HSV
*/
void GroundColorMix(double* color, double x, double min, double max)
{
/*
* Red = 0
* Green = 1
* Blue = 2
*/
double posSlope = (max-min)/60;
double negSlope = (min-max)/60;
if( x < 60 )
{
color[0] = max;
color[1] = posSlope*x+min;
color[2] = min;
return;
}
else if ( x < 120 )
{
color[0] = negSlope*x+2.0*max+min;
color[1] = max;
color[2] = min;
return;
}
else if ( x < 180 )
{
color[0] = min;
color[1] = max;
color[2] = posSlope*x-2.0*max+min;
return;
}
else if ( x < 240 )
{
color[0] = min;
color[1] = negSlope*x+4.0*max+min;
color[2] = max;
return;
}
else if ( x < 300 )
{
color[0] = posSlope*x-4.0*max+min;
color[1] = min;
color[2] = max;
return;
}
else
{
color[0] = max;
color[1] = min;
color[2] = negSlope*x+6*max;
return;
}
}
/* Mandelbrot Set Image Demonstration
*
* This is a simple single-process/single thread implementation
* that computes a mandelbrot set and produces a corresponding
* Bitmap image. The program demonstrates the use of a colour
* gradient
*
* This program uses the algorithm outlined in:
* "Building Parallel Programs: SMPs, Clusters And Java", Alan Kaminsky
*
* This program requires libbmp for all bitmap operations.
*
*/
/*
* Function to read in height and width values for the bmp
* file to be produce. Exits if less than 3 arguments given.
*/
void getParameters(int argc, char** argv, int* height, int* width, size_t* size){
if (argc < 3 || argv[1] < 0 || argv[2] < 0){
printf("Usage: <mandelbrot_parallel> <height> <width> \n\n");
exit(EXIT_SUCCESS);
}
*height = atoi(argv[1]);
*width = atoi(argv[2]);
*size = *height * *width;
};
/*
* Structure to hold metadata needed for the production of a series
* of pixels for Mandelbrot fractal.
*/
typedef struct {
int width;
int height;
float xcenter;
float ycenter;
float resolution;
int iterations;
} Mandelbrot;
/*
* Kernel functions which returns an array to output of 'iter' values
* to be used in color function.
*/
/*
* KERNEL FUNCTION TO POPULATE VALUES
*/
//col and row values to be worked out from thread position?
//double x = XCENTER + (xoffset + col) / RESOLUTION;
//double y = YCENTER + (yoffset - row) / RESOLUTION;
/*
* We are going to want a thread to do each of the pixels. Therefore
* we're going to need x*y threads giving us (x*y)/1024 blocks.
*
* If we structure the blocks in a 2d grid, we can just have them
* pluck out their x,y pixel based on their own x,y location.
*
* They can return to x+y...this causes a clash...
*
* To do the calculation each thread will need an array of
* x values and an array of y values.
*
* To calculate x we need XCENTER, RESOLUTION, and xoffset.
* To calculate y we need YCENTER, RESOLUTION, and yoffset.
*
* We want to return an iter value corresponding to that pixel that
* it represents.
*/
__global__ void MandelbrotFractal(float* output, Mandelbrot M)
{
//get information from 2D block/thread grid
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
//we're only interested in processing threads that fall within
//the boundaries of the picure
if (row < M.width && col < M.height){
int xoffset = -(M.width - 1) /2;
int yoffset = (M.height -1) / 2;
//Determine where in the mandelbrot set, the pixel is referencing
double x = M.xcenter + (xoffset + col) / M.resolution;
double y = M.ycenter + (yoffset - row) / M.resolution;
//Mandelbrot stuff
double a = 0;
double b = 0;
double aold = 0;
double bold = 0;
double zmagsqr = 0;
int iter = 0; //import one!
//Check if the x,y coord are part of the mendelbrot set - refer to the algorithm
while(iter < M.iterations && zmagsqr <= 4.0){
++iter;
a = (aold * aold) - (bold * bold) + x;
b = 2.0 * aold*bold + y;
zmagsqr = a*a + b*b;
aold = a;
bold = b;
}
//output is a 1D array, so we need to index using our row and
//column number
output[row * M.width + col] = iter;
}
}
/*
* Function to package globals for easier sending to
* device.
*/
void makeMandel(MandelBrot* M){
M.iterations = MAX_ITER;
M.resolution = RESOLUTION;
M.ycenter = YCENTER;
M.xcenter = XCENTER;
}
int main(int argc, char **argv)
{
int height, width;
size_t size;
hipError_t error;
getParameters(argc, argv, &height, &width, &size);
bmpfile_t *bmp;
rgb_pixel_t pixel = {0, 0, 0, 0};
int xoffset = -(width - 1) /2;
int yoffset = (height -1) / 2;
bmp = bmp_create(width, height, 32);
Mandelbrot h_mandel;
makeMandel(&h_mandel);
h_mandel.width = width;
h_mandel.height = height;
//memory to hold results
float* h_xy = ( float*) malloc (size);
//allocate device memory
float* d_xy;
error = hipMalloc(&d_xy, size);
if (error != hipSuccess)
{
printf("hipMalloc d_xy returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
Mandelbrot* d_Mandel;
error = hipMalloc(&d_Mandel, sizeof(Mandelbrot));
if (error != hipSuccess)
{
printf("hipMalloc d_Mandel returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
//copy Mandelbrot metadata to device
error = hipMemcpy(d_Mandel, h_mandel, sizeof(Mandelbrot), hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_Mandel,h_mandel) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
//figure out blocks
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//figure out threads
dim3 dimGrid(h_mandel.width / dimBlock.x + 1, h_mandel.height / dimBlock.y + 1);
//call kernel function
hipLaunchKernelGGL(( MandelbrotFractal), dim3(dimGrid), dim3(dimBlock), 0, 0, d_xy, d_Mandel);
//get data from kernel to device
error = hipMemcpy(h_xy, d_xy, size, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_xy,d_xy) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
/* Generate the colour of the pixel from the **iter** value */
/* You can mess around with the colour settings to use different gradients */
/* Colour currently maps from royal blue to red */
/* We're interested in iter */
int i;
for (i = 0; i < width*height; i++){
x_col = (COLOUR_MAX - (( ((float) iter / ((float) MAX_ITER) * GRADIENT_COLOUR_MAX))));
GroundColorMix(color, x_col, 1, COLOUR_DEPTH);
pixel.red = color[0];
pixel.green = color[1];
pixel.blue = color[2];
int row = i/width;
int col = i % width;
//adds pixel color to image
bmp_set_pixel(bmp, col, row, pixel);
}
bmp_save(bmp, FILENAME);
//free all memory used
bmp_destroy(bmp);
free(h_xy);
free(h_mandel);
hipFree(d_xy);
hipFree(d_Mandel);
return 0;
}
| a6a740a4d029cd0df29a88934e26309d06437408.cu | #include <stdio.h>
#include <stdlib.h>
#include "bmpfile.h"
/*Mandelbrot values*/
#define RESOLUTION 8700.0
#define XCENTER -0.55
#define YCENTER 0.4
#define MAX_ITER 1000/2
/*Colour Values*/
#define COLOUR_DEPTH 255
#define COLOUR_MAX 100.0
#define GRADIENT_COLOUR_MAX 200.0
#define FILENAME "my_mandelbrot_fractal3.bmp"
#define BLOCKSIZE 16
/**
* Computes the color gradiant
* color: the output vector
* x: the gradiant (beetween 0 and 360)
* min and max: variation of the RGB channels (Move3D 0 -> 1)
* Check wiki for more details on the colour science: en.wikipedia.org/wiki/HSL_and_HSV
*/
void GroundColorMix(double* color, double x, double min, double max)
{
/*
* Red = 0
* Green = 1
* Blue = 2
*/
double posSlope = (max-min)/60;
double negSlope = (min-max)/60;
if( x < 60 )
{
color[0] = max;
color[1] = posSlope*x+min;
color[2] = min;
return;
}
else if ( x < 120 )
{
color[0] = negSlope*x+2.0*max+min;
color[1] = max;
color[2] = min;
return;
}
else if ( x < 180 )
{
color[0] = min;
color[1] = max;
color[2] = posSlope*x-2.0*max+min;
return;
}
else if ( x < 240 )
{
color[0] = min;
color[1] = negSlope*x+4.0*max+min;
color[2] = max;
return;
}
else if ( x < 300 )
{
color[0] = posSlope*x-4.0*max+min;
color[1] = min;
color[2] = max;
return;
}
else
{
color[0] = max;
color[1] = min;
color[2] = negSlope*x+6*max;
return;
}
}
/* Mandelbrot Set Image Demonstration
*
* This is a simple single-process/single thread implementation
* that computes a mandelbrot set and produces a corresponding
* Bitmap image. The program demonstrates the use of a colour
* gradient
*
* This program uses the algorithm outlined in:
* "Building Parallel Programs: SMPs, Clusters And Java", Alan Kaminsky
*
* This program requires libbmp for all bitmap operations.
*
*/
/*
* Function to read in height and width values for the bmp
* file to be produce. Exits if less than 3 arguments given.
*/
void getParameters(int argc, char** argv, int* height, int* width, size_t* size){
if (argc < 3 || argv[1] < 0 || argv[2] < 0){
printf("Usage: <mandelbrot_parallel> <height> <width> \n\n");
exit(EXIT_SUCCESS);
}
*height = atoi(argv[1]);
*width = atoi(argv[2]);
*size = *height * *width;
};
/*
* Structure to hold metadata needed for the production of a series
* of pixels for Mandelbrot fractal.
*/
typedef struct {
int width;
int height;
float xcenter;
float ycenter;
float resolution;
int iterations;
} Mandelbrot;
/*
* Kernel functions which returns an array to output of 'iter' values
* to be used in color function.
*/
/*
* KERNEL FUNCTION TO POPULATE VALUES
*/
//col and row values to be worked out from thread position?
//double x = XCENTER + (xoffset + col) / RESOLUTION;
//double y = YCENTER + (yoffset - row) / RESOLUTION;
/*
* We are going to want a thread to do each of the pixels. Therefore
* we're going to need x*y threads giving us (x*y)/1024 blocks.
*
* If we structure the blocks in a 2d grid, we can just have them
* pluck out their x,y pixel based on their own x,y location.
*
* They can return to x+y...this causes a clash...
*
* To do the calculation each thread will need an array of
* x values and an array of y values.
*
* To calculate x we need XCENTER, RESOLUTION, and xoffset.
* To calculate y we need YCENTER, RESOLUTION, and yoffset.
*
* We want to return an iter value corresponding to that pixel that
* it represents.
*/
__global__ void MandelbrotFractal(float* output, Mandelbrot M)
{
//get information from 2D block/thread grid
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
//we're only interested in processing threads that fall within
//the boundaries of the picure
if (row < M.width && col < M.height){
int xoffset = -(M.width - 1) /2;
int yoffset = (M.height -1) / 2;
//Determine where in the mandelbrot set, the pixel is referencing
double x = M.xcenter + (xoffset + col) / M.resolution;
double y = M.ycenter + (yoffset - row) / M.resolution;
//Mandelbrot stuff
double a = 0;
double b = 0;
double aold = 0;
double bold = 0;
double zmagsqr = 0;
int iter = 0; //import one!
//Check if the x,y coord are part of the mendelbrot set - refer to the algorithm
while(iter < M.iterations && zmagsqr <= 4.0){
++iter;
a = (aold * aold) - (bold * bold) + x;
b = 2.0 * aold*bold + y;
zmagsqr = a*a + b*b;
aold = a;
bold = b;
}
//output is a 1D array, so we need to index using our row and
//column number
output[row * M.width + col] = iter;
}
}
/*
* Function to package globals for easier sending to
* device.
*/
void makeMandel(MandelBrot* M){
M.iterations = MAX_ITER;
M.resolution = RESOLUTION;
M.ycenter = YCENTER;
M.xcenter = XCENTER;
}
int main(int argc, char **argv)
{
int height, width;
size_t size;
cudaError_t error;
getParameters(argc, argv, &height, &width, &size);
bmpfile_t *bmp;
rgb_pixel_t pixel = {0, 0, 0, 0};
int xoffset = -(width - 1) /2;
int yoffset = (height -1) / 2;
bmp = bmp_create(width, height, 32);
Mandelbrot h_mandel;
makeMandel(&h_mandel);
h_mandel.width = width;
h_mandel.height = height;
//memory to hold results
float* h_xy = ( float*) malloc (size);
//allocate device memory
float* d_xy;
error = cudaMalloc(&d_xy, size);
if (error != cudaSuccess)
{
printf("cudaMalloc d_xy returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
Mandelbrot* d_Mandel;
error = cudaMalloc(&d_Mandel, sizeof(Mandelbrot));
if (error != cudaSuccess)
{
printf("cudaMalloc d_Mandel returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
//copy Mandelbrot metadata to device
error = cudaMemcpy(d_Mandel, h_mandel, sizeof(Mandelbrot), cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_Mandel,h_mandel) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
//figure out blocks
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
//figure out threads
dim3 dimGrid(h_mandel.width / dimBlock.x + 1, h_mandel.height / dimBlock.y + 1);
//call kernel function
MandelbrotFractal<<<dimGrid, dimBlock>>>(d_xy, d_Mandel);
//get data from kernel to device
error = cudaMemcpy(h_xy, d_xy, size, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_xy,d_xy) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
/* Generate the colour of the pixel from the **iter** value */
/* You can mess around with the colour settings to use different gradients */
/* Colour currently maps from royal blue to red */
/* We're interested in iter */
int i;
for (i = 0; i < width*height; i++){
x_col = (COLOUR_MAX - (( ((float) iter / ((float) MAX_ITER) * GRADIENT_COLOUR_MAX))));
GroundColorMix(color, x_col, 1, COLOUR_DEPTH);
pixel.red = color[0];
pixel.green = color[1];
pixel.blue = color[2];
int row = i/width;
int col = i % width;
//adds pixel color to image
bmp_set_pixel(bmp, col, row, pixel);
}
bmp_save(bmp, FILENAME);
//free all memory used
bmp_destroy(bmp);
free(h_xy);
free(h_mandel);
cudaFree(d_xy);
cudaFree(d_Mandel);
return 0;
}
|
ddc4fb44c20cffb4f8cfcc6d8446dbc96d8ed656.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
/*
* In the current application, `N` is larger than the grid.
* Refactor this kernel to use a grid-stride loop in order that
* each parallel thread work on more than one element of the array.
*/
__global__
void doubleElements(int *a, int N)
{
int i;
int scount;
i = blockIdx.x * blockDim.x + threadIdx.x;
scount = gridDim.x * blockDim.x;
for (; i < N; i += scount) // In this GPU thread: Jump and run all possible data elements
{
a[i] *= 2;
}
}
bool checkElementsAreDoubled(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
if (a[i] != i*2) return false;
}
return true;
}
int main()
{
/*
* `N` is greater than the size of the grid (see below).
*/
int N = 10000;
int *a;
size_t size = N * sizeof(int);
hipMallocManaged(&a, size);
init(a, N);
/*
* The size of this grid is 256*32 = 8192.
*/
size_t threads_per_block = 256;
size_t number_of_blocks = 32;
hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, N);
hipDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
hipFree(a);
}
| ddc4fb44c20cffb4f8cfcc6d8446dbc96d8ed656.cu | #include <stdio.h>
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
/*
* In the current application, `N` is larger than the grid.
* Refactor this kernel to use a grid-stride loop in order that
* each parallel thread work on more than one element of the array.
*/
__global__
void doubleElements(int *a, int N)
{
int i;
int scount;
i = blockIdx.x * blockDim.x + threadIdx.x;
scount = gridDim.x * blockDim.x;
for (; i < N; i += scount) // In this GPU thread: Jump and run all possible data elements
{
a[i] *= 2;
}
}
bool checkElementsAreDoubled(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
if (a[i] != i*2) return false;
}
return true;
}
int main()
{
/*
* `N` is greater than the size of the grid (see below).
*/
int N = 10000;
int *a;
size_t size = N * sizeof(int);
cudaMallocManaged(&a, size);
init(a, N);
/*
* The size of this grid is 256*32 = 8192.
*/
size_t threads_per_block = 256;
size_t number_of_blocks = 32;
doubleElements<<<number_of_blocks, threads_per_block>>>(a, N);
cudaDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
cudaFree(a);
}
|
ef29800d01557c86b118566236ef47933d1788b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_hardswish(const float *input_, float *output_, int n_data_size_)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n_data_size_)return;
if (input_[i] >= 3.0f)
{
output_[i] = input_[i];
}
else if (input_[i] <= -3.0f)
{
output_[i] = 0.0f;
}
else
{
output_[i] = input_[i] * (input_[i] + 3.0f) / 6.0f;
}
} | ef29800d01557c86b118566236ef47933d1788b0.cu | #include "includes.h"
__global__ void kernel_hardswish(const float *input_, float *output_, int n_data_size_)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i >= n_data_size_)return;
if (input_[i] >= 3.0f)
{
output_[i] = input_[i];
}
else if (input_[i] <= -3.0f)
{
output_[i] = 0.0f;
}
else
{
output_[i] = input_[i] * (input_[i] + 3.0f) / 6.0f;
}
} |
5fc6fbf20305f5d87cc2a38992f4082e537c83d5.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2018 Adam Sierakowski and Daniel Willen,
* The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include <hip/hip_runtime.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h> // FOR DEBUG!!
#include <helper_cuda.h>
#include "cuda_bluebottle.h"
#include "cuda_particle.h"
__constant__ dom_struct _dom;
cuda_blocks_struct blocks;
extern "C"
void cuda_check_errors(int line)
{
printf("N%d >> Checking errors on line %d\n", rank, line);
checkCudaErrors(hipDeviceSynchronize());
}
extern "C"
int cuda_device_count(void)
{
// Get number of cuda devices
int dev_count = 0;
checkCudaErrors(hipGetDeviceCount(&dev_count));
return dev_count;
}
extern "C"
void cuda_device_init(int device)
{
checkCudaErrors(hipSetDevice(device));
}
extern "C"
void cuda_enable_peer(void)
{
int target_peer = (rank + 1) % nprocs;
printf("Enabling peer access from %d to %d\n", rank, target_peer);
checkCudaErrors(hipDeviceEnablePeerAccess(target_peer, 0));
}
extern "C"
void cuda_block(void)
{
hipDeviceSynchronize();
}
extern "C"
void cuda_dom_malloc_host(void)
{
//printf("N%d >> Allocating pinned host memory... \n", rank);
// Allocate (pinned) device memory on host
checkCudaErrors(hipHostMalloc(&p, dom[rank].Gcc.s3b * sizeof(real)));
cpumem += dom[rank].Gcc.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&p0, dom[rank].Gcc.s3b * sizeof(real)));
cpumem += dom[rank].Gcc.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&phi, dom[rank].Gcc.s3b * sizeof(real)));
cpumem += dom[rank].Gcc.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&u, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&v, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&w, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&u0, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&v0, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&w0, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&conv_u, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&conv_v, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&conv_w, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&conv0_u, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&conv0_v, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&conv0_w, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&diff_u, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&diff_v, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&diff_w, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&diff0_u, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&diff0_v, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&diff0_w, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&f_x, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&f_y, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&f_z, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&u_star, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&v_star, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&w_star, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(hipHostMalloc(&flag_u, dom[rank].Gfx.s3b * sizeof(int)));
cpumem += dom[rank].Gfx.s3b * sizeof(int);
checkCudaErrors(hipHostMalloc(&flag_v, dom[rank].Gfy.s3b * sizeof(int)));
cpumem += dom[rank].Gfy.s3b * sizeof(int);
checkCudaErrors(hipHostMalloc(&flag_w, dom[rank].Gfz.s3b * sizeof(int)));
cpumem += dom[rank].Gfz.s3b * sizeof(int);
}
extern "C"
void cuda_dom_malloc_dev(void)
{
// Allocate device memory on device
// Don't need to free device constant memory
checkCudaErrors(hipMemcpyToSymbol(_dom, &dom[rank], sizeof(dom_struct)));
checkCudaErrors(hipMalloc((void**) &_DOM, sizeof(dom_struct)));
gpumem += sizeof(dom_struct);
checkCudaErrors(hipMemcpy(_DOM, &DOM, sizeof(dom_struct),
hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void**) &_bc, sizeof(BC)));
gpumem += sizeof(BC);
checkCudaErrors(hipMemcpy(_bc, &bc, sizeof(BC),
hipMemcpyHostToDevice));
/* Flow solver variables */
checkCudaErrors(hipMalloc(&_phi, dom[rank].Gcc.s3b * sizeof(real)));
gpumem += dom[rank].Gcc.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_phinoghost, dom[rank].Gcc.s3 * sizeof(real)));
gpumem += dom[rank].Gcc.s3 * sizeof(real);
checkCudaErrors(hipMalloc(&_invM, dom[rank].Gcc.s3 * sizeof(real)));
gpumem += dom[rank].Gcc.s3 * sizeof(real);
checkCudaErrors(hipMalloc(&_p, dom[rank].Gcc.s3b * sizeof(real)));
gpumem += dom[rank].Gcc.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_p0, dom[rank].Gcc.s3b * sizeof(real)));
gpumem += dom[rank].Gcc.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_u, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_v, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_w, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_u0, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_v0, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_w0, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_conv_u, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_conv_v, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_conv_w, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_conv0_u, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_conv0_v, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_conv0_w, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_diff_u, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_diff_v, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_diff_w, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_diff0_u, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_diff0_v, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_diff0_w, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_f_x, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_f_y, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_f_z, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_u_star, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_v_star, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_w_star, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
// Flags
checkCudaErrors(hipMalloc(&_flag_u, dom[rank].Gfx.s3b * sizeof(int)));
gpumem += dom[rank].Gfx.s3b * sizeof(int);
checkCudaErrors(hipMalloc(&_flag_v, dom[rank].Gfy.s3b * sizeof(int)));
gpumem += dom[rank].Gfy.s3b * sizeof(int);
checkCudaErrors(hipMalloc(&_flag_w, dom[rank].Gfz.s3b * sizeof(int)));
gpumem += dom[rank].Gfz.s3b * sizeof(int);
/* Poisson Equation Variables */
checkCudaErrors(hipMalloc(&_r_q, dom[rank].Gcc.s3 * sizeof(real)));
gpumem += dom[rank].Gcc.s3 * sizeof(real);
checkCudaErrors(hipMalloc(&_z_q, dom[rank].Gcc.s3 * sizeof(real)));
gpumem += dom[rank].Gcc.s3 * sizeof(real);
//checkCudaErrors(hipMalloc(&_rs_0, dom[rank].Gcc.s3 * sizeof(real)));
// gpumem += dom[rank].Gcc.s3 * sizeof(real);
checkCudaErrors(hipMalloc(&_p_q, dom[rank].Gcc.s3 * sizeof(real)));
gpumem += dom[rank].Gcc.s3 * sizeof(real);
//checkCudaErrors(hipMalloc(&_s_q, dom[rank].Gcc.s3 * sizeof(real)));
// gpumem += dom[rank].Gcc.s3 * sizeof(real);
checkCudaErrors(hipMalloc(&_Apb_q, dom[rank].Gcc.s3 * sizeof(real)));
gpumem += dom[rank].Gcc.s3 * sizeof(real);
//checkCudaErrors(hipMalloc(&_Asb_q, dom[rank].Gcc.s3 * sizeof(real)));
// gpumem += dom[rank].Gcc.s3 * sizeof(real);
// These are s3b because the SpMv requires more info
checkCudaErrors(hipMalloc(&_rhs_p, dom[rank].Gcc.s3b * sizeof(real)));
gpumem += dom[rank].Gcc.s3b * sizeof(real);
checkCudaErrors(hipMalloc(&_pb_q, dom[rank].Gcc.s3b * sizeof(real)));
gpumem += dom[rank].Gcc.s3b * sizeof(real);
//checkCudaErrors(hipMalloc(&_sb_q, dom[rank].Gcc.s3b * sizeof(real)));
// gpumem += dom[rank].Gcc.s3b * sizeof(real);
/* Subdomain communication variables */
// Outer computational planes
checkCudaErrors(hipMalloc(&_send_Gcc_e, dom[rank].Gcc.s2_i * sizeof(real)));
gpumem += dom[rank].Gcc.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gcc_w, dom[rank].Gcc.s2_i * sizeof(real)));
gpumem += dom[rank].Gcc.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gcc_n, dom[rank].Gcc.s2_j * sizeof(real)));
gpumem += dom[rank].Gcc.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gcc_s, dom[rank].Gcc.s2_j * sizeof(real)));
gpumem += dom[rank].Gcc.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gcc_t, dom[rank].Gcc.s2_k * sizeof(real)));
gpumem += dom[rank].Gcc.s2_k * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gcc_b, dom[rank].Gcc.s2_k * sizeof(real)));
gpumem += dom[rank].Gcc.s2_k * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfx_e, dom[rank].Gfx.s2_i * sizeof(real)));
gpumem += dom[rank].Gfx.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfx_w, dom[rank].Gfx.s2_i * sizeof(real)));
gpumem += dom[rank].Gfx.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfx_n, dom[rank].Gfx.s2_j * sizeof(real)));
gpumem += dom[rank].Gfx.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfx_s, dom[rank].Gfx.s2_j * sizeof(real)));
gpumem += dom[rank].Gfx.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfx_t, dom[rank].Gfx.s2_k * sizeof(real)));
gpumem += dom[rank].Gfx.s2_k * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfx_b, dom[rank].Gfx.s2_k * sizeof(real)));
gpumem += dom[rank].Gfx.s2_k * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfy_e, dom[rank].Gfy.s2_i * sizeof(real)));
gpumem += dom[rank].Gfy.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfy_w, dom[rank].Gfy.s2_i * sizeof(real)));
gpumem += dom[rank].Gfy.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfy_n, dom[rank].Gfy.s2_j * sizeof(real)));
gpumem += dom[rank].Gfy.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfy_s, dom[rank].Gfy.s2_j * sizeof(real)));
gpumem += dom[rank].Gfy.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfy_t, dom[rank].Gfy.s2_k * sizeof(real)));
gpumem += dom[rank].Gfy.s2_k * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfy_b, dom[rank].Gfy.s2_k * sizeof(real)));
gpumem += dom[rank].Gfy.s2_k * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfz_e, dom[rank].Gfz.s2_i * sizeof(real)));
gpumem += dom[rank].Gfz.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfz_w, dom[rank].Gfz.s2_i * sizeof(real)));
gpumem += dom[rank].Gfz.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfz_n, dom[rank].Gfz.s2_j * sizeof(real)));
gpumem += dom[rank].Gfz.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfz_s, dom[rank].Gfz.s2_j * sizeof(real)));
gpumem += dom[rank].Gfz.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfz_t, dom[rank].Gfz.s2_k * sizeof(real)));
gpumem += dom[rank].Gfz.s2_k * sizeof(real);
checkCudaErrors(hipMalloc(&_send_Gfz_b, dom[rank].Gfz.s2_k * sizeof(real)));
gpumem += dom[rank].Gfz.s2_k * sizeof(real);
// Ghost cell planes
checkCudaErrors(hipMalloc(&_recv_Gcc_e, dom[rank].Gcc.s2_i * sizeof(real)));
gpumem += dom[rank].Gcc.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gcc_w, dom[rank].Gcc.s2_i * sizeof(real)));
gpumem += dom[rank].Gcc.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gcc_n, dom[rank].Gcc.s2_j * sizeof(real)));
gpumem += dom[rank].Gcc.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gcc_s, dom[rank].Gcc.s2_j * sizeof(real)));
gpumem += dom[rank].Gcc.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gcc_t, dom[rank].Gcc.s2_k * sizeof(real)));
gpumem += dom[rank].Gcc.s2_k * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gcc_b, dom[rank].Gcc.s2_k * sizeof(real)));
gpumem += dom[rank].Gcc.s2_k * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfx_e, dom[rank].Gfx.s2_i * sizeof(real)));
gpumem += dom[rank].Gfx.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfx_w, dom[rank].Gfx.s2_i * sizeof(real)));
gpumem += dom[rank].Gfx.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfx_n, dom[rank].Gfx.s2_j * sizeof(real)));
gpumem += dom[rank].Gfx.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfx_s, dom[rank].Gfx.s2_j * sizeof(real)));
gpumem += dom[rank].Gfx.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfx_t, dom[rank].Gfx.s2_k * sizeof(real)));
gpumem += dom[rank].Gfx.s2_k * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfx_b, dom[rank].Gfx.s2_k * sizeof(real)));
gpumem += dom[rank].Gfx.s2_k * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfy_e, dom[rank].Gfy.s2_i * sizeof(real)));
gpumem += dom[rank].Gfy.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfy_w, dom[rank].Gfy.s2_i * sizeof(real)));
gpumem += dom[rank].Gfy.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfy_n, dom[rank].Gfy.s2_j * sizeof(real)));
gpumem += dom[rank].Gfy.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfy_s, dom[rank].Gfy.s2_j * sizeof(real)));
gpumem += dom[rank].Gfy.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfy_t, dom[rank].Gfy.s2_k * sizeof(real)));
gpumem += dom[rank].Gfy.s2_k * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfy_b, dom[rank].Gfy.s2_k * sizeof(real)));
gpumem += dom[rank].Gfy.s2_k * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfz_e, dom[rank].Gfz.s2_i * sizeof(real)));
gpumem += dom[rank].Gfz.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfz_w, dom[rank].Gfz.s2_i * sizeof(real)));
gpumem += dom[rank].Gfz.s2_i * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfz_n, dom[rank].Gfz.s2_j * sizeof(real)));
gpumem += dom[rank].Gfz.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfz_s, dom[rank].Gfz.s2_j * sizeof(real)));
gpumem += dom[rank].Gfz.s2_j * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfz_t, dom[rank].Gfz.s2_k * sizeof(real)));
gpumem += dom[rank].Gfz.s2_k * sizeof(real);
checkCudaErrors(hipMalloc(&_recv_Gfz_b, dom[rank].Gfz.s2_k * sizeof(real)));
gpumem += dom[rank].Gfz.s2_k * sizeof(real);
// Init things that we will need
checkCudaErrors(hipMemset(_u, 0., dom[rank].Gfx.s3b * sizeof(real)));
checkCudaErrors(hipMemset(_v, 0., dom[rank].Gfy.s3b * sizeof(real)));
checkCudaErrors(hipMemset(_w, 0., dom[rank].Gfz.s3b * sizeof(real)));
checkCudaErrors(hipMemset(_p, 0., dom[rank].Gcc.s3b * sizeof(real)));
checkCudaErrors(hipMemset(_u0, 0., dom[rank].Gfx.s3b * sizeof(real)));
checkCudaErrors(hipMemset(_v0, 0., dom[rank].Gfy.s3b * sizeof(real)));
checkCudaErrors(hipMemset(_w0, 0., dom[rank].Gfz.s3b * sizeof(real)));
checkCudaErrors(hipMemset(_p0, 0., dom[rank].Gcc.s3b * sizeof(real)));
checkCudaErrors(hipMemset(_phi, 0., dom[rank].Gcc.s3b * sizeof(real)));
checkCudaErrors(hipMemset(_rhs_p, 0., dom[rank].Gcc.s3b * sizeof(real)));
checkCudaErrors(hipMemset(_p_q, 0., dom[rank].Gcc.s3 * sizeof(real)));
checkCudaErrors(hipMemset(_pb_q, 0., dom[rank].Gcc.s3b * sizeof(real)));
//checkCudaErrors(hipMemset(_s_q, 0., dom[rank].Gcc.s3 * sizeof(real)));
//checkCudaErrors(hipMemset(_sb_q, 0., dom[rank].Gcc.s3b * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gcc_e, 0., dom[rank].Gcc.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gcc_w, 0., dom[rank].Gcc.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gcc_n, 0., dom[rank].Gcc.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gcc_s, 0., dom[rank].Gcc.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gcc_t, 0., dom[rank].Gcc.s2_k * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gcc_b, 0., dom[rank].Gcc.s2_k * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfx_e, 0., dom[rank].Gfx.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfx_w, 0., dom[rank].Gfx.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfx_n, 0., dom[rank].Gfx.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfx_s, 0., dom[rank].Gfx.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfx_t, 0., dom[rank].Gfx.s2_k * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfx_b, 0., dom[rank].Gfx.s2_k * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfy_e, 0., dom[rank].Gfy.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfy_w, 0., dom[rank].Gfy.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfy_n, 0., dom[rank].Gfy.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfy_s, 0., dom[rank].Gfy.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfy_t, 0., dom[rank].Gfy.s2_k * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfy_b, 0., dom[rank].Gfy.s2_k * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfz_e, 0., dom[rank].Gfz.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfz_w, 0., dom[rank].Gfz.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfz_n, 0., dom[rank].Gfz.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfz_s, 0., dom[rank].Gfz.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfz_t, 0., dom[rank].Gfz.s2_k * sizeof(real)));
checkCudaErrors(hipMemset(_send_Gfz_b, 0., dom[rank].Gfz.s2_k * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gcc_e, 0., dom[rank].Gcc.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gcc_w, 0., dom[rank].Gcc.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gcc_n, 0., dom[rank].Gcc.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gcc_s, 0., dom[rank].Gcc.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gcc_t, 0., dom[rank].Gcc.s2_k * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gcc_b, 0., dom[rank].Gcc.s2_k * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfx_e, 0., dom[rank].Gfx.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfx_w, 0., dom[rank].Gfx.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfx_n, 0., dom[rank].Gfx.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfx_s, 0., dom[rank].Gfx.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfx_t, 0., dom[rank].Gfx.s2_k * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfx_b, 0., dom[rank].Gfx.s2_k * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfy_e, 0., dom[rank].Gfy.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfy_w, 0., dom[rank].Gfy.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfy_n, 0., dom[rank].Gfy.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfy_s, 0., dom[rank].Gfy.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfy_t, 0., dom[rank].Gfy.s2_k * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfy_b, 0., dom[rank].Gfy.s2_k * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfz_e, 0., dom[rank].Gfz.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfz_w, 0., dom[rank].Gfz.s2_i * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfz_n, 0., dom[rank].Gfz.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfz_s, 0., dom[rank].Gfz.s2_j * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfz_t, 0., dom[rank].Gfz.s2_k * sizeof(real)));
checkCudaErrors(hipMemset(_recv_Gfz_b, 0., dom[rank].Gfz.s2_k * sizeof(real)));
}
extern "C"
void cuda_update_bc(void)
{
printf("\nupdate bc\n");
hipLaunchKernelGGL(( update_vel_BC), dim3(1), dim3(1), 0, 0, _bc, v_bc_tdelay, ttime);
}
extern "C"
void cuda_dom_push(void)
{
// Push initialized domain data from host to device
checkCudaErrors(hipMemcpy(_p, p, dom[rank].Gcc.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_u, u, dom[rank].Gfx.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_v, v, dom[rank].Gfy.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_w, w, dom[rank].Gfz.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_p0, p0, dom[rank].Gcc.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_u0, u0, dom[rank].Gfx.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_v0, v0, dom[rank].Gfy.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_w0, w0, dom[rank].Gfz.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_phi, phi, dom[rank].Gcc.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_u_star, u_star, dom[rank].Gfx.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_v_star, v_star, dom[rank].Gfy.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_w_star, w_star, dom[rank].Gfz.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_conv_u, conv_u, dom[rank].Gfx.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_conv_v, conv_v, dom[rank].Gfy.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_conv_w, conv_w, dom[rank].Gfz.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_conv0_u, conv0_u, dom[rank].Gfx.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_conv0_v, conv0_v, dom[rank].Gfy.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_conv0_w, conv0_w, dom[rank].Gfz.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_diff_u, diff_u, dom[rank].Gfx.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_diff_v, diff_v, dom[rank].Gfy.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_diff_w, diff_w, dom[rank].Gfz.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_diff0_u, diff0_u, dom[rank].Gfx.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_diff0_v, diff0_v, dom[rank].Gfy.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_diff0_w, diff0_w, dom[rank].Gfz.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_f_x, f_x, dom[rank].Gfx.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_f_y, f_y, dom[rank].Gfy.s3b * sizeof(real),
hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(_f_z, f_z, dom[rank].Gfz.s3b * sizeof(real),
hipMemcpyHostToDevice));
}
extern "C"
void cuda_blocks_init()
{
//printf("N%d >> Creating cuda thread dimensions and size\n", rank);
gpumem += sizeof(cuda_blocks_struct);
int threads_x = 0;
int threads_y = 0;
int threads_z = 0;
int blocks_x = 0;
int blocks_y = 0;
int blocks_z = 0;
/* Computational Grid - Gcc */
threads_x = dom[rank].Gcc.in * (dom[rank].Gcc.in < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gcc.in >= MAX_THREADS_DIM);
threads_y = dom[rank].Gcc.jn * (dom[rank].Gcc.jn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gcc.jn >= MAX_THREADS_DIM);
threads_z = dom[rank].Gcc.kn * (dom[rank].Gcc.kn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gcc.kn >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gcc.in / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gcc.jn / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gcc.kn / (real) threads_z);
// Create tmp variables
dim3 Gcc_dim_in(threads_y, threads_z);
dim3 Gcc_dim_jn(threads_z, threads_x);
dim3 Gcc_dim_kn(threads_x, threads_y);
dim3 Gcc_num_in(blocks_y, blocks_z);
dim3 Gcc_num_jn(blocks_z, blocks_x);
dim3 Gcc_num_kn(blocks_x, blocks_y);
dim3 Gcc_dim_s3(threads_x, threads_y, threads_z);
dim3 Gcc_num_s3(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gcc.dim_in = Gcc_dim_in;
blocks.Gcc.dim_jn = Gcc_dim_jn;
blocks.Gcc.dim_kn = Gcc_dim_kn;
blocks.Gcc.num_in = Gcc_num_in;
blocks.Gcc.num_jn = Gcc_num_jn;
blocks.Gcc.num_kn = Gcc_num_kn;
blocks.Gcc.dim_s3 = Gcc_dim_s3;
blocks.Gcc.num_s3 = Gcc_num_s3;
/* Computational Shared Grid, GCC */
threads_x = (dom[rank].Gcc.in+2) *((dom[rank].Gcc.in+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gcc.in+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gcc.jn+2) *((dom[rank].Gcc.jn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gcc.jn+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gcc.kn+2) *((dom[rank].Gcc.kn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gcc.kn+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gcc.in / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gcc.jn / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gcc.kn / (real) (threads_z - 2));
// Create tmp variables
dim3 Gcc_dim_in_s(threads_y, threads_z);
dim3 Gcc_dim_jn_s(threads_z, threads_x);
dim3 Gcc_dim_kn_s(threads_x, threads_y);
dim3 Gcc_num_in_s(blocks_y, blocks_z);
dim3 Gcc_num_jn_s(blocks_z, blocks_x);
dim3 Gcc_num_kn_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gcc.dim_in_s = Gcc_dim_in_s;
blocks.Gcc.dim_jn_s = Gcc_dim_jn_s;
blocks.Gcc.dim_kn_s = Gcc_dim_kn_s;
blocks.Gcc.num_in_s = Gcc_num_in_s;
blocks.Gcc.num_jn_s = Gcc_num_jn_s;
blocks.Gcc.num_kn_s = Gcc_num_kn_s;
/* Computational Grid - Gfx */
threads_x = dom[rank].Gfx.in * (dom[rank].Gfx.in < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfx.in >= MAX_THREADS_DIM);
threads_y = dom[rank].Gfx.jn * (dom[rank].Gfx.jn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfx.jn >= MAX_THREADS_DIM);
threads_z = dom[rank].Gfx.kn * (dom[rank].Gfx.kn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfx.kn >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfx.in / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gfx.jn / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gfx.kn / (real) threads_z);
// Create tmp variables
dim3 Gfx_dim_in(threads_y, threads_z);
dim3 Gfx_dim_jn(threads_z, threads_x);
dim3 Gfx_dim_kn(threads_x, threads_y);
dim3 Gfx_num_in(blocks_y, blocks_z);
dim3 Gfx_num_jn(blocks_z, blocks_x);
dim3 Gfx_num_kn(blocks_x, blocks_y);
dim3 Gfx_dim_s3(threads_x, threads_y, threads_z);
dim3 Gfx_num_s3(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gfx.dim_in = Gfx_dim_in;
blocks.Gfx.dim_jn = Gfx_dim_jn;
blocks.Gfx.dim_kn = Gfx_dim_kn;
blocks.Gfx.num_in = Gfx_num_in;
blocks.Gfx.num_jn = Gfx_num_jn;
blocks.Gfx.num_kn = Gfx_num_kn;
blocks.Gfx.dim_s3 = Gfx_dim_s3;
blocks.Gfx.num_s3 = Gfx_num_s3;
/* Computational Shared Grid - Gfx */
threads_x = (dom[rank].Gfx.in+2) *((dom[rank].Gfx.in+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfx.in+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gfx.jn+2) *((dom[rank].Gfx.jn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfx.jn+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gfx.kn+2) *((dom[rank].Gfx.kn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfx.kn+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfx.in / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gfx.jn / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gfx.kn / (real) (threads_z - 2));
// Create tmp variables
dim3 Gfx_dim_in_s(threads_y, threads_z);
dim3 Gfx_dim_jn_s(threads_z, threads_x);
dim3 Gfx_dim_kn_s(threads_x, threads_y);
dim3 Gfx_num_in_s(blocks_y, blocks_z);
dim3 Gfx_num_jn_s(blocks_z, blocks_x);
dim3 Gfx_num_kn_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gfx.dim_in_s = Gfx_dim_in_s;
blocks.Gfx.dim_jn_s = Gfx_dim_jn_s;
blocks.Gfx.dim_kn_s = Gfx_dim_kn_s;
blocks.Gfx.num_in_s = Gfx_num_in_s;
blocks.Gfx.num_jn_s = Gfx_num_jn_s;
blocks.Gfx.num_kn_s = Gfx_num_kn_s;
/* Computational Grid - Gfy */
threads_x = dom[rank].Gfy.in * (dom[rank].Gfy.in < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfy.in >= MAX_THREADS_DIM);
threads_y = dom[rank].Gfy.jn * (dom[rank].Gfy.jn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfy.jn >= MAX_THREADS_DIM);
threads_z = dom[rank].Gfy.kn * (dom[rank].Gfy.kn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfy.kn >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfy.in / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gfy.jn / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gfy.kn / (real) threads_z);
// Create tmp variables
dim3 Gfy_dim_in(threads_y, threads_z);
dim3 Gfy_dim_jn(threads_z, threads_x);
dim3 Gfy_dim_kn(threads_x, threads_y);
dim3 Gfy_num_in(blocks_y, blocks_z);
dim3 Gfy_num_jn(blocks_z, blocks_x);
dim3 Gfy_num_kn(blocks_x, blocks_y);
dim3 Gfy_dim_s3(threads_x, threads_y, threads_z);
dim3 Gfy_num_s3(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gfy.dim_in = Gfy_dim_in;
blocks.Gfy.dim_jn = Gfy_dim_jn;
blocks.Gfy.dim_kn = Gfy_dim_kn;
blocks.Gfy.num_in = Gfy_num_in;
blocks.Gfy.num_jn = Gfy_num_jn;
blocks.Gfy.num_kn = Gfy_num_kn;
blocks.Gfy.dim_s3 = Gfy_dim_s3;
blocks.Gfy.num_s3 = Gfy_num_s3;
/* Computational Shared Grid - Gfy */
threads_x = (dom[rank].Gfy.in+2) *((dom[rank].Gfy.in+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfy.in+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gfy.jn+2) *((dom[rank].Gfy.jn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfy.jn+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gfy.kn+2) *((dom[rank].Gfy.kn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfy.kn+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfy.in / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gfy.jn / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gfy.kn / (real) (threads_z - 2));
// Create tmp variables
dim3 Gfy_dim_in_s(threads_y, threads_z);
dim3 Gfy_dim_jn_s(threads_z, threads_x);
dim3 Gfy_dim_kn_s(threads_x, threads_y);
dim3 Gfy_num_in_s(blocks_y, blocks_z);
dim3 Gfy_num_jn_s(blocks_z, blocks_x);
dim3 Gfy_num_kn_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gfy.dim_in_s = Gfy_dim_in_s;
blocks.Gfy.dim_jn_s = Gfy_dim_jn_s;
blocks.Gfy.dim_kn_s = Gfy_dim_kn_s;
blocks.Gfy.num_in_s = Gfy_num_in_s;
blocks.Gfy.num_jn_s = Gfy_num_jn_s;
blocks.Gfy.num_kn_s = Gfy_num_kn_s;
/* Computational Grid - Gfz */
threads_x = dom[rank].Gfz.in * (dom[rank].Gfz.in < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfz.in >= MAX_THREADS_DIM);
threads_y = dom[rank].Gfz.jn * (dom[rank].Gfz.jn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfz.jn >= MAX_THREADS_DIM);
threads_z = dom[rank].Gfz.kn * (dom[rank].Gfz.kn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfz.kn >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfz.in / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gfz.jn / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gfz.kn / (real) threads_z);
// Create tmp variables
dim3 Gfz_dim_in(threads_y, threads_z);
dim3 Gfz_dim_jn(threads_z, threads_x);
dim3 Gfz_dim_kn(threads_x, threads_y);
dim3 Gfz_num_in(blocks_y, blocks_z);
dim3 Gfz_num_jn(blocks_z, blocks_x);
dim3 Gfz_num_kn(blocks_x, blocks_y);
dim3 Gfz_dim_s3(threads_x, threads_y, threads_z);
dim3 Gfz_num_s3(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gfz.dim_in = Gfz_dim_in;
blocks.Gfz.dim_jn = Gfz_dim_jn;
blocks.Gfz.dim_kn = Gfz_dim_kn;
blocks.Gfz.num_in = Gfz_num_in;
blocks.Gfz.num_jn = Gfz_num_jn;
blocks.Gfz.num_kn = Gfz_num_kn;
blocks.Gfz.dim_s3 = Gfz_dim_s3;
blocks.Gfz.num_s3 = Gfz_num_s3;
/* Computational Shared Grid - Gfz */
threads_x = (dom[rank].Gfz.in+2) *((dom[rank].Gfz.in+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfz.in+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gfz.jn+2) *((dom[rank].Gfz.jn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfz.jn+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gfz.kn+2) *((dom[rank].Gfz.kn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfz.kn+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfz.in / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gfz.jn / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gfz.kn / (real) (threads_z - 2));
// Create tmp variables
dim3 Gfz_dim_in_s(threads_y, threads_z);
dim3 Gfz_dim_jn_s(threads_z, threads_x);
dim3 Gfz_dim_kn_s(threads_x, threads_y);
dim3 Gfz_num_in_s(blocks_y, blocks_z);
dim3 Gfz_num_jn_s(blocks_z, blocks_x);
dim3 Gfz_num_kn_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gfz.dim_in_s = Gfz_dim_in_s;
blocks.Gfz.dim_jn_s = Gfz_dim_jn_s;
blocks.Gfz.dim_kn_s = Gfz_dim_kn_s;
blocks.Gfz.num_in_s = Gfz_num_in_s;
blocks.Gfz.num_jn_s = Gfz_num_jn_s;
blocks.Gfz.num_kn_s = Gfz_num_kn_s;
/* Ghost grid - Gcc */
threads_x = dom[rank].Gcc.inb * (dom[rank].Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gcc.inb >= MAX_THREADS_DIM);
threads_y = dom[rank].Gcc.jnb * (dom[rank].Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gcc.jnb >= MAX_THREADS_DIM);
threads_z = dom[rank].Gcc.knb * (dom[rank].Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gcc.knb >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gcc.inb / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gcc.jnb / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gcc.knb / (real) threads_z);
// Create tmp variables
dim3 Gcc_dim_inb(threads_y, threads_z);
dim3 Gcc_dim_jnb(threads_z, threads_x);
dim3 Gcc_dim_knb(threads_x, threads_y);
dim3 Gcc_num_inb(blocks_y, blocks_z);
dim3 Gcc_num_jnb(blocks_z, blocks_x);
dim3 Gcc_num_knb(blocks_x, blocks_y);
dim3 Gcc_dim_s3b(threads_x, threads_y, threads_z);
dim3 Gcc_num_s3b(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gcc.dim_inb = Gcc_dim_inb;
blocks.Gcc.dim_jnb = Gcc_dim_jnb;
blocks.Gcc.dim_knb = Gcc_dim_knb;
blocks.Gcc.num_inb = Gcc_num_inb;
blocks.Gcc.num_jnb = Gcc_num_jnb;
blocks.Gcc.num_knb = Gcc_num_knb;
blocks.Gcc.dim_s3b = Gcc_dim_s3b;
blocks.Gcc.num_s3b = Gcc_num_s3b;
/* Ghost Shared Grid - Gcc */
threads_x = (dom[rank].Gcc.inb+2) *((dom[rank].Gcc.inb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gcc.inb+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gcc.jnb+2) *((dom[rank].Gcc.jnb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gcc.jnb+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gcc.knb+2) *((dom[rank].Gcc.knb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gcc.knb+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gcc.inb / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gcc.jnb / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gcc.knb / (real) (threads_z - 2));
// Create tmp variables
dim3 Gcc_dim_inb_s(threads_y, threads_z);
dim3 Gcc_dim_jnb_s(threads_z, threads_x);
dim3 Gcc_dim_knb_s(threads_x, threads_y);
dim3 Gcc_num_inb_s(blocks_y, blocks_z);
dim3 Gcc_num_jnb_s(blocks_z, blocks_x);
dim3 Gcc_num_knb_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gcc.dim_inb_s = Gcc_dim_inb_s;
blocks.Gcc.dim_jnb_s = Gcc_dim_jnb_s;
blocks.Gcc.dim_knb_s = Gcc_dim_knb_s;
blocks.Gcc.num_inb_s = Gcc_num_inb_s;
blocks.Gcc.num_jnb_s = Gcc_num_jnb_s;
blocks.Gcc.num_knb_s = Gcc_num_knb_s;
/* Ghost grid - Gfx */
threads_x = dom[rank].Gfx.inb * (dom[rank].Gfx.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfx.inb >= MAX_THREADS_DIM);
threads_y = dom[rank].Gfx.jnb * (dom[rank].Gfx.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfx.jnb >= MAX_THREADS_DIM);
threads_z = dom[rank].Gfx.knb * (dom[rank].Gfx.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfx.knb >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfx.inb / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gfx.jnb / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gfx.knb / (real) threads_z);
// Create tmp variables
dim3 Gfx_dim_inb(threads_y, threads_z);
dim3 Gfx_dim_jnb(threads_z, threads_x);
dim3 Gfx_dim_knb(threads_x, threads_y);
dim3 Gfx_num_inb(blocks_y, blocks_z);
dim3 Gfx_num_jnb(blocks_z, blocks_x);
dim3 Gfx_num_knb(blocks_x, blocks_y);
dim3 Gfx_dim_s3b(threads_x, threads_y, threads_z);
dim3 Gfx_num_s3b(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gfx.dim_inb = Gfx_dim_inb;
blocks.Gfx.dim_jnb = Gfx_dim_jnb;
blocks.Gfx.dim_knb = Gfx_dim_knb;
blocks.Gfx.num_inb = Gfx_num_inb;
blocks.Gfx.num_jnb = Gfx_num_jnb;
blocks.Gfx.num_knb = Gfx_num_knb;
blocks.Gfx.dim_s3b = Gfx_dim_s3b;
blocks.Gfx.num_s3b = Gfx_num_s3b;
/* Ghost Shared Grid - Gfx */
threads_x = (dom[rank].Gfx.inb+2) *((dom[rank].Gfx.inb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfx.inb+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gfx.jnb+2) *((dom[rank].Gfx.jnb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfx.jnb+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gfx.knb+2) *((dom[rank].Gfx.knb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfx.knb+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfx.inb / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gfx.jnb / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gfx.knb / (real) (threads_z - 2));
// Create tmp variables
dim3 Gfx_dim_inb_s(threads_y, threads_z);
dim3 Gfx_dim_jnb_s(threads_z, threads_x);
dim3 Gfx_dim_knb_s(threads_x, threads_y);
dim3 Gfx_num_inb_s(blocks_y, blocks_z);
dim3 Gfx_num_jnb_s(blocks_z, blocks_x);
dim3 Gfx_num_knb_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gfx.dim_inb_s = Gfx_dim_inb_s;
blocks.Gfx.dim_jnb_s = Gfx_dim_jnb_s;
blocks.Gfx.dim_knb_s = Gfx_dim_knb_s;
blocks.Gfx.num_inb_s = Gfx_num_inb_s;
blocks.Gfx.num_jnb_s = Gfx_num_jnb_s;
blocks.Gfx.num_knb_s = Gfx_num_knb_s;
/* Ghost grid - Gfy */
threads_x = dom[rank].Gfy.inb * (dom[rank].Gfy.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfy.inb >= MAX_THREADS_DIM);
threads_y = dom[rank].Gfy.jnb * (dom[rank].Gfy.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfy.jnb >= MAX_THREADS_DIM);
threads_z = dom[rank].Gfy.knb * (dom[rank].Gfy.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfy.knb >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfy.inb / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gfy.jnb / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gfy.knb / (real) threads_z);
// Create tmp variables
dim3 Gfy_dim_inb(threads_y, threads_z);
dim3 Gfy_dim_jnb(threads_z, threads_x);
dim3 Gfy_dim_knb(threads_x, threads_y);
dim3 Gfy_num_inb(blocks_y, blocks_z);
dim3 Gfy_num_jnb(blocks_z, blocks_x);
dim3 Gfy_num_knb(blocks_x, blocks_y);
dim3 Gfy_dim_s3b(threads_x, threads_y, threads_z);
dim3 Gfy_num_s3b(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gfy.dim_inb = Gfy_dim_inb;
blocks.Gfy.dim_jnb = Gfy_dim_jnb;
blocks.Gfy.dim_knb = Gfy_dim_knb;
blocks.Gfy.num_inb = Gfy_num_inb;
blocks.Gfy.num_jnb = Gfy_num_jnb;
blocks.Gfy.num_knb = Gfy_num_knb;
blocks.Gfy.dim_s3b = Gfy_dim_s3b;
blocks.Gfy.num_s3b = Gfy_num_s3b;
/* Ghost Shared Grid - Gfy */
threads_x = (dom[rank].Gfy.inb+2) *((dom[rank].Gfy.inb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfy.inb+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gfy.jnb+2) *((dom[rank].Gfy.jnb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfy.jnb+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gfy.knb+2) *((dom[rank].Gfy.knb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfy.knb+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfy.inb / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gfy.jnb / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gfy.knb / (real) (threads_z - 2));
// Create tmp variables
dim3 Gfy_dim_inb_s(threads_y, threads_z);
dim3 Gfy_dim_jnb_s(threads_z, threads_x);
dim3 Gfy_dim_knb_s(threads_x, threads_y);
dim3 Gfy_num_inb_s(blocks_y, blocks_z);
dim3 Gfy_num_jnb_s(blocks_z, blocks_x);
dim3 Gfy_num_knb_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gfy.dim_inb_s = Gfy_dim_inb_s;
blocks.Gfy.dim_jnb_s = Gfy_dim_jnb_s;
blocks.Gfy.dim_knb_s = Gfy_dim_knb_s;
blocks.Gfy.num_inb_s = Gfy_num_inb_s;
blocks.Gfy.num_jnb_s = Gfy_num_jnb_s;
blocks.Gfy.num_knb_s = Gfy_num_knb_s;
/* Ghost grid - Gfz */
threads_x = dom[rank].Gfz.inb * (dom[rank].Gfz.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfz.inb >= MAX_THREADS_DIM);
threads_y = dom[rank].Gfz.jnb * (dom[rank].Gfz.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfz.jnb >= MAX_THREADS_DIM);
threads_z = dom[rank].Gfz.knb * (dom[rank].Gfz.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfz.knb >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfz.inb / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gfz.jnb / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gfz.knb / (real) threads_z);
// Create tmp variables
dim3 Gfz_dim_inb(threads_y, threads_z);
dim3 Gfz_dim_jnb(threads_z, threads_x);
dim3 Gfz_dim_knb(threads_x, threads_y);
dim3 Gfz_num_inb(blocks_y, blocks_z);
dim3 Gfz_num_jnb(blocks_z, blocks_x);
dim3 Gfz_num_knb(blocks_x, blocks_y);
dim3 Gfz_dim_s3b(threads_x, threads_y, threads_z);
dim3 Gfz_num_s3b(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gfz.dim_inb = Gfz_dim_inb;
blocks.Gfz.dim_jnb = Gfz_dim_jnb;
blocks.Gfz.dim_knb = Gfz_dim_knb;
blocks.Gfz.num_inb = Gfz_num_inb;
blocks.Gfz.num_jnb = Gfz_num_jnb;
blocks.Gfz.num_knb = Gfz_num_knb;
blocks.Gfz.dim_s3b = Gfz_dim_s3b;
blocks.Gfz.num_s3b = Gfz_num_s3b;
/* Ghost Shared Grid - Gfz */
threads_x = (dom[rank].Gfz.inb+2) *((dom[rank].Gfz.inb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfz.inb+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gfz.jnb+2) *((dom[rank].Gfz.jnb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfz.jnb+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gfz.knb+2) *((dom[rank].Gfz.knb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfz.knb+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfz.inb / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gfz.jnb / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gfz.knb / (real) (threads_z - 2));
// Create tmp variables
dim3 Gfz_dim_inb_s(threads_y, threads_z);
dim3 Gfz_dim_jnb_s(threads_z, threads_x);
dim3 Gfz_dim_knb_s(threads_x, threads_y);
dim3 Gfz_num_inb_s(blocks_y, blocks_z);
dim3 Gfz_num_jnb_s(blocks_z, blocks_x);
dim3 Gfz_num_knb_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gfz.dim_inb_s = Gfz_dim_inb_s;
blocks.Gfz.dim_jnb_s = Gfz_dim_jnb_s;
blocks.Gfz.dim_knb_s = Gfz_dim_knb_s;
blocks.Gfz.num_inb_s = Gfz_num_inb_s;
blocks.Gfz.num_jnb_s = Gfz_num_jnb_s;
blocks.Gfz.num_knb_s = Gfz_num_knb_s;
}
extern "C"
void cuda_dom_BC(void)
{
//printf("N%d >> Applying boundary conditions to u_star.\n", rank);
// Check whether each subdom boundary is an external boundary, then
// apply the correct boundary conditions to all fields on that face
// Only apply boundary conditions on the inner [*n x *n] plane, not the
// [*nb x *nb] -- this ensures we don't set the points that don't contain
// any solution, and we also don't set points twice
/* WEST */
if (dom[rank].w == MPI_PROC_NULL) {
switch (bc.pW) {
case NEUMANN:
hipLaunchKernelGGL(( BC_p_W_N), dim3(blocks.Gcc.num_in), dim3(blocks.Gcc.dim_in), 0, 0, _p);
break;
}
switch (bc.uW) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_u_W_D), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u, bc.uWD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_u_W_N), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u);
break;
}
switch (bc.vW) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_v_W_D), dim3(blocks.Gfy.num_in), dim3(blocks.Gfy.dim_in), 0, 0, _v, bc.vWD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_v_W_N), dim3(blocks.Gfy.num_in), dim3(blocks.Gfy.dim_in), 0, 0, _v);
break;
}
switch (bc.wW) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_w_W_D), dim3(blocks.Gfz.num_in), dim3(blocks.Gfz.dim_in), 0, 0, _w, bc.wWD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_w_W_N), dim3(blocks.Gfz.num_in), dim3(blocks.Gfz.dim_in), 0, 0, _w);
break;
}
}
/* EAST */
if (dom[rank].e == MPI_PROC_NULL) {
switch (bc.pE) {
case NEUMANN:
hipLaunchKernelGGL(( BC_p_E_N), dim3(blocks.Gcc.num_in), dim3(blocks.Gcc.dim_in), 0, 0, _p);
}
switch (bc.uE) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_u_E_D), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u, bc.uED);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_u_E_N), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u);
break;
}
switch (bc.vE) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_v_E_D), dim3(blocks.Gfy.num_in), dim3(blocks.Gfy.dim_in), 0, 0, _v, bc.vED);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_v_E_N), dim3(blocks.Gfy.num_in), dim3(blocks.Gfy.dim_in), 0, 0, _v);
break;
}
switch (bc.wE) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_w_E_D), dim3(blocks.Gfz.num_in), dim3(blocks.Gfz.dim_in), 0, 0, _w, bc.wED);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_w_E_N), dim3(blocks.Gfz.num_in), dim3(blocks.Gfz.dim_in), 0, 0, _w);
break;
}
}
/* SOUTH */
if (dom[rank].s == MPI_PROC_NULL) {
switch (bc.pS) {
case NEUMANN:
hipLaunchKernelGGL(( BC_p_S_N), dim3(blocks.Gcc.num_jn), dim3(blocks.Gcc.dim_jn), 0, 0, _p);
}
switch (bc.uS) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_u_S_D), dim3(blocks.Gfx.num_jn), dim3(blocks.Gfx.dim_jn), 0, 0, _u, bc.uSD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_u_S_N), dim3(blocks.Gfx.num_jn), dim3(blocks.Gfx.dim_jn), 0, 0, _u);
break;
}
switch (bc.vS) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_v_S_D), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v, bc.vSD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_v_S_N), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v);
break;
}
switch (bc.wS) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_w_S_D), dim3(blocks.Gfz.num_jn), dim3(blocks.Gfz.dim_jn), 0, 0, _w, bc.wSD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_w_S_N), dim3(blocks.Gfz.num_jn), dim3(blocks.Gfz.dim_jn), 0, 0, _w);
break;
}
}
/* NORTH */
if (dom[rank].n == MPI_PROC_NULL) {
switch (bc.pN) {
case NEUMANN:
hipLaunchKernelGGL(( BC_p_N_N), dim3(blocks.Gcc.num_jn), dim3(blocks.Gcc.dim_jn), 0, 0, _p);
}
switch (bc.uN) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_u_N_D), dim3(blocks.Gfx.num_jn), dim3(blocks.Gfx.dim_jn), 0, 0, _u, bc.uND);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_u_N_N), dim3(blocks.Gfx.num_jn), dim3(blocks.Gfx.dim_jn), 0, 0, _u);
break;
}
switch (bc.vN) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_v_N_D), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v, bc.vND);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_v_N_N), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v);
break;
}
switch (bc.wN) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_w_N_D), dim3(blocks.Gfz.num_jn), dim3(blocks.Gfz.dim_jn), 0, 0, _w, bc.wND);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_w_N_N), dim3(blocks.Gfz.num_jn), dim3(blocks.Gfz.dim_jn), 0, 0, _w);
break;
}
}
/* BOTTOM */
if (dom[rank].b == MPI_PROC_NULL) {
switch (bc.pB) {
case NEUMANN:
hipLaunchKernelGGL(( BC_p_B_N), dim3(blocks.Gcc.num_kn), dim3(blocks.Gcc.dim_kn), 0, 0, _p);
}
switch (bc.uB) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_u_B_D), dim3(blocks.Gfx.num_kn), dim3(blocks.Gfx.dim_kn), 0, 0, _u, bc.uBD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_u_B_N), dim3(blocks.Gfx.num_kn), dim3(blocks.Gfx.dim_kn), 0, 0, _u);
break;
}
switch (bc.vB) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_v_B_D), dim3(blocks.Gfy.num_kn), dim3(blocks.Gfy.dim_kn), 0, 0, _v, bc.vBD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_v_B_N), dim3(blocks.Gfy.num_kn), dim3(blocks.Gfy.dim_kn), 0, 0, _v);
break;
}
switch (bc.wB) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_w_B_D), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w, bc.wBD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_w_B_N), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w);
break;
}
}
/* TOP */
if (dom[rank].t == MPI_PROC_NULL) {
switch (bc.pT) {
case NEUMANN:
hipLaunchKernelGGL(( BC_p_T_N), dim3(blocks.Gcc.num_kn), dim3(blocks.Gcc.dim_kn), 0, 0, _p);
}
switch (bc.uT) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_u_T_D), dim3(blocks.Gfx.num_kn), dim3(blocks.Gfx.dim_kn), 0, 0, _u, bc.uTD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_u_T_N), dim3(blocks.Gfx.num_kn), dim3(blocks.Gfx.dim_kn), 0, 0, _u);
break;
}
switch (bc.vT) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_v_T_D), dim3(blocks.Gfy.num_kn), dim3(blocks.Gfy.dim_kn), 0, 0, _v, bc.vTD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_v_T_N), dim3(blocks.Gfy.num_kn), dim3(blocks.Gfy.dim_kn), 0, 0, _v);
break;
}
switch (bc.wT) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_w_T_D), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w, bc.wTD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_w_T_N), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w);
break;
}
}
}
extern "C"
void cuda_dom_pull(void)
{
// Pull domain data from device to host
checkCudaErrors(hipMemcpy(p, _p, dom[rank].Gcc.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(u, _u, dom[rank].Gfx.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(v, _v, dom[rank].Gfy.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(w, _w, dom[rank].Gfz.s3b * sizeof(real),
hipMemcpyDeviceToHost));
}
extern "C"
void cuda_dom_pull_phase(void)
{
checkCudaErrors(hipMemcpy(phase, _phase, dom[rank].Gcc.s3b * sizeof(int),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(phase_shell, _phase_shell, dom[rank].Gcc.s3b * sizeof(int),
hipMemcpyDeviceToHost));
}
extern "C"
void cuda_dom_pull_debug(void)
{
//printf("N%d >> Pulling dom device->host (debug)\n", rank);
checkCudaErrors(hipMemcpy(phi, _phi, dom[rank].Gcc.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(u_star, _u_star, dom[rank].Gfx.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(v_star, _v_star, dom[rank].Gfy.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(w_star, _w_star, dom[rank].Gfz.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(conv_u, _conv_u, dom[rank].Gfx.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(conv_v, _conv_v, dom[rank].Gfy.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(conv_w, _conv_w, dom[rank].Gfz.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(diff_u, _diff_u, dom[rank].Gfx.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(diff_v, _diff_v, dom[rank].Gfy.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(diff_w, _diff_w, dom[rank].Gfz.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(flag_u, _flag_u, dom[rank].Gfx.s3b * sizeof(int),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(flag_v, _flag_v, dom[rank].Gfy.s3b * sizeof(int),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(flag_w, _flag_w, dom[rank].Gfz.s3b * sizeof(int),
hipMemcpyDeviceToHost));
//if (NPARTS > 0) { // Already pulled in cuda_dom_pull()
// checkCudaErrors(hipMemcpy(phase, _phase, dom[rank].Gcc.s3b * sizeof(int),
// hipMemcpyDeviceToHost));
// checkCudaErrors(hipMemcpy(phase_shell, _phase_shell, dom[rank].Gcc.s3b * sizeof(int),
// hipMemcpyDeviceToHost));
//}
}
extern "C"
void cuda_dom_pull_restart(void) {
checkCudaErrors(hipMemcpy(p0, _p0, dom[rank].Gcc.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(u0, _u0, dom[rank].Gfx.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(v0, _v0, dom[rank].Gfy.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(w0, _w0, dom[rank].Gfz.s3b * sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(conv0_u, _conv0_u, dom[rank].Gfx.s3b *sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(conv0_v, _conv0_v, dom[rank].Gfy.s3b *sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(conv0_w, _conv0_w, dom[rank].Gfz.s3b *sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(diff0_u, _diff0_u, dom[rank].Gfx.s3b *sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(diff0_v, _diff0_v, dom[rank].Gfy.s3b *sizeof(real),
hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(diff0_w, _diff0_w, dom[rank].Gfz.s3b *sizeof(real),
hipMemcpyDeviceToHost));
}
extern "C"
void cuda_self_exchange_i(real *array)
{
hipLaunchKernelGGL(( self_exchange_Gcc_i), dim3(blocks.Gcc.num_in), dim3(blocks.Gcc.dim_in), 0, 0, array);
}
extern "C"
void cuda_self_exchange_j(real *array)
{
hipLaunchKernelGGL(( self_exchange_Gcc_j), dim3(blocks.Gcc.num_jn), dim3(blocks.Gcc.dim_jn), 0, 0, array);
}
extern "C"
void cuda_self_exchange_k(real *array)
{
hipLaunchKernelGGL(( self_exchange_Gcc_k), dim3(blocks.Gcc.num_kn), dim3(blocks.Gcc.dim_kn), 0, 0, array);
}
extern "C"
void cuda_pack_planes_Gcc(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gcc_east), dim3(blocks.Gcc.num_in), dim3(blocks.Gcc.dim_in), 0, 0, array,
_send_Gcc_e);
if (dom[rank].w != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gcc_west), dim3(blocks.Gcc.num_in), dim3(blocks.Gcc.dim_in), 0, 0, array,
_send_Gcc_w);
if (dom[rank].n != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gcc_north), dim3(blocks.Gcc.num_jn), dim3(blocks.Gcc.dim_jn), 0, 0, array,
_send_Gcc_n);
if (dom[rank].s != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gcc_south), dim3(blocks.Gcc.num_jn), dim3(blocks.Gcc.dim_jn), 0, 0, array,
_send_Gcc_s);
if (dom[rank].t != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gcc_top), dim3(blocks.Gcc.num_kn), dim3(blocks.Gcc.dim_kn), 0, 0, array,
_send_Gcc_t);
if (dom[rank].b != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gcc_bottom), dim3(blocks.Gcc.num_kn), dim3(blocks.Gcc.dim_kn), 0, 0, array,
_send_Gcc_b);
}
extern "C"
void cuda_pack_planes_Gfx(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfx_east), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, array,
_send_Gfx_e);
if (dom[rank].w != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfx_west), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, array,
_send_Gfx_w);
if (dom[rank].n != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfx_north), dim3(blocks.Gfx.num_jn), dim3(blocks.Gfx.dim_jn), 0, 0, array,
_send_Gfx_n);
if (dom[rank].s != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfx_south), dim3(blocks.Gfx.num_jn), dim3(blocks.Gfx.dim_jn), 0, 0, array,
_send_Gfx_s);
if (dom[rank].t != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfx_top), dim3(blocks.Gfx.num_kn), dim3(blocks.Gfx.dim_kn), 0, 0, array,
_send_Gfx_t);
if (dom[rank].b != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfx_bottom), dim3(blocks.Gfx.num_kn), dim3(blocks.Gfx.dim_kn), 0, 0, array,
_send_Gfx_b);
}
extern "C"
void cuda_pack_planes_Gfy(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfy_east), dim3(blocks.Gfy.num_in), dim3(blocks.Gfy.dim_in), 0, 0, array,
_send_Gfy_e);
if (dom[rank].w != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfy_west), dim3(blocks.Gfy.num_in), dim3(blocks.Gfy.dim_in), 0, 0, array,
_send_Gfy_w);
if (dom[rank].n != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfy_north), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, array,
_send_Gfy_n);
if (dom[rank].s != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfy_south), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, array,
_send_Gfy_s);
if (dom[rank].t != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfy_top), dim3(blocks.Gfy.num_kn), dim3(blocks.Gfy.dim_kn), 0, 0, array,
_send_Gfy_t);
if (dom[rank].b != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfy_bottom), dim3(blocks.Gfy.num_kn), dim3(blocks.Gfy.dim_kn), 0, 0, array,
_send_Gfy_b);
}
extern "C"
void cuda_pack_planes_Gfz(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfz_east), dim3(blocks.Gfz.num_in), dim3(blocks.Gfz.dim_in), 0, 0, array,
_send_Gfz_e);
if (dom[rank].w != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfz_west), dim3(blocks.Gfz.num_in), dim3(blocks.Gfz.dim_in), 0, 0, array,
_send_Gfz_w);
if (dom[rank].n != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfz_north), dim3(blocks.Gfz.num_jn), dim3(blocks.Gfz.dim_jn), 0, 0, array,
_send_Gfz_n);
if (dom[rank].s != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfz_south), dim3(blocks.Gfz.num_jn), dim3(blocks.Gfz.dim_jn), 0, 0, array,
_send_Gfz_s);
if (dom[rank].t != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfz_top), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, array,
_send_Gfz_t);
if (dom[rank].b != MPI_PROC_NULL)
hipLaunchKernelGGL(( pack_planes_Gfz_bottom), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, array,
_send_Gfz_b);
}
extern "C"
void cuda_unpack_planes_Gcc(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gcc_east), dim3(blocks.Gcc.num_in), dim3(blocks.Gcc.dim_in), 0, 0, array,
_recv_Gcc_e);
if (dom[rank].w != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gcc_west), dim3(blocks.Gcc.num_in), dim3(blocks.Gcc.dim_in), 0, 0, array,
_recv_Gcc_w);
if (dom[rank].n != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gcc_north), dim3(blocks.Gcc.num_jn), dim3(blocks.Gcc.dim_jn), 0, 0, array,
_recv_Gcc_n);
if (dom[rank].s != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gcc_south), dim3(blocks.Gcc.num_jn), dim3(blocks.Gcc.dim_jn), 0, 0, array,
_recv_Gcc_s);
if (dom[rank].t != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gcc_top), dim3(blocks.Gcc.num_kn), dim3(blocks.Gcc.dim_kn), 0, 0, array,
_recv_Gcc_t);
if (dom[rank].b != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gcc_bottom), dim3(blocks.Gcc.num_kn), dim3(blocks.Gcc.dim_kn), 0, 0, array,
_recv_Gcc_b);
}
extern "C"
void cuda_unpack_planes_Gfx(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfx_east), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, array,
_recv_Gfx_e);
if (dom[rank].w != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfx_west), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, array,
_recv_Gfx_w);
if (dom[rank].n != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfx_north), dim3(blocks.Gfx.num_jn), dim3(blocks.Gfx.dim_jn), 0, 0, array,
_recv_Gfx_n);
if (dom[rank].s != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfx_south), dim3(blocks.Gfx.num_jn), dim3(blocks.Gfx.dim_jn), 0, 0, array,
_recv_Gfx_s);
if (dom[rank].t != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfx_top), dim3(blocks.Gfx.num_kn), dim3(blocks.Gfx.dim_kn), 0, 0, array,
_recv_Gfx_t);
if (dom[rank].b != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfx_bottom), dim3(blocks.Gfx.num_kn), dim3(blocks.Gfx.dim_kn), 0, 0, array,
_recv_Gfx_b);
}
extern "C"
void cuda_unpack_planes_Gfy(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfy_east), dim3(blocks.Gfy.num_in), dim3(blocks.Gfy.dim_in), 0, 0, array,
_recv_Gfy_e);
if (dom[rank].w != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfy_west), dim3(blocks.Gfy.num_in), dim3(blocks.Gfy.dim_in), 0, 0, array,
_recv_Gfy_w);
if (dom[rank].n != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfy_north), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, array,
_recv_Gfy_n);
if (dom[rank].s != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfy_south), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, array,
_recv_Gfy_s);
if (dom[rank].t != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfy_top), dim3(blocks.Gfy.num_kn), dim3(blocks.Gfy.dim_kn), 0, 0, array,
_recv_Gfy_t);
if (dom[rank].b != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfy_bottom), dim3(blocks.Gfy.num_kn), dim3(blocks.Gfy.dim_kn), 0, 0, array,
_recv_Gfy_b);
}
extern "C"
void cuda_unpack_planes_Gfz(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfz_east), dim3(blocks.Gfz.num_in), dim3(blocks.Gfz.dim_in), 0, 0, array,
_recv_Gfz_e);
if (dom[rank].w != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfz_west), dim3(blocks.Gfz.num_in), dim3(blocks.Gfz.dim_in), 0, 0, array,
_recv_Gfz_w);
if (dom[rank].n != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfz_north), dim3(blocks.Gfz.num_jn), dim3(blocks.Gfz.dim_jn), 0, 0, array,
_recv_Gfz_n);
if (dom[rank].s != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfz_south), dim3(blocks.Gfz.num_jn), dim3(blocks.Gfz.dim_jn), 0, 0, array,
_recv_Gfz_s);
if (dom[rank].t != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfz_top), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, array,
_recv_Gfz_t);
if (dom[rank].b != MPI_PROC_NULL)
hipLaunchKernelGGL(( unpack_planes_Gfz_bottom), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, array,
_recv_Gfz_b);
}
extern "C"
void cuda_find_dt(void)
{
// Only want max values over the computational domain, not ghost domain
// Copy to new array and square the value. Then find max of that result
// and take sqrt
real *utmp;
real *vtmp;
real *wtmp;
hipMalloc((void**) &utmp, sizeof(real)*dom[rank].Gfx.s3);
hipMalloc((void**) &vtmp, sizeof(real)*dom[rank].Gfy.s3);
hipMalloc((void**) &wtmp, sizeof(real)*dom[rank].Gfz.s3);
hipLaunchKernelGGL(( copy_u_square_noghost), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u, utmp);
hipLaunchKernelGGL(( copy_v_square_noghost), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v, vtmp);
hipLaunchKernelGGL(( copy_w_square_noghost), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w, wtmp);
// device pointers to utmp, vtmp, wtmp
thrust::device_ptr<real> t_umax(utmp);
thrust::device_ptr<real> t_vmax(vtmp);
thrust::device_ptr<real> t_wmax(wtmp);
real u_max = thrust::reduce(t_umax, t_umax + dom[rank].Gfx.s3, 0.,
thrust::maximum<real>());
real v_max = thrust::reduce(t_vmax, t_vmax + dom[rank].Gfy.s3, 0.,
thrust::maximum<real>());
real w_max = thrust::reduce(t_wmax, t_wmax + dom[rank].Gfz.s3, 0.,
thrust::maximum<real>());
u_max = sqrt(u_max);
v_max = sqrt(v_max);
w_max = sqrt(w_max);
hipFree(utmp);
hipFree(vtmp);
hipFree(wtmp);
// find dt on each subdomain
dt = u_max/dom[rank].dx + 2.*nu/(dom[rank].dx * dom[rank].dx);
dt += v_max/dom[rank].dy + 2.*nu/(dom[rank].dy * dom[rank].dy);
dt += w_max/dom[rank].dz + 2.*nu/(dom[rank].dz * dom[rank].dz);
dt = CFL/dt;
// MPI reduce to find minimum timestep over all ranks
MPI_Allreduce(MPI_IN_PLACE, &dt, 1, mpi_real, MPI_MIN, MPI_COMM_WORLD);
/* An alternative method is to find max(u,v,w) over all domains, and then
* calculate dt. This will be <= the dt as it is currently calculated.
*/
}
extern "C"
void cuda_compute_forcing(void)
{
// reset forcing arrays
hipLaunchKernelGGL(( forcing_reset_x), dim3(blocks.Gfx.num_inb), dim3(blocks.Gfx.dim_inb), 0, 0, _f_x);
hipLaunchKernelGGL(( forcing_reset_y), dim3(blocks.Gfy.num_jnb), dim3(blocks.Gfy.dim_jnb), 0, 0, _f_y);
hipLaunchKernelGGL(( forcing_reset_z), dim3(blocks.Gfz.num_knb), dim3(blocks.Gfz.dim_knb), 0, 0, _f_z);
// linearly accelerate pressure gradient from zero
real delta = ttime - p_bc_tdelay;
if (delta >= 0) {
if (gradP.xa == 0) {
gradP.x = gradP.xm;
} else if (fabs(delta*gradP.xa) > fabs(gradP.xm)) {
gradP.x = gradP.xm;
} else {
gradP.x = delta*gradP.xa;
}
if (gradP.ya == 0) {
gradP.y = gradP.ym;
} else if (fabs(delta*gradP.ya) > fabs(gradP.ym)) {
gradP.y = gradP.ym;
} else {
gradP.y = delta*gradP.ya;
}
// Turn off if PID controller is on
if (!(Kp > 0 || Ki > 0 || Kd > 0)) {
if (gradP.za == 0) {
gradP.z = gradP.zm;
} else if (fabs(delta*gradP.za) > fabs(gradP.zm)) {
gradP.z = gradP.zm;
} else {
gradP.z = delta*gradP.za;
}
}
}
gradP.z = gradP.z * cos(osci_f*ttime);
// linearly accelerate gravitational acceleration from zero
delta = ttime - g_bc_tdelay;
if (delta >= 0) {
if (g.xa == 0) {
g.x = g.xm;
} else if (fabs(delta*g.xa) > fabs(g.xm)) {
g.x = g.xm;
} else {
g.x = delta*g.xa;
}
if (g.ya == 0) {
g.y = g.ym;
} else if (fabs(delta*g.ya) > fabs(g.ym)) {
g.y = g.ym;
} else {
g.y = delta*g.ya;
}
if (g.za == 0) {
g.z = g.zm;
} else if (fabs(delta*g.za) > fabs(g.zm)) {
g.z = g.zm;
} else {
g.z = delta*g.za;
}
}
delta = ttime - p_bc_tdelay;
// PID controller
if (delta >= 0) {
if(Kp > 0 || Ki > 0 || Kd > 0) {
/* Init execution config */
// Ghost cells
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
dim3 bin_num_inb(by, bz);
dim3 bin_dim_inb(ty, tz);
// No ghost
ty = bins.Gcc.jn * (bins.Gcc.jn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jn >= MAX_THREADS_DIM);
tz = bins.Gcc.kn * (bins.Gcc.kn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.kn >= MAX_THREADS_DIM);
by = (int) ceil((real) bins.Gcc.jn / (real) ty);
bz = (int) ceil((real) bins.Gcc.kn / (real) tz);
dim3 bin_num_in(by, bz);
dim3 bin_dim_in(ty, tz);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Allocate memory */
checkCudaErrors(hipMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(hipMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
checkCudaErrors(hipMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(hipMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
real *_wdot;
checkCudaErrors(hipMalloc(&_wdot, bins.Gcc.s3 * sizeof(real)));
thrust::device_ptr<real> t_wdot(_wdot);
if (nparts > 0) {
/* Find each particle's bin */
hipLaunchKernelGGL(( bin_fill_i), dim3(num_nparts), dim3(dim_nparts), 0, 0, _part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
hipLaunchKernelGGL(( find_bin_start_end), dim3(b_nparts), dim3(t_nparts), smem_size, 0, _bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
hipLaunchKernelGGL(( count_bin_parts_i), dim3(bin_num_inb), dim3(bin_dim_inb), 0, 0, _bin_start, _bin_end,
_bin_count);
/* Pull wdot to an array for each bin */
hipLaunchKernelGGL(( pull_wdot), dim3(bin_num_in), dim3(bin_dim_in), 0, 0, _wdot, _parts, _bin_start,
_bin_count, _part_ind);
} else { // nparts <= 0
checkCudaErrors(hipMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(hipMemset(_part_bin, -1, nparts * sizeof(int)));
checkCudaErrors(hipMemset(_wdot, 0., nparts * sizeof(real)));
}
real acc_z = thrust::reduce(t_wdot, t_wdot + bins.Gcc.s3, 0., thrust::plus<real>());
MPI_Allreduce(MPI_IN_PLACE, &acc_z, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
acc_z /= (real) NPARTS;
pid_int = pid_int + acc_z*dt;
gradP.z = gradP.z
+ (Kp*acc_z + Ki*pid_int/ttime + Kd*(acc_z-pid_back))*rho_avg;
pid_back = acc_z;
checkCudaErrors(hipFree(_wdot));
checkCudaErrors(hipFree(_part_ind));
checkCudaErrors(hipFree(_part_bin));
}
}
// forcing
hipLaunchKernelGGL(( forcing_add_x_const), dim3(blocks.Gfx.num_inb),dim3(blocks.Gfx.dim_inb), 0, 0, -gradP.x/rho_f,
_f_x);
hipLaunchKernelGGL(( forcing_add_y_const), dim3(blocks.Gfy.num_jnb),dim3(blocks.Gfy.dim_jnb), 0, 0, -gradP.y/rho_f,
_f_y);
hipLaunchKernelGGL(( forcing_add_z_const), dim3(blocks.Gfz.num_knb),dim3(blocks.Gfz.dim_knb), 0, 0, -gradP.z/rho_f,
_f_z);
}
extern "C"
void cuda_compute_turb_forcing(void)
{
if (init_cond == TURBULENT) {
/* Calculate current kinetic energy */
real *utmp;
real *vtmp;
real *wtmp;
hipMalloc((void**) &utmp, sizeof(real)*dom[rank].Gfx.s3);
hipMalloc((void**) &vtmp, sizeof(real)*dom[rank].Gfy.s3);
hipMalloc((void**) &wtmp, sizeof(real)*dom[rank].Gfz.s3);
// Square entries
hipLaunchKernelGGL(( copy_u_square_noghost), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u, utmp);
hipLaunchKernelGGL(( copy_v_square_noghost), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v, vtmp);
hipLaunchKernelGGL(( copy_w_square_noghost), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w, wtmp);
// device pointers to utmp, vtmp, wtmp
thrust::device_ptr<real> t_utmp(utmp);
thrust::device_ptr<real> t_vtmp(vtmp);
thrust::device_ptr<real> t_wtmp(wtmp);
// Sum fields
// -- Sum should not double count staggered velocities at subdomain and
// periodic interfaces. For now, just don't loop over those points.
// -- This assumes then that we're using periodic boundary conditions
real su2 = thrust::reduce(t_utmp, t_utmp + dom[rank].Gfx.s3 - dom[rank].Gfx.s2_i,
0., thrust::plus<real>());
real sv2 = thrust::reduce(t_vtmp, t_vtmp + dom[rank].Gfy.s3 - dom[rank].Gfy.s2_j,
0., thrust::plus<real>());
real sw2 = thrust::reduce(t_wtmp, t_wtmp + dom[rank].Gfz.s3 - dom[rank].Gfz.s2_k,
0., thrust::plus<real>());
// Sum results
real k = 0.5 * (su2 + sv2 + sw2);
// Find total energy
MPI_Allreduce(MPI_IN_PLACE, &k, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
// Average
k /= DOM.Gcc.s3;
/* Find mean u,v,w velocity */
// Copy to array with no ghost cells
hipLaunchKernelGGL(( copy_u_noghost), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u, utmp);
hipLaunchKernelGGL(( copy_v_noghost), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v, vtmp);
hipLaunchKernelGGL(( copy_w_noghost), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w, wtmp);
// Sum
// Sum should not double count staggered vels at bouundaries
real umean = thrust::reduce(t_utmp, t_utmp + dom[rank].Gfx.s3 - dom[rank].Gfx.s2_i,
0., thrust::plus<real>());
real vmean = thrust::reduce(t_vtmp, t_vtmp + dom[rank].Gfy.s3 - dom[rank].Gfy.s2_j,
0., thrust::plus<real>());
real wmean = thrust::reduce(t_wtmp, t_wtmp + dom[rank].Gfz.s3 - dom[rank].Gfz.s2_k,
0., thrust::plus<real>());
// Reduce over all ranks
MPI_Allreduce(MPI_IN_PLACE, &umean, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(MPI_IN_PLACE, &vmean, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(MPI_IN_PLACE, &wmean, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
// Average
umean /= DOM.Gfx.s3;
vmean /= DOM.Gfy.s3;
wmean /= DOM.Gfz.s3;
// Calculate forcing
real turb_force = turbA * turb_k0 / k;
// Add forcing to velocity field
hipLaunchKernelGGL(( forcing_add_x_field), dim3(blocks.Gfx.num_inb), dim3(blocks.Gfx.dim_inb), 0, 0, turb_force,
_u, _f_x);
hipLaunchKernelGGL(( forcing_add_y_field), dim3(blocks.Gfy.num_jnb), dim3(blocks.Gfy.dim_jnb), 0, 0, turb_force,
_v, _f_y);
hipLaunchKernelGGL(( forcing_add_z_field), dim3(blocks.Gfz.num_knb), dim3(blocks.Gfz.dim_knb), 0, 0, turb_force,
_w, _f_z);
// Subtract mean to get perturbation
hipLaunchKernelGGL(( forcing_add_x_const), dim3(blocks.Gfx.num_inb), dim3(blocks.Gfx.dim_inb), 0, 0, -turb_force*umean,
_f_x);
hipLaunchKernelGGL(( forcing_add_y_const), dim3(blocks.Gfy.num_jnb), dim3(blocks.Gfy.dim_jnb), 0, 0, -turb_force*vmean,
_f_y);
hipLaunchKernelGGL(( forcing_add_z_const), dim3(blocks.Gfz.num_knb), dim3(blocks.Gfz.dim_knb), 0, 0, -turb_force*wmean,
_f_z);
// Free
hipFree(utmp);
hipFree(vtmp);
hipFree(wtmp);
/* Dissipation Rate */
real *_eps;
hipMalloc((void**) &_eps, sizeof(real) * dom[rank].Gcc.s3);
hipLaunchKernelGGL(( calc_dissipation), dim3(blocks.Gcc.num_in), dim3(blocks.Gcc.dim_in), 0, 0, _u, _v, _w, _eps);
thrust::device_ptr<real> t_eps(_eps);
real eps = thrust::reduce(t_eps, t_eps + dom[rank].Gcc.s3,
0., thrust::plus<real>());
MPI_Allreduce(MPI_IN_PLACE, &eps, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
eps *= nu / DOM.Gcc.s3;
hipFree(_eps);
// Record this
char rname[FILE_NAME_SIZE] = "turb.rec";
recorder_turb(rname, k, eps);
}
}
extern "C"
void cuda_U_star(void)
{
hipLaunchKernelGGL(( calc_u_star), dim3(blocks.Gfx.num_inb_s), dim3(blocks.Gfx.dim_inb_s), 0, 0, rho_f, nu, _u0,
_v0, _w0, _p0, _f_x, _diff0_u, _conv0_u, _diff_u, _conv_u, _u_star, dt0,
dt, _phase);
hipLaunchKernelGGL(( calc_v_star), dim3(blocks.Gfy.num_jnb_s), dim3(blocks.Gfy.dim_jnb_s), 0, 0, rho_f, nu, _u0,
_v0, _w0, _p0, _f_y, _diff0_v, _conv0_v, _diff_v, _conv_v, _v_star, dt0,
dt, _phase);
hipLaunchKernelGGL(( calc_w_star), dim3(blocks.Gfy.num_knb_s), dim3(blocks.Gfz.dim_knb_s), 0, 0, rho_f, nu, _u0,
_v0, _w0, _p0, _f_z, _diff0_w, _conv0_w, _diff_w, _conv_w, _w_star, dt0,
dt, _phase);
}
extern "C"
void cuda_dom_BC_star(void)
{
// west
if (dom[rank].w == MPI_PROC_NULL) {
// u
switch (bc.uW) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_u_W_D), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u_star, bc.uWD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_u_W_N), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u_star);
break;
}
// v
switch (bc.vW) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_v_W_D), dim3(blocks.Gfy.num_in), dim3(blocks.Gfy.dim_in), 0, 0, _v_star, bc.vWD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_v_W_N), dim3(blocks.Gfy.num_in), dim3(blocks.Gfy.dim_in), 0, 0, _v_star);
break;
}
// w
switch (bc.wW) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_w_W_D), dim3(blocks.Gfz.num_in), dim3(blocks.Gfz.dim_in), 0, 0, _w_star, bc.wWD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_w_W_N), dim3(blocks.Gfz.num_in), dim3(blocks.Gfz.dim_in), 0, 0, _w_star);
break;
}
}
// east
if (dom[rank].e == MPI_PROC_NULL) {
// u
switch (bc.uE) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_u_E_D), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u_star, bc.uED);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_u_E_N), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u_star);
break;
}
// v
switch (bc.vE) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_v_E_D), dim3(blocks.Gfy.num_in), dim3(blocks.Gfy.dim_in), 0, 0, _v_star, bc.vED);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_v_E_N), dim3(blocks.Gfy.num_in), dim3(blocks.Gfy.dim_in), 0, 0, _v_star);
break;
}
// w
switch (bc.wE) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_w_E_D), dim3(blocks.Gfz.num_in), dim3(blocks.Gfz.dim_in), 0, 0, _w_star, bc.wED);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_w_E_N), dim3(blocks.Gfz.num_in), dim3(blocks.Gfz.dim_in), 0, 0, _w_star);
break;
}
}
// south
if (dom[rank].s == MPI_PROC_NULL) {
// u
switch (bc.uS) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_u_S_D), dim3(blocks.Gfx.num_jn), dim3(blocks.Gfx.dim_jn), 0, 0, _u_star, bc.uSD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_u_S_N), dim3(blocks.Gfx.num_jn), dim3(blocks.Gfx.dim_jn), 0, 0, _u_star);
break;
}
// v
switch (bc.vS) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_v_S_D), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v_star, bc.vSD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_v_S_N), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v_star);
break;
}
// w
switch (bc.wS) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_w_S_D), dim3(blocks.Gfz.num_jn), dim3(blocks.Gfz.dim_jn), 0, 0, _w_star, bc.wSD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_w_S_N), dim3(blocks.Gfz.num_jn), dim3(blocks.Gfz.dim_jn), 0, 0, _w_star);
break;
}
}
// north
if (dom[rank].n == MPI_PROC_NULL) {
// u
switch (bc.uN) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_u_N_D), dim3(blocks.Gfx.num_jn), dim3(blocks.Gfx.dim_jn), 0, 0, _u_star, bc.uND);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_u_N_N), dim3(blocks.Gfx.num_jn), dim3(blocks.Gfx.dim_jn), 0, 0, _u_star);
break;
}
// v
switch (bc.vN) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_v_N_D), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v_star, bc.vND);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_v_N_N), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v_star);
break;
}
// w
switch (bc.wN) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_w_N_D), dim3(blocks.Gfz.num_jn), dim3(blocks.Gfz.dim_jn), 0, 0, _w_star, bc.wND);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_w_N_N), dim3(blocks.Gfz.num_jn), dim3(blocks.Gfz.dim_jn), 0, 0, _w_star);
break;
}
}
// bottom
if (dom[rank].b == MPI_PROC_NULL) {
// u
switch (bc.uB) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_u_B_D), dim3(blocks.Gfx.num_kn), dim3(blocks.Gfx.dim_kn), 0, 0, _u_star, bc.uBD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_u_B_N), dim3(blocks.Gfx.num_kn), dim3(blocks.Gfx.dim_kn), 0, 0, _u_star);
break;
}
// v
switch (bc.vB) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_v_B_D), dim3(blocks.Gfy.num_kn), dim3(blocks.Gfy.dim_kn), 0, 0, _v_star, bc.vBD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_v_B_N), dim3(blocks.Gfy.num_kn), dim3(blocks.Gfy.dim_kn), 0, 0, _v_star);
break;
}
// w
switch (bc.wB) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_w_B_D), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w_star, bc.wBD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_w_B_N), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w_star);
break;
}
}
// top
if (dom[rank].t == MPI_PROC_NULL) {
// u
switch (bc.uT) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_u_T_D), dim3(blocks.Gfx.num_kn), dim3(blocks.Gfx.dim_kn), 0, 0, _u_star, bc.uTD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_u_T_N), dim3(blocks.Gfx.num_kn), dim3(blocks.Gfx.dim_kn), 0, 0, _u_star);
break;
}
// v
switch (bc.vT) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_v_T_D), dim3(blocks.Gfy.num_kn), dim3(blocks.Gfy.dim_kn), 0, 0, _v_star, bc.vTD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_v_T_N), dim3(blocks.Gfy.num_kn), dim3(blocks.Gfy.dim_kn), 0, 0, _v_star);
break;
}
// w
switch (bc.wT) {
case DIRICHLET:
hipLaunchKernelGGL(( BC_w_T_D), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w_star, bc.wTD);
break;
case NEUMANN:
hipLaunchKernelGGL(( BC_w_T_N), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w_star);
break;
}
}
}
extern "C"
void cuda_solvability(void)
{
//printf("N%d >> Enforcing solvability...\n", rank);
/* Calculate difference from zero on each domain, then MPI_Allreduce that
* value. It would be better to define an MPI_COMM for the edge cells,
* but that's an optimization.
*/
/* Differences from zero on each plane */
real eps_xs = 0.;
real eps_xe = 0.;
real eps_ys = 0.;
real eps_ye = 0.;
real eps_zs = 0.;
real eps_ze = 0.;
real eps[3]; // [x, y, z]
// local reduction, then global reduction
if (dom[rank].I == DOM.Is) {
/* Temporary storage for reduction */
real *u_star_tmp;
hipMalloc((void**) &u_star_tmp, dom[rank].Gfx.s2_i * sizeof(real));
/* Calculate x-face integral (is) */
hipLaunchKernelGGL(( surf_int_xs), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u_star, u_star_tmp);
/* Reduction */
thrust::device_ptr<real> t_us_tmp(u_star_tmp);
eps_xs = thrust::reduce(t_us_tmp, t_us_tmp + dom[rank].Gfx.s2_i, 0.,
thrust::plus<real>());
eps_xs *= dom[rank].dy * dom[rank].dz;
/* clean up */
hipFree(u_star_tmp);
}
if (dom[rank].I == DOM.Ie) {
real *u_star_tmp;
hipMalloc((void**) &u_star_tmp, dom[rank].Gfx.s2_i * sizeof(real));
hipLaunchKernelGGL(( surf_int_xe), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u_star, u_star_tmp);
thrust::device_ptr<real> t_us_tmp(u_star_tmp);
eps_xe = thrust::reduce(t_us_tmp, t_us_tmp + dom[rank].Gfx.s2_i, 0.,
thrust::plus<real>());
eps_xe *= dom[rank].dy * dom[rank].dz;
hipFree(u_star_tmp);
}
if (dom[rank].J == DOM.Js) {
real *v_star_tmp;
hipMalloc((void**) &v_star_tmp, dom[rank].Gfy.s2_j * sizeof(real));
hipLaunchKernelGGL(( surf_int_ys), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v_star, v_star_tmp);
thrust::device_ptr<real> t_vs_tmp(v_star_tmp);
eps_ys = thrust::reduce(t_vs_tmp, t_vs_tmp + dom[rank].Gfy.s2_j, 0.,
thrust::plus<real>());
eps_ys *= dom[rank].dz * dom[rank].dx;
hipFree(v_star_tmp);
}
if (dom[rank].J == DOM.Je) {
real *v_star_tmp;
hipMalloc((void**) &v_star_tmp, dom[rank].Gfy.s2_j * sizeof(real));
hipLaunchKernelGGL(( surf_int_ye), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v_star, v_star_tmp);
thrust::device_ptr<real> t_vs_tmp(v_star_tmp);
eps_ye = thrust::reduce(t_vs_tmp, t_vs_tmp + dom[rank].Gfy.s2_j, 0.,
thrust::plus<real>());
eps_ye *= dom[rank].dz * dom[rank].dx;
hipFree(v_star_tmp);
}
if (dom[rank].K == DOM.Ks) {
real *w_star_tmp;
hipMalloc((void**) &w_star_tmp, dom[rank].Gfz.s2_k * sizeof(real));
hipLaunchKernelGGL(( surf_int_zs), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w_star, w_star_tmp);
thrust::device_ptr<real> t_ws_tmp(w_star_tmp);
eps_zs = thrust::reduce(t_ws_tmp, t_ws_tmp + dom[rank].Gfz.s2_k, 0.,
thrust::plus<real>());
eps_zs *= dom[rank].dx * dom[rank].dy;
hipFree(w_star_tmp);
}
if (dom[rank].K == DOM.Ke) {
real *w_star_tmp;
hipMalloc((void**) &w_star_tmp, dom[rank].Gfz.s2_k * sizeof(real));
hipLaunchKernelGGL(( surf_int_ze), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w_star, w_star_tmp);
thrust::device_ptr<real> t_ws_tmp(w_star_tmp);
eps_ze = thrust::reduce(t_ws_tmp, t_ws_tmp + dom[rank].Gfz.s2_k, 0.,
thrust::plus<real>());
eps_ze *= dom[rank].dx * dom[rank].dy;
hipFree(w_star_tmp);
}
/* Find difference in each direction */
eps[0] = eps_xe - eps_xs;
eps[1] = eps_ye - eps_ys;
eps[2] = eps_ze - eps_zs;
/* MPI_Allreduce */
MPI_Allreduce(MPI_IN_PLACE, &eps, 3, mpi_real, MPI_SUM, MPI_COMM_WORLD);
/* subtract eps from outflow plane */
real sum;
switch (out_plane) {
case WEST:
if (dom[rank].I == DOM.Is) {
sum = (eps[0] + eps[1] + eps[2])/(DOM.yl * DOM.zl);
hipLaunchKernelGGL(( plane_eps_x_W), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u_star, sum);
}
break;
case EAST:
if (dom[rank].I == DOM.Ie) {
sum = (eps[0] + eps[1] + eps[2])/(DOM.yl * DOM.zl);
hipLaunchKernelGGL(( plane_eps_x_E), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u_star, sum);
}
break;
case SOUTH:
if (dom[rank].J == DOM.Js) {
sum = (eps[0] + eps[1] + eps[2])/(DOM.zl * DOM.xl);
hipLaunchKernelGGL(( plane_eps_y_S), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v_star, sum);
}
break;
case NORTH:
if (dom[rank].J == DOM.Je) {
sum = (eps[0] + eps[1] + eps[2])/(DOM.zl * DOM.xl);
hipLaunchKernelGGL(( plane_eps_y_N), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v_star, sum);
}
break;
case BOTTOM:
if (dom[rank].K == DOM.Ks) {
sum = (eps[0] + eps[1] + eps[2])/(DOM.xl * DOM.yl);
hipLaunchKernelGGL(( plane_eps_z_B), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w_star, sum);
}
break;
case TOP:
if (dom[rank].K == DOM.Ke) {
sum = (eps[0] + eps[1] + eps[2])/(DOM.xl * DOM.yl);
hipLaunchKernelGGL(( plane_eps_z_T), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w_star, sum);
}
break;
case HOMOGENEOUS:
// spread over entire domain
real sum_x = 0.5*eps[0]/(DOM.yl * DOM.zl);
real sum_y = 0.5*eps[1]/(DOM.zl * DOM.xl);
real sum_z = 0.5*eps[2]/(DOM.xl * DOM.yl);
if (dom[rank].I == DOM.Is)
hipLaunchKernelGGL(( plane_eps_x_W), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u_star, sum_x);
if (dom[rank].I == DOM.Ie)
hipLaunchKernelGGL(( plane_eps_x_E), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u_star, sum_x);
if (dom[rank].J == DOM.Js)
hipLaunchKernelGGL(( plane_eps_y_S), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v_star, sum_y);
if (dom[rank].J == DOM.Je)
hipLaunchKernelGGL(( plane_eps_y_N), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v_star, sum_y);
if (dom[rank].K == DOM.Ks)
hipLaunchKernelGGL(( plane_eps_z_B), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w_star, sum_z);
if (dom[rank].K == DOM.Ke)
hipLaunchKernelGGL(( plane_eps_z_T), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w_star, sum_z);
break;
}
}
extern "C"
void cuda_project(void)
{
hipLaunchKernelGGL(( project_u), dim3(blocks.Gfx.num_in), dim3(blocks.Gfx.dim_in), 0, 0, _u_star, _phi, rho_f, dt,
_u, 1. / dom[rank].dx, _flag_u);
hipLaunchKernelGGL(( project_v), dim3(blocks.Gfy.num_jn), dim3(blocks.Gfy.dim_jn), 0, 0, _v_star, _phi, rho_f, dt,
_v, 1. / dom[rank].dy, _flag_v);
hipLaunchKernelGGL(( project_w), dim3(blocks.Gfz.num_kn), dim3(blocks.Gfz.dim_kn), 0, 0, _w_star, _phi, rho_f, dt,
_w, 1. / dom[rank].dz, _flag_w);
}
extern "C"
void cuda_update_p()
{
/* Calculate laplacian of phi and update */
real *_Lp;
hipMalloc((void**) &_Lp, sizeof(real)*dom[rank].Gcc.s3b);
hipLaunchKernelGGL(( update_p_laplacian), dim3(blocks.Gcc.num_kn), dim3(blocks.Gcc.dim_kn), 0, 0, _Lp, _phi);
hipLaunchKernelGGL(( update_p), dim3(blocks.Gcc.num_kn), dim3(blocks.Gcc.dim_kn), 0, 0, _Lp, _p0, _p, _phi, nu,
dt, _phase);
hipFree(_Lp);
/* set mean pressure to zero */
real *_p_mean;
hipMalloc((void**) &_p_mean, sizeof(real)*dom[rank].Gcc.s3);
hipLaunchKernelGGL(( copy_p_p_noghost), dim3(blocks.Gcc.num_kn), dim3(blocks.Gcc.dim_kn), 0, 0, _p_mean, _p);
thrust::device_ptr<real> t_p_mean(_p_mean);
real pmean = thrust::reduce(t_p_mean, t_p_mean + dom[rank].Gcc.s3, 0.,
thrust::plus<real>());
MPI_Allreduce(MPI_IN_PLACE, &pmean, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
pmean /= (real) DOM.Gcc.s3;
// numerical reproducibility? + associativity of floating point addition
hipFree(_p_mean);
hipLaunchKernelGGL(( forcing_add_c_const), dim3(blocks.Gcc.num_kn), dim3(blocks.Gcc.dim_kn), 0, 0, -pmean, _p);
}
extern "C"
void cuda_dom_BC_p(real *array)
{
// Can do this with an if/else, not switch/case
// if (bc.pW == NEUMANN)...
// Could also do this with dom[rank].I == DOM.Is
if (dom[rank].w == MPI_PROC_NULL) { // WEST
switch (bc.pW) {
case NEUMANN:
hipLaunchKernelGGL(( BC_p_W_N), dim3(blocks.Gcc.num_in), dim3(blocks.Gcc.dim_in), 0, 0, array);
break;
}
}
if (dom[rank].e == MPI_PROC_NULL) { // EAST
switch (bc.pE) {
case NEUMANN:
hipLaunchKernelGGL(( BC_p_E_N), dim3(blocks.Gcc.num_in), dim3(blocks.Gcc.dim_in), 0, 0, array);
break;
}
}
if (dom[rank].s == MPI_PROC_NULL) { // SOUTH
switch (bc.pS) {
case NEUMANN:
hipLaunchKernelGGL(( BC_p_S_N), dim3(blocks.Gcc.num_jn), dim3(blocks.Gcc.dim_jn), 0, 0, array);
break;
}
}
if (dom[rank].n == MPI_PROC_NULL) { // NORTH
switch (bc.pN) {
case NEUMANN:
hipLaunchKernelGGL(( BC_p_N_N), dim3(blocks.Gcc.num_jn), dim3(blocks.Gcc.dim_jn), 0, 0, array);
break;
}
}
if (dom[rank].b == MPI_PROC_NULL) { // BOTTOM
switch (bc.pB) {
case NEUMANN:
hipLaunchKernelGGL(( BC_p_B_N), dim3(blocks.Gcc.num_kn), dim3(blocks.Gcc.dim_kn), 0, 0, array);
break;
}
}
if (dom[rank].t == MPI_PROC_NULL) { // TOP
switch (bc.pT) {
case NEUMANN:
hipLaunchKernelGGL(( BC_p_T_N), dim3(blocks.Gcc.num_kn), dim3(blocks.Gcc.dim_kn), 0, 0, array);
break;
}
}
}
extern "C"
void cuda_store_u(void)
{
hipMemcpy(_conv0_u, _conv_u, dom[rank].Gfx.s3b*sizeof(real),
hipMemcpyDeviceToDevice);
hipMemcpy(_conv0_v, _conv_v, dom[rank].Gfy.s3b*sizeof(real),
hipMemcpyDeviceToDevice);
hipMemcpy(_conv0_w, _conv_w, dom[rank].Gfz.s3b*sizeof(real),
hipMemcpyDeviceToDevice);
hipMemcpy(_diff0_u, _diff_u, dom[rank].Gfx.s3b*sizeof(real),
hipMemcpyDeviceToDevice);
hipMemcpy(_diff0_v, _diff_v, dom[rank].Gfy.s3b*sizeof(real),
hipMemcpyDeviceToDevice);
hipMemcpy(_diff0_w, _diff_w, dom[rank].Gfz.s3b*sizeof(real),
hipMemcpyDeviceToDevice);
hipMemcpy(_p0, _p, dom[rank].Gcc.s3b*sizeof(real),
hipMemcpyDeviceToDevice);
hipMemcpy(_u0, _u, dom[rank].Gfx.s3b*sizeof(real),
hipMemcpyDeviceToDevice);
hipMemcpy(_v0, _v, dom[rank].Gfy.s3b*sizeof(real),
hipMemcpyDeviceToDevice);
hipMemcpy(_w0, _w, dom[rank].Gfz.s3b*sizeof(real),
hipMemcpyDeviceToDevice);
}
extern "C"
void cuda_dom_free(void)
{
// Free cuda memory on host
checkCudaErrors(hipHostFree(p));
checkCudaErrors(hipHostFree(p0));
checkCudaErrors(hipHostFree(u));
checkCudaErrors(hipHostFree(v));
checkCudaErrors(hipHostFree(w));
checkCudaErrors(hipHostFree(u0));
checkCudaErrors(hipHostFree(v0));
checkCudaErrors(hipHostFree(w0));
checkCudaErrors(hipHostFree(conv_u));
checkCudaErrors(hipHostFree(conv_v));
checkCudaErrors(hipHostFree(conv_w));
checkCudaErrors(hipHostFree(conv0_u));
checkCudaErrors(hipHostFree(conv0_v));
checkCudaErrors(hipHostFree(conv0_w));
checkCudaErrors(hipHostFree(diff_u));
checkCudaErrors(hipHostFree(diff_v));
checkCudaErrors(hipHostFree(diff_w));
checkCudaErrors(hipHostFree(diff0_u));
checkCudaErrors(hipHostFree(diff0_v));
checkCudaErrors(hipHostFree(diff0_w));
checkCudaErrors(hipHostFree(f_x));
checkCudaErrors(hipHostFree(f_y));
checkCudaErrors(hipHostFree(f_z));
checkCudaErrors(hipHostFree(u_star));
checkCudaErrors(hipHostFree(v_star));
checkCudaErrors(hipHostFree(w_star));
checkCudaErrors(hipHostFree(flag_u));
checkCudaErrors(hipHostFree(flag_v));
checkCudaErrors(hipHostFree(flag_w));
checkCudaErrors(hipHostFree(phi));
// Free cuda memory on device
checkCudaErrors(hipFree(_DOM));
checkCudaErrors(hipFree(_bc));
checkCudaErrors(hipFree(_p));
checkCudaErrors(hipFree(_p0));
checkCudaErrors(hipFree(_phi));
checkCudaErrors(hipFree(_phinoghost));
checkCudaErrors(hipFree(_invM));
checkCudaErrors(hipFree(_u));
checkCudaErrors(hipFree(_v));
checkCudaErrors(hipFree(_w));
checkCudaErrors(hipFree(_u0));
checkCudaErrors(hipFree(_v0));
checkCudaErrors(hipFree(_w0));
checkCudaErrors(hipFree(_conv_u));
checkCudaErrors(hipFree(_conv_v));
checkCudaErrors(hipFree(_conv_w));
checkCudaErrors(hipFree(_conv0_u));
checkCudaErrors(hipFree(_conv0_v));
checkCudaErrors(hipFree(_conv0_w));
checkCudaErrors(hipFree(_diff_u));
checkCudaErrors(hipFree(_diff_v));
checkCudaErrors(hipFree(_diff_w));
checkCudaErrors(hipFree(_diff0_u));
checkCudaErrors(hipFree(_diff0_v));
checkCudaErrors(hipFree(_diff0_w));
checkCudaErrors(hipFree(_f_x));
checkCudaErrors(hipFree(_f_y));
checkCudaErrors(hipFree(_f_z));
checkCudaErrors(hipFree(_u_star));
checkCudaErrors(hipFree(_v_star));
checkCudaErrors(hipFree(_w_star));
checkCudaErrors(hipFree(_flag_u));
checkCudaErrors(hipFree(_flag_v));
checkCudaErrors(hipFree(_flag_w));
checkCudaErrors(hipFree(_rhs_p));
checkCudaErrors(hipFree(_r_q));
checkCudaErrors(hipFree(_z_q));
//checkCudaErrors(hipFree(_rs_0));
checkCudaErrors(hipFree(_p_q));
checkCudaErrors(hipFree(_pb_q));
//checkCudaErrors(hipFree(_s_q));
//checkCudaErrors(hipFree(_sb_q));
checkCudaErrors(hipFree(_Apb_q));
//checkCudaErrors(hipFree(_Asb_q));
checkCudaErrors(hipFree(_send_Gcc_e));
checkCudaErrors(hipFree(_send_Gcc_w));
checkCudaErrors(hipFree(_send_Gcc_n));
checkCudaErrors(hipFree(_send_Gcc_s));
checkCudaErrors(hipFree(_send_Gcc_t));
checkCudaErrors(hipFree(_send_Gcc_b));
checkCudaErrors(hipFree(_send_Gfx_e));
checkCudaErrors(hipFree(_send_Gfx_w));
checkCudaErrors(hipFree(_send_Gfx_n));
checkCudaErrors(hipFree(_send_Gfx_s));
checkCudaErrors(hipFree(_send_Gfx_t));
checkCudaErrors(hipFree(_send_Gfx_b));
checkCudaErrors(hipFree(_send_Gfy_e));
checkCudaErrors(hipFree(_send_Gfy_w));
checkCudaErrors(hipFree(_send_Gfy_n));
checkCudaErrors(hipFree(_send_Gfy_s));
checkCudaErrors(hipFree(_send_Gfy_t));
checkCudaErrors(hipFree(_send_Gfy_b));
checkCudaErrors(hipFree(_send_Gfz_e));
checkCudaErrors(hipFree(_send_Gfz_w));
checkCudaErrors(hipFree(_send_Gfz_n));
checkCudaErrors(hipFree(_send_Gfz_s));
checkCudaErrors(hipFree(_send_Gfz_t));
checkCudaErrors(hipFree(_send_Gfz_b));
checkCudaErrors(hipFree(_recv_Gcc_e));
checkCudaErrors(hipFree(_recv_Gcc_w));
checkCudaErrors(hipFree(_recv_Gcc_n));
checkCudaErrors(hipFree(_recv_Gcc_s));
checkCudaErrors(hipFree(_recv_Gcc_t));
checkCudaErrors(hipFree(_recv_Gcc_b));
checkCudaErrors(hipFree(_recv_Gfx_e));
checkCudaErrors(hipFree(_recv_Gfx_w));
checkCudaErrors(hipFree(_recv_Gfx_n));
checkCudaErrors(hipFree(_recv_Gfx_s));
checkCudaErrors(hipFree(_recv_Gfx_t));
checkCudaErrors(hipFree(_recv_Gfx_b));
checkCudaErrors(hipFree(_recv_Gfy_e));
checkCudaErrors(hipFree(_recv_Gfy_w));
checkCudaErrors(hipFree(_recv_Gfy_n));
checkCudaErrors(hipFree(_recv_Gfy_s));
checkCudaErrors(hipFree(_recv_Gfy_t));
checkCudaErrors(hipFree(_recv_Gfy_b));
checkCudaErrors(hipFree(_recv_Gfz_e));
checkCudaErrors(hipFree(_recv_Gfz_w));
checkCudaErrors(hipFree(_recv_Gfz_n));
checkCudaErrors(hipFree(_recv_Gfz_s));
checkCudaErrors(hipFree(_recv_Gfz_t));
checkCudaErrors(hipFree(_recv_Gfz_b));
// Reset devices
checkCudaErrors(hipDeviceReset());
}
// Miscellaneous functions
extern "C"
void cuda_wall_shear_stress()
{
real *_dudy;
hipMalloc(&_dudy, dom[rank].Gfx.s2_j * sizeof(real));
hipMemset(_dudy, 0., dom[rank].Gfx.s2_j);
thrust::device_ptr<real> t_dudy(_dudy);
real dudy_s = 0.;
real dudy_n = 0.;
// On south face
if (dom[rank].J == DOM.Js) {
hipLaunchKernelGGL(( calc_dudy), dim3(blocks.Gfx.num_jn), dim3(blocks.Gfx.dim_jn), 0, 0, _u, _dudy,
dom[rank].Gfx._jsb);
dudy_s = thrust::reduce(t_dudy, t_dudy + dom[rank].Gfx.s2_j, 0.,
thrust::plus<real>());
} else {
dudy_s = 0.;
}
MPI_Allreduce(MPI_IN_PLACE, &dudy_s, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
dudy_s /= DOM.Gfx.s2_j;
// On north face
if (dom[rank].J == DOM.Je) {
hipLaunchKernelGGL(( calc_dudy), dim3(blocks.Gfx.num_jn), dim3(blocks.Gfx.dim_jn), 0, 0, _u, _dudy,
dom[rank].Gfx._je);
dudy_n = thrust::reduce(t_dudy, t_dudy + dom[rank].Gfx.s2_j, 0.,
thrust::plus<real>());
} else {
dudy_n = 0.;
}
MPI_Allreduce(MPI_IN_PLACE, &dudy_n, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
dudy_n /= DOM.Gfx.s2_j;
// Open file for writing
if (rank == 0) {
char fname[FILE_NAME_SIZE];
sprintf(fname, "%s/%s/wss.dat", ROOT_DIR, OUTPUT_DIR);
FILE *file;
if (stepnum == 1) {
file = fopen(fname, "w");
if (file == NULL) {
fprintf(stderr, "Could not open file %s\n", fname);
exit(EXIT_FAILURE);
}
fprintf(file, "%-9s", "stepnum");
fprintf(file, "%-11s", "ttime");
fprintf(file, "%-11s", "wss-s");
fprintf(file, "%-11s", "wss-n");
} else {
file = fopen(fname, "a");
if (file == NULL) {
fprintf(stderr, "Could not open file %s\n", fname);
exit(EXIT_FAILURE);
}
}
fprintf(file, "\n");
fprintf(file, "%-9d", stepnum);
fprintf(file, "%-11.3e", ttime);
fprintf(file, "%-11.3e", rho_f*nu*dudy_s);
fprintf(file, "%-11.3e", rho_f*nu*dudy_n);
fclose(file);
}
// Free
hipFree(_dudy);
}
| 5fc6fbf20305f5d87cc2a38992f4082e537c83d5.cu | /*******************************************************************************
********************************* BLUEBOTTLE **********************************
*******************************************************************************
*
* Copyright 2012 - 2018 Adam Sierakowski and Daniel Willen,
* The Johns Hopkins University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Please contact the Johns Hopkins University to use Bluebottle for
* commercial and/or for-profit applications.
******************************************************************************/
#include <cuda.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h> // FOR DEBUG!!
#include <helper_cuda.h>
#include "cuda_bluebottle.h"
#include "cuda_particle.h"
__constant__ dom_struct _dom;
cuda_blocks_struct blocks;
extern "C"
void cuda_check_errors(int line)
{
printf("N%d >> Checking errors on line %d\n", rank, line);
checkCudaErrors(cudaDeviceSynchronize());
}
extern "C"
int cuda_device_count(void)
{
// Get number of cuda devices
int dev_count = 0;
checkCudaErrors(cudaGetDeviceCount(&dev_count));
return dev_count;
}
extern "C"
void cuda_device_init(int device)
{
checkCudaErrors(cudaSetDevice(device));
}
extern "C"
void cuda_enable_peer(void)
{
int target_peer = (rank + 1) % nprocs;
printf("Enabling peer access from %d to %d\n", rank, target_peer);
checkCudaErrors(cudaDeviceEnablePeerAccess(target_peer, 0));
}
extern "C"
void cuda_block(void)
{
cudaDeviceSynchronize();
}
extern "C"
void cuda_dom_malloc_host(void)
{
//printf("N%d >> Allocating pinned host memory... \n", rank);
// Allocate (pinned) device memory on host
checkCudaErrors(cudaMallocHost(&p, dom[rank].Gcc.s3b * sizeof(real)));
cpumem += dom[rank].Gcc.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&p0, dom[rank].Gcc.s3b * sizeof(real)));
cpumem += dom[rank].Gcc.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&phi, dom[rank].Gcc.s3b * sizeof(real)));
cpumem += dom[rank].Gcc.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&u, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&v, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&w, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&u0, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&v0, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&w0, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&conv_u, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&conv_v, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&conv_w, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&conv0_u, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&conv0_v, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&conv0_w, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&diff_u, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&diff_v, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&diff_w, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&diff0_u, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&diff0_v, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&diff0_w, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&f_x, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&f_y, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&f_z, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&u_star, dom[rank].Gfx.s3b * sizeof(real)));
cpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&v_star, dom[rank].Gfy.s3b * sizeof(real)));
cpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&w_star, dom[rank].Gfz.s3b * sizeof(real)));
cpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(cudaMallocHost(&flag_u, dom[rank].Gfx.s3b * sizeof(int)));
cpumem += dom[rank].Gfx.s3b * sizeof(int);
checkCudaErrors(cudaMallocHost(&flag_v, dom[rank].Gfy.s3b * sizeof(int)));
cpumem += dom[rank].Gfy.s3b * sizeof(int);
checkCudaErrors(cudaMallocHost(&flag_w, dom[rank].Gfz.s3b * sizeof(int)));
cpumem += dom[rank].Gfz.s3b * sizeof(int);
}
extern "C"
void cuda_dom_malloc_dev(void)
{
// Allocate device memory on device
// Don't need to free device constant memory
checkCudaErrors(cudaMemcpyToSymbol(_dom, &dom[rank], sizeof(dom_struct)));
checkCudaErrors(cudaMalloc((void**) &_DOM, sizeof(dom_struct)));
gpumem += sizeof(dom_struct);
checkCudaErrors(cudaMemcpy(_DOM, &DOM, sizeof(dom_struct),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void**) &_bc, sizeof(BC)));
gpumem += sizeof(BC);
checkCudaErrors(cudaMemcpy(_bc, &bc, sizeof(BC),
cudaMemcpyHostToDevice));
/* Flow solver variables */
checkCudaErrors(cudaMalloc(&_phi, dom[rank].Gcc.s3b * sizeof(real)));
gpumem += dom[rank].Gcc.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_phinoghost, dom[rank].Gcc.s3 * sizeof(real)));
gpumem += dom[rank].Gcc.s3 * sizeof(real);
checkCudaErrors(cudaMalloc(&_invM, dom[rank].Gcc.s3 * sizeof(real)));
gpumem += dom[rank].Gcc.s3 * sizeof(real);
checkCudaErrors(cudaMalloc(&_p, dom[rank].Gcc.s3b * sizeof(real)));
gpumem += dom[rank].Gcc.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_p0, dom[rank].Gcc.s3b * sizeof(real)));
gpumem += dom[rank].Gcc.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_u, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_v, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_w, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_u0, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_v0, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_w0, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_conv_u, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_conv_v, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_conv_w, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_conv0_u, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_conv0_v, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_conv0_w, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_diff_u, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_diff_v, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_diff_w, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_diff0_u, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_diff0_v, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_diff0_w, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_f_x, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_f_y, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_f_z, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_u_star, dom[rank].Gfx.s3b * sizeof(real)));
gpumem += dom[rank].Gfx.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_v_star, dom[rank].Gfy.s3b * sizeof(real)));
gpumem += dom[rank].Gfy.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_w_star, dom[rank].Gfz.s3b * sizeof(real)));
gpumem += dom[rank].Gfz.s3b * sizeof(real);
// Flags
checkCudaErrors(cudaMalloc(&_flag_u, dom[rank].Gfx.s3b * sizeof(int)));
gpumem += dom[rank].Gfx.s3b * sizeof(int);
checkCudaErrors(cudaMalloc(&_flag_v, dom[rank].Gfy.s3b * sizeof(int)));
gpumem += dom[rank].Gfy.s3b * sizeof(int);
checkCudaErrors(cudaMalloc(&_flag_w, dom[rank].Gfz.s3b * sizeof(int)));
gpumem += dom[rank].Gfz.s3b * sizeof(int);
/* Poisson Equation Variables */
checkCudaErrors(cudaMalloc(&_r_q, dom[rank].Gcc.s3 * sizeof(real)));
gpumem += dom[rank].Gcc.s3 * sizeof(real);
checkCudaErrors(cudaMalloc(&_z_q, dom[rank].Gcc.s3 * sizeof(real)));
gpumem += dom[rank].Gcc.s3 * sizeof(real);
//checkCudaErrors(cudaMalloc(&_rs_0, dom[rank].Gcc.s3 * sizeof(real)));
// gpumem += dom[rank].Gcc.s3 * sizeof(real);
checkCudaErrors(cudaMalloc(&_p_q, dom[rank].Gcc.s3 * sizeof(real)));
gpumem += dom[rank].Gcc.s3 * sizeof(real);
//checkCudaErrors(cudaMalloc(&_s_q, dom[rank].Gcc.s3 * sizeof(real)));
// gpumem += dom[rank].Gcc.s3 * sizeof(real);
checkCudaErrors(cudaMalloc(&_Apb_q, dom[rank].Gcc.s3 * sizeof(real)));
gpumem += dom[rank].Gcc.s3 * sizeof(real);
//checkCudaErrors(cudaMalloc(&_Asb_q, dom[rank].Gcc.s3 * sizeof(real)));
// gpumem += dom[rank].Gcc.s3 * sizeof(real);
// These are s3b because the SpMv requires more info
checkCudaErrors(cudaMalloc(&_rhs_p, dom[rank].Gcc.s3b * sizeof(real)));
gpumem += dom[rank].Gcc.s3b * sizeof(real);
checkCudaErrors(cudaMalloc(&_pb_q, dom[rank].Gcc.s3b * sizeof(real)));
gpumem += dom[rank].Gcc.s3b * sizeof(real);
//checkCudaErrors(cudaMalloc(&_sb_q, dom[rank].Gcc.s3b * sizeof(real)));
// gpumem += dom[rank].Gcc.s3b * sizeof(real);
/* Subdomain communication variables */
// Outer computational planes
checkCudaErrors(cudaMalloc(&_send_Gcc_e, dom[rank].Gcc.s2_i * sizeof(real)));
gpumem += dom[rank].Gcc.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gcc_w, dom[rank].Gcc.s2_i * sizeof(real)));
gpumem += dom[rank].Gcc.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gcc_n, dom[rank].Gcc.s2_j * sizeof(real)));
gpumem += dom[rank].Gcc.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gcc_s, dom[rank].Gcc.s2_j * sizeof(real)));
gpumem += dom[rank].Gcc.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gcc_t, dom[rank].Gcc.s2_k * sizeof(real)));
gpumem += dom[rank].Gcc.s2_k * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gcc_b, dom[rank].Gcc.s2_k * sizeof(real)));
gpumem += dom[rank].Gcc.s2_k * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfx_e, dom[rank].Gfx.s2_i * sizeof(real)));
gpumem += dom[rank].Gfx.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfx_w, dom[rank].Gfx.s2_i * sizeof(real)));
gpumem += dom[rank].Gfx.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfx_n, dom[rank].Gfx.s2_j * sizeof(real)));
gpumem += dom[rank].Gfx.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfx_s, dom[rank].Gfx.s2_j * sizeof(real)));
gpumem += dom[rank].Gfx.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfx_t, dom[rank].Gfx.s2_k * sizeof(real)));
gpumem += dom[rank].Gfx.s2_k * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfx_b, dom[rank].Gfx.s2_k * sizeof(real)));
gpumem += dom[rank].Gfx.s2_k * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfy_e, dom[rank].Gfy.s2_i * sizeof(real)));
gpumem += dom[rank].Gfy.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfy_w, dom[rank].Gfy.s2_i * sizeof(real)));
gpumem += dom[rank].Gfy.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfy_n, dom[rank].Gfy.s2_j * sizeof(real)));
gpumem += dom[rank].Gfy.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfy_s, dom[rank].Gfy.s2_j * sizeof(real)));
gpumem += dom[rank].Gfy.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfy_t, dom[rank].Gfy.s2_k * sizeof(real)));
gpumem += dom[rank].Gfy.s2_k * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfy_b, dom[rank].Gfy.s2_k * sizeof(real)));
gpumem += dom[rank].Gfy.s2_k * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfz_e, dom[rank].Gfz.s2_i * sizeof(real)));
gpumem += dom[rank].Gfz.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfz_w, dom[rank].Gfz.s2_i * sizeof(real)));
gpumem += dom[rank].Gfz.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfz_n, dom[rank].Gfz.s2_j * sizeof(real)));
gpumem += dom[rank].Gfz.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfz_s, dom[rank].Gfz.s2_j * sizeof(real)));
gpumem += dom[rank].Gfz.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfz_t, dom[rank].Gfz.s2_k * sizeof(real)));
gpumem += dom[rank].Gfz.s2_k * sizeof(real);
checkCudaErrors(cudaMalloc(&_send_Gfz_b, dom[rank].Gfz.s2_k * sizeof(real)));
gpumem += dom[rank].Gfz.s2_k * sizeof(real);
// Ghost cell planes
checkCudaErrors(cudaMalloc(&_recv_Gcc_e, dom[rank].Gcc.s2_i * sizeof(real)));
gpumem += dom[rank].Gcc.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gcc_w, dom[rank].Gcc.s2_i * sizeof(real)));
gpumem += dom[rank].Gcc.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gcc_n, dom[rank].Gcc.s2_j * sizeof(real)));
gpumem += dom[rank].Gcc.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gcc_s, dom[rank].Gcc.s2_j * sizeof(real)));
gpumem += dom[rank].Gcc.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gcc_t, dom[rank].Gcc.s2_k * sizeof(real)));
gpumem += dom[rank].Gcc.s2_k * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gcc_b, dom[rank].Gcc.s2_k * sizeof(real)));
gpumem += dom[rank].Gcc.s2_k * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfx_e, dom[rank].Gfx.s2_i * sizeof(real)));
gpumem += dom[rank].Gfx.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfx_w, dom[rank].Gfx.s2_i * sizeof(real)));
gpumem += dom[rank].Gfx.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfx_n, dom[rank].Gfx.s2_j * sizeof(real)));
gpumem += dom[rank].Gfx.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfx_s, dom[rank].Gfx.s2_j * sizeof(real)));
gpumem += dom[rank].Gfx.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfx_t, dom[rank].Gfx.s2_k * sizeof(real)));
gpumem += dom[rank].Gfx.s2_k * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfx_b, dom[rank].Gfx.s2_k * sizeof(real)));
gpumem += dom[rank].Gfx.s2_k * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfy_e, dom[rank].Gfy.s2_i * sizeof(real)));
gpumem += dom[rank].Gfy.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfy_w, dom[rank].Gfy.s2_i * sizeof(real)));
gpumem += dom[rank].Gfy.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfy_n, dom[rank].Gfy.s2_j * sizeof(real)));
gpumem += dom[rank].Gfy.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfy_s, dom[rank].Gfy.s2_j * sizeof(real)));
gpumem += dom[rank].Gfy.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfy_t, dom[rank].Gfy.s2_k * sizeof(real)));
gpumem += dom[rank].Gfy.s2_k * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfy_b, dom[rank].Gfy.s2_k * sizeof(real)));
gpumem += dom[rank].Gfy.s2_k * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfz_e, dom[rank].Gfz.s2_i * sizeof(real)));
gpumem += dom[rank].Gfz.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfz_w, dom[rank].Gfz.s2_i * sizeof(real)));
gpumem += dom[rank].Gfz.s2_i * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfz_n, dom[rank].Gfz.s2_j * sizeof(real)));
gpumem += dom[rank].Gfz.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfz_s, dom[rank].Gfz.s2_j * sizeof(real)));
gpumem += dom[rank].Gfz.s2_j * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfz_t, dom[rank].Gfz.s2_k * sizeof(real)));
gpumem += dom[rank].Gfz.s2_k * sizeof(real);
checkCudaErrors(cudaMalloc(&_recv_Gfz_b, dom[rank].Gfz.s2_k * sizeof(real)));
gpumem += dom[rank].Gfz.s2_k * sizeof(real);
// Init things that we will need
checkCudaErrors(cudaMemset(_u, 0., dom[rank].Gfx.s3b * sizeof(real)));
checkCudaErrors(cudaMemset(_v, 0., dom[rank].Gfy.s3b * sizeof(real)));
checkCudaErrors(cudaMemset(_w, 0., dom[rank].Gfz.s3b * sizeof(real)));
checkCudaErrors(cudaMemset(_p, 0., dom[rank].Gcc.s3b * sizeof(real)));
checkCudaErrors(cudaMemset(_u0, 0., dom[rank].Gfx.s3b * sizeof(real)));
checkCudaErrors(cudaMemset(_v0, 0., dom[rank].Gfy.s3b * sizeof(real)));
checkCudaErrors(cudaMemset(_w0, 0., dom[rank].Gfz.s3b * sizeof(real)));
checkCudaErrors(cudaMemset(_p0, 0., dom[rank].Gcc.s3b * sizeof(real)));
checkCudaErrors(cudaMemset(_phi, 0., dom[rank].Gcc.s3b * sizeof(real)));
checkCudaErrors(cudaMemset(_rhs_p, 0., dom[rank].Gcc.s3b * sizeof(real)));
checkCudaErrors(cudaMemset(_p_q, 0., dom[rank].Gcc.s3 * sizeof(real)));
checkCudaErrors(cudaMemset(_pb_q, 0., dom[rank].Gcc.s3b * sizeof(real)));
//checkCudaErrors(cudaMemset(_s_q, 0., dom[rank].Gcc.s3 * sizeof(real)));
//checkCudaErrors(cudaMemset(_sb_q, 0., dom[rank].Gcc.s3b * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gcc_e, 0., dom[rank].Gcc.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gcc_w, 0., dom[rank].Gcc.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gcc_n, 0., dom[rank].Gcc.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gcc_s, 0., dom[rank].Gcc.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gcc_t, 0., dom[rank].Gcc.s2_k * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gcc_b, 0., dom[rank].Gcc.s2_k * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfx_e, 0., dom[rank].Gfx.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfx_w, 0., dom[rank].Gfx.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfx_n, 0., dom[rank].Gfx.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfx_s, 0., dom[rank].Gfx.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfx_t, 0., dom[rank].Gfx.s2_k * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfx_b, 0., dom[rank].Gfx.s2_k * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfy_e, 0., dom[rank].Gfy.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfy_w, 0., dom[rank].Gfy.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfy_n, 0., dom[rank].Gfy.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfy_s, 0., dom[rank].Gfy.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfy_t, 0., dom[rank].Gfy.s2_k * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfy_b, 0., dom[rank].Gfy.s2_k * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfz_e, 0., dom[rank].Gfz.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfz_w, 0., dom[rank].Gfz.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfz_n, 0., dom[rank].Gfz.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfz_s, 0., dom[rank].Gfz.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfz_t, 0., dom[rank].Gfz.s2_k * sizeof(real)));
checkCudaErrors(cudaMemset(_send_Gfz_b, 0., dom[rank].Gfz.s2_k * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gcc_e, 0., dom[rank].Gcc.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gcc_w, 0., dom[rank].Gcc.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gcc_n, 0., dom[rank].Gcc.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gcc_s, 0., dom[rank].Gcc.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gcc_t, 0., dom[rank].Gcc.s2_k * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gcc_b, 0., dom[rank].Gcc.s2_k * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfx_e, 0., dom[rank].Gfx.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfx_w, 0., dom[rank].Gfx.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfx_n, 0., dom[rank].Gfx.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfx_s, 0., dom[rank].Gfx.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfx_t, 0., dom[rank].Gfx.s2_k * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfx_b, 0., dom[rank].Gfx.s2_k * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfy_e, 0., dom[rank].Gfy.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfy_w, 0., dom[rank].Gfy.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfy_n, 0., dom[rank].Gfy.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfy_s, 0., dom[rank].Gfy.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfy_t, 0., dom[rank].Gfy.s2_k * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfy_b, 0., dom[rank].Gfy.s2_k * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfz_e, 0., dom[rank].Gfz.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfz_w, 0., dom[rank].Gfz.s2_i * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfz_n, 0., dom[rank].Gfz.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfz_s, 0., dom[rank].Gfz.s2_j * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfz_t, 0., dom[rank].Gfz.s2_k * sizeof(real)));
checkCudaErrors(cudaMemset(_recv_Gfz_b, 0., dom[rank].Gfz.s2_k * sizeof(real)));
}
extern "C"
void cuda_update_bc(void)
{
printf("\nupdate bc\n");
update_vel_BC<<<1, 1>>>(_bc, v_bc_tdelay, ttime);
}
extern "C"
void cuda_dom_push(void)
{
// Push initialized domain data from host to device
checkCudaErrors(cudaMemcpy(_p, p, dom[rank].Gcc.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_u, u, dom[rank].Gfx.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_v, v, dom[rank].Gfy.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_w, w, dom[rank].Gfz.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_p0, p0, dom[rank].Gcc.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_u0, u0, dom[rank].Gfx.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_v0, v0, dom[rank].Gfy.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_w0, w0, dom[rank].Gfz.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_phi, phi, dom[rank].Gcc.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_u_star, u_star, dom[rank].Gfx.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_v_star, v_star, dom[rank].Gfy.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_w_star, w_star, dom[rank].Gfz.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_conv_u, conv_u, dom[rank].Gfx.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_conv_v, conv_v, dom[rank].Gfy.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_conv_w, conv_w, dom[rank].Gfz.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_conv0_u, conv0_u, dom[rank].Gfx.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_conv0_v, conv0_v, dom[rank].Gfy.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_conv0_w, conv0_w, dom[rank].Gfz.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_diff_u, diff_u, dom[rank].Gfx.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_diff_v, diff_v, dom[rank].Gfy.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_diff_w, diff_w, dom[rank].Gfz.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_diff0_u, diff0_u, dom[rank].Gfx.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_diff0_v, diff0_v, dom[rank].Gfy.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_diff0_w, diff0_w, dom[rank].Gfz.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_f_x, f_x, dom[rank].Gfx.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_f_y, f_y, dom[rank].Gfy.s3b * sizeof(real),
cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(_f_z, f_z, dom[rank].Gfz.s3b * sizeof(real),
cudaMemcpyHostToDevice));
}
extern "C"
void cuda_blocks_init()
{
//printf("N%d >> Creating cuda thread dimensions and size\n", rank);
gpumem += sizeof(cuda_blocks_struct);
int threads_x = 0;
int threads_y = 0;
int threads_z = 0;
int blocks_x = 0;
int blocks_y = 0;
int blocks_z = 0;
/* Computational Grid - Gcc */
threads_x = dom[rank].Gcc.in * (dom[rank].Gcc.in < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gcc.in >= MAX_THREADS_DIM);
threads_y = dom[rank].Gcc.jn * (dom[rank].Gcc.jn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gcc.jn >= MAX_THREADS_DIM);
threads_z = dom[rank].Gcc.kn * (dom[rank].Gcc.kn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gcc.kn >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gcc.in / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gcc.jn / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gcc.kn / (real) threads_z);
// Create tmp variables
dim3 Gcc_dim_in(threads_y, threads_z);
dim3 Gcc_dim_jn(threads_z, threads_x);
dim3 Gcc_dim_kn(threads_x, threads_y);
dim3 Gcc_num_in(blocks_y, blocks_z);
dim3 Gcc_num_jn(blocks_z, blocks_x);
dim3 Gcc_num_kn(blocks_x, blocks_y);
dim3 Gcc_dim_s3(threads_x, threads_y, threads_z);
dim3 Gcc_num_s3(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gcc.dim_in = Gcc_dim_in;
blocks.Gcc.dim_jn = Gcc_dim_jn;
blocks.Gcc.dim_kn = Gcc_dim_kn;
blocks.Gcc.num_in = Gcc_num_in;
blocks.Gcc.num_jn = Gcc_num_jn;
blocks.Gcc.num_kn = Gcc_num_kn;
blocks.Gcc.dim_s3 = Gcc_dim_s3;
blocks.Gcc.num_s3 = Gcc_num_s3;
/* Computational Shared Grid, GCC */
threads_x = (dom[rank].Gcc.in+2) *((dom[rank].Gcc.in+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gcc.in+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gcc.jn+2) *((dom[rank].Gcc.jn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gcc.jn+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gcc.kn+2) *((dom[rank].Gcc.kn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gcc.kn+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gcc.in / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gcc.jn / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gcc.kn / (real) (threads_z - 2));
// Create tmp variables
dim3 Gcc_dim_in_s(threads_y, threads_z);
dim3 Gcc_dim_jn_s(threads_z, threads_x);
dim3 Gcc_dim_kn_s(threads_x, threads_y);
dim3 Gcc_num_in_s(blocks_y, blocks_z);
dim3 Gcc_num_jn_s(blocks_z, blocks_x);
dim3 Gcc_num_kn_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gcc.dim_in_s = Gcc_dim_in_s;
blocks.Gcc.dim_jn_s = Gcc_dim_jn_s;
blocks.Gcc.dim_kn_s = Gcc_dim_kn_s;
blocks.Gcc.num_in_s = Gcc_num_in_s;
blocks.Gcc.num_jn_s = Gcc_num_jn_s;
blocks.Gcc.num_kn_s = Gcc_num_kn_s;
/* Computational Grid - Gfx */
threads_x = dom[rank].Gfx.in * (dom[rank].Gfx.in < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfx.in >= MAX_THREADS_DIM);
threads_y = dom[rank].Gfx.jn * (dom[rank].Gfx.jn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfx.jn >= MAX_THREADS_DIM);
threads_z = dom[rank].Gfx.kn * (dom[rank].Gfx.kn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfx.kn >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfx.in / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gfx.jn / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gfx.kn / (real) threads_z);
// Create tmp variables
dim3 Gfx_dim_in(threads_y, threads_z);
dim3 Gfx_dim_jn(threads_z, threads_x);
dim3 Gfx_dim_kn(threads_x, threads_y);
dim3 Gfx_num_in(blocks_y, blocks_z);
dim3 Gfx_num_jn(blocks_z, blocks_x);
dim3 Gfx_num_kn(blocks_x, blocks_y);
dim3 Gfx_dim_s3(threads_x, threads_y, threads_z);
dim3 Gfx_num_s3(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gfx.dim_in = Gfx_dim_in;
blocks.Gfx.dim_jn = Gfx_dim_jn;
blocks.Gfx.dim_kn = Gfx_dim_kn;
blocks.Gfx.num_in = Gfx_num_in;
blocks.Gfx.num_jn = Gfx_num_jn;
blocks.Gfx.num_kn = Gfx_num_kn;
blocks.Gfx.dim_s3 = Gfx_dim_s3;
blocks.Gfx.num_s3 = Gfx_num_s3;
/* Computational Shared Grid - Gfx */
threads_x = (dom[rank].Gfx.in+2) *((dom[rank].Gfx.in+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfx.in+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gfx.jn+2) *((dom[rank].Gfx.jn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfx.jn+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gfx.kn+2) *((dom[rank].Gfx.kn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfx.kn+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfx.in / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gfx.jn / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gfx.kn / (real) (threads_z - 2));
// Create tmp variables
dim3 Gfx_dim_in_s(threads_y, threads_z);
dim3 Gfx_dim_jn_s(threads_z, threads_x);
dim3 Gfx_dim_kn_s(threads_x, threads_y);
dim3 Gfx_num_in_s(blocks_y, blocks_z);
dim3 Gfx_num_jn_s(blocks_z, blocks_x);
dim3 Gfx_num_kn_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gfx.dim_in_s = Gfx_dim_in_s;
blocks.Gfx.dim_jn_s = Gfx_dim_jn_s;
blocks.Gfx.dim_kn_s = Gfx_dim_kn_s;
blocks.Gfx.num_in_s = Gfx_num_in_s;
blocks.Gfx.num_jn_s = Gfx_num_jn_s;
blocks.Gfx.num_kn_s = Gfx_num_kn_s;
/* Computational Grid - Gfy */
threads_x = dom[rank].Gfy.in * (dom[rank].Gfy.in < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfy.in >= MAX_THREADS_DIM);
threads_y = dom[rank].Gfy.jn * (dom[rank].Gfy.jn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfy.jn >= MAX_THREADS_DIM);
threads_z = dom[rank].Gfy.kn * (dom[rank].Gfy.kn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfy.kn >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfy.in / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gfy.jn / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gfy.kn / (real) threads_z);
// Create tmp variables
dim3 Gfy_dim_in(threads_y, threads_z);
dim3 Gfy_dim_jn(threads_z, threads_x);
dim3 Gfy_dim_kn(threads_x, threads_y);
dim3 Gfy_num_in(blocks_y, blocks_z);
dim3 Gfy_num_jn(blocks_z, blocks_x);
dim3 Gfy_num_kn(blocks_x, blocks_y);
dim3 Gfy_dim_s3(threads_x, threads_y, threads_z);
dim3 Gfy_num_s3(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gfy.dim_in = Gfy_dim_in;
blocks.Gfy.dim_jn = Gfy_dim_jn;
blocks.Gfy.dim_kn = Gfy_dim_kn;
blocks.Gfy.num_in = Gfy_num_in;
blocks.Gfy.num_jn = Gfy_num_jn;
blocks.Gfy.num_kn = Gfy_num_kn;
blocks.Gfy.dim_s3 = Gfy_dim_s3;
blocks.Gfy.num_s3 = Gfy_num_s3;
/* Computational Shared Grid - Gfy */
threads_x = (dom[rank].Gfy.in+2) *((dom[rank].Gfy.in+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfy.in+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gfy.jn+2) *((dom[rank].Gfy.jn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfy.jn+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gfy.kn+2) *((dom[rank].Gfy.kn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfy.kn+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfy.in / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gfy.jn / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gfy.kn / (real) (threads_z - 2));
// Create tmp variables
dim3 Gfy_dim_in_s(threads_y, threads_z);
dim3 Gfy_dim_jn_s(threads_z, threads_x);
dim3 Gfy_dim_kn_s(threads_x, threads_y);
dim3 Gfy_num_in_s(blocks_y, blocks_z);
dim3 Gfy_num_jn_s(blocks_z, blocks_x);
dim3 Gfy_num_kn_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gfy.dim_in_s = Gfy_dim_in_s;
blocks.Gfy.dim_jn_s = Gfy_dim_jn_s;
blocks.Gfy.dim_kn_s = Gfy_dim_kn_s;
blocks.Gfy.num_in_s = Gfy_num_in_s;
blocks.Gfy.num_jn_s = Gfy_num_jn_s;
blocks.Gfy.num_kn_s = Gfy_num_kn_s;
/* Computational Grid - Gfz */
threads_x = dom[rank].Gfz.in * (dom[rank].Gfz.in < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfz.in >= MAX_THREADS_DIM);
threads_y = dom[rank].Gfz.jn * (dom[rank].Gfz.jn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfz.jn >= MAX_THREADS_DIM);
threads_z = dom[rank].Gfz.kn * (dom[rank].Gfz.kn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfz.kn >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfz.in / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gfz.jn / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gfz.kn / (real) threads_z);
// Create tmp variables
dim3 Gfz_dim_in(threads_y, threads_z);
dim3 Gfz_dim_jn(threads_z, threads_x);
dim3 Gfz_dim_kn(threads_x, threads_y);
dim3 Gfz_num_in(blocks_y, blocks_z);
dim3 Gfz_num_jn(blocks_z, blocks_x);
dim3 Gfz_num_kn(blocks_x, blocks_y);
dim3 Gfz_dim_s3(threads_x, threads_y, threads_z);
dim3 Gfz_num_s3(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gfz.dim_in = Gfz_dim_in;
blocks.Gfz.dim_jn = Gfz_dim_jn;
blocks.Gfz.dim_kn = Gfz_dim_kn;
blocks.Gfz.num_in = Gfz_num_in;
blocks.Gfz.num_jn = Gfz_num_jn;
blocks.Gfz.num_kn = Gfz_num_kn;
blocks.Gfz.dim_s3 = Gfz_dim_s3;
blocks.Gfz.num_s3 = Gfz_num_s3;
/* Computational Shared Grid - Gfz */
threads_x = (dom[rank].Gfz.in+2) *((dom[rank].Gfz.in+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfz.in+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gfz.jn+2) *((dom[rank].Gfz.jn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfz.jn+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gfz.kn+2) *((dom[rank].Gfz.kn+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfz.kn+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfz.in / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gfz.jn / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gfz.kn / (real) (threads_z - 2));
// Create tmp variables
dim3 Gfz_dim_in_s(threads_y, threads_z);
dim3 Gfz_dim_jn_s(threads_z, threads_x);
dim3 Gfz_dim_kn_s(threads_x, threads_y);
dim3 Gfz_num_in_s(blocks_y, blocks_z);
dim3 Gfz_num_jn_s(blocks_z, blocks_x);
dim3 Gfz_num_kn_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gfz.dim_in_s = Gfz_dim_in_s;
blocks.Gfz.dim_jn_s = Gfz_dim_jn_s;
blocks.Gfz.dim_kn_s = Gfz_dim_kn_s;
blocks.Gfz.num_in_s = Gfz_num_in_s;
blocks.Gfz.num_jn_s = Gfz_num_jn_s;
blocks.Gfz.num_kn_s = Gfz_num_kn_s;
/* Ghost grid - Gcc */
threads_x = dom[rank].Gcc.inb * (dom[rank].Gcc.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gcc.inb >= MAX_THREADS_DIM);
threads_y = dom[rank].Gcc.jnb * (dom[rank].Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gcc.jnb >= MAX_THREADS_DIM);
threads_z = dom[rank].Gcc.knb * (dom[rank].Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gcc.knb >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gcc.inb / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gcc.jnb / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gcc.knb / (real) threads_z);
// Create tmp variables
dim3 Gcc_dim_inb(threads_y, threads_z);
dim3 Gcc_dim_jnb(threads_z, threads_x);
dim3 Gcc_dim_knb(threads_x, threads_y);
dim3 Gcc_num_inb(blocks_y, blocks_z);
dim3 Gcc_num_jnb(blocks_z, blocks_x);
dim3 Gcc_num_knb(blocks_x, blocks_y);
dim3 Gcc_dim_s3b(threads_x, threads_y, threads_z);
dim3 Gcc_num_s3b(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gcc.dim_inb = Gcc_dim_inb;
blocks.Gcc.dim_jnb = Gcc_dim_jnb;
blocks.Gcc.dim_knb = Gcc_dim_knb;
blocks.Gcc.num_inb = Gcc_num_inb;
blocks.Gcc.num_jnb = Gcc_num_jnb;
blocks.Gcc.num_knb = Gcc_num_knb;
blocks.Gcc.dim_s3b = Gcc_dim_s3b;
blocks.Gcc.num_s3b = Gcc_num_s3b;
/* Ghost Shared Grid - Gcc */
threads_x = (dom[rank].Gcc.inb+2) *((dom[rank].Gcc.inb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gcc.inb+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gcc.jnb+2) *((dom[rank].Gcc.jnb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gcc.jnb+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gcc.knb+2) *((dom[rank].Gcc.knb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gcc.knb+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gcc.inb / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gcc.jnb / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gcc.knb / (real) (threads_z - 2));
// Create tmp variables
dim3 Gcc_dim_inb_s(threads_y, threads_z);
dim3 Gcc_dim_jnb_s(threads_z, threads_x);
dim3 Gcc_dim_knb_s(threads_x, threads_y);
dim3 Gcc_num_inb_s(blocks_y, blocks_z);
dim3 Gcc_num_jnb_s(blocks_z, blocks_x);
dim3 Gcc_num_knb_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gcc.dim_inb_s = Gcc_dim_inb_s;
blocks.Gcc.dim_jnb_s = Gcc_dim_jnb_s;
blocks.Gcc.dim_knb_s = Gcc_dim_knb_s;
blocks.Gcc.num_inb_s = Gcc_num_inb_s;
blocks.Gcc.num_jnb_s = Gcc_num_jnb_s;
blocks.Gcc.num_knb_s = Gcc_num_knb_s;
/* Ghost grid - Gfx */
threads_x = dom[rank].Gfx.inb * (dom[rank].Gfx.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfx.inb >= MAX_THREADS_DIM);
threads_y = dom[rank].Gfx.jnb * (dom[rank].Gfx.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfx.jnb >= MAX_THREADS_DIM);
threads_z = dom[rank].Gfx.knb * (dom[rank].Gfx.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfx.knb >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfx.inb / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gfx.jnb / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gfx.knb / (real) threads_z);
// Create tmp variables
dim3 Gfx_dim_inb(threads_y, threads_z);
dim3 Gfx_dim_jnb(threads_z, threads_x);
dim3 Gfx_dim_knb(threads_x, threads_y);
dim3 Gfx_num_inb(blocks_y, blocks_z);
dim3 Gfx_num_jnb(blocks_z, blocks_x);
dim3 Gfx_num_knb(blocks_x, blocks_y);
dim3 Gfx_dim_s3b(threads_x, threads_y, threads_z);
dim3 Gfx_num_s3b(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gfx.dim_inb = Gfx_dim_inb;
blocks.Gfx.dim_jnb = Gfx_dim_jnb;
blocks.Gfx.dim_knb = Gfx_dim_knb;
blocks.Gfx.num_inb = Gfx_num_inb;
blocks.Gfx.num_jnb = Gfx_num_jnb;
blocks.Gfx.num_knb = Gfx_num_knb;
blocks.Gfx.dim_s3b = Gfx_dim_s3b;
blocks.Gfx.num_s3b = Gfx_num_s3b;
/* Ghost Shared Grid - Gfx */
threads_x = (dom[rank].Gfx.inb+2) *((dom[rank].Gfx.inb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfx.inb+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gfx.jnb+2) *((dom[rank].Gfx.jnb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfx.jnb+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gfx.knb+2) *((dom[rank].Gfx.knb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfx.knb+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfx.inb / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gfx.jnb / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gfx.knb / (real) (threads_z - 2));
// Create tmp variables
dim3 Gfx_dim_inb_s(threads_y, threads_z);
dim3 Gfx_dim_jnb_s(threads_z, threads_x);
dim3 Gfx_dim_knb_s(threads_x, threads_y);
dim3 Gfx_num_inb_s(blocks_y, blocks_z);
dim3 Gfx_num_jnb_s(blocks_z, blocks_x);
dim3 Gfx_num_knb_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gfx.dim_inb_s = Gfx_dim_inb_s;
blocks.Gfx.dim_jnb_s = Gfx_dim_jnb_s;
blocks.Gfx.dim_knb_s = Gfx_dim_knb_s;
blocks.Gfx.num_inb_s = Gfx_num_inb_s;
blocks.Gfx.num_jnb_s = Gfx_num_jnb_s;
blocks.Gfx.num_knb_s = Gfx_num_knb_s;
/* Ghost grid - Gfy */
threads_x = dom[rank].Gfy.inb * (dom[rank].Gfy.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfy.inb >= MAX_THREADS_DIM);
threads_y = dom[rank].Gfy.jnb * (dom[rank].Gfy.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfy.jnb >= MAX_THREADS_DIM);
threads_z = dom[rank].Gfy.knb * (dom[rank].Gfy.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfy.knb >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfy.inb / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gfy.jnb / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gfy.knb / (real) threads_z);
// Create tmp variables
dim3 Gfy_dim_inb(threads_y, threads_z);
dim3 Gfy_dim_jnb(threads_z, threads_x);
dim3 Gfy_dim_knb(threads_x, threads_y);
dim3 Gfy_num_inb(blocks_y, blocks_z);
dim3 Gfy_num_jnb(blocks_z, blocks_x);
dim3 Gfy_num_knb(blocks_x, blocks_y);
dim3 Gfy_dim_s3b(threads_x, threads_y, threads_z);
dim3 Gfy_num_s3b(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gfy.dim_inb = Gfy_dim_inb;
blocks.Gfy.dim_jnb = Gfy_dim_jnb;
blocks.Gfy.dim_knb = Gfy_dim_knb;
blocks.Gfy.num_inb = Gfy_num_inb;
blocks.Gfy.num_jnb = Gfy_num_jnb;
blocks.Gfy.num_knb = Gfy_num_knb;
blocks.Gfy.dim_s3b = Gfy_dim_s3b;
blocks.Gfy.num_s3b = Gfy_num_s3b;
/* Ghost Shared Grid - Gfy */
threads_x = (dom[rank].Gfy.inb+2) *((dom[rank].Gfy.inb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfy.inb+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gfy.jnb+2) *((dom[rank].Gfy.jnb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfy.jnb+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gfy.knb+2) *((dom[rank].Gfy.knb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfy.knb+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfy.inb / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gfy.jnb / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gfy.knb / (real) (threads_z - 2));
// Create tmp variables
dim3 Gfy_dim_inb_s(threads_y, threads_z);
dim3 Gfy_dim_jnb_s(threads_z, threads_x);
dim3 Gfy_dim_knb_s(threads_x, threads_y);
dim3 Gfy_num_inb_s(blocks_y, blocks_z);
dim3 Gfy_num_jnb_s(blocks_z, blocks_x);
dim3 Gfy_num_knb_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gfy.dim_inb_s = Gfy_dim_inb_s;
blocks.Gfy.dim_jnb_s = Gfy_dim_jnb_s;
blocks.Gfy.dim_knb_s = Gfy_dim_knb_s;
blocks.Gfy.num_inb_s = Gfy_num_inb_s;
blocks.Gfy.num_jnb_s = Gfy_num_jnb_s;
blocks.Gfy.num_knb_s = Gfy_num_knb_s;
/* Ghost grid - Gfz */
threads_x = dom[rank].Gfz.inb * (dom[rank].Gfz.inb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfz.inb >= MAX_THREADS_DIM);
threads_y = dom[rank].Gfz.jnb * (dom[rank].Gfz.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfz.jnb >= MAX_THREADS_DIM);
threads_z = dom[rank].Gfz.knb * (dom[rank].Gfz.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (dom[rank].Gfz.knb >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfz.inb / (real) threads_x);
blocks_y = (int) ceil((real) dom[rank].Gfz.jnb / (real) threads_y);
blocks_z = (int) ceil((real) dom[rank].Gfz.knb / (real) threads_z);
// Create tmp variables
dim3 Gfz_dim_inb(threads_y, threads_z);
dim3 Gfz_dim_jnb(threads_z, threads_x);
dim3 Gfz_dim_knb(threads_x, threads_y);
dim3 Gfz_num_inb(blocks_y, blocks_z);
dim3 Gfz_num_jnb(blocks_z, blocks_x);
dim3 Gfz_num_knb(blocks_x, blocks_y);
dim3 Gfz_dim_s3b(threads_x, threads_y, threads_z);
dim3 Gfz_num_s3b(blocks_x, blocks_y, blocks_z);
// Copy (by value) to structs
blocks.Gfz.dim_inb = Gfz_dim_inb;
blocks.Gfz.dim_jnb = Gfz_dim_jnb;
blocks.Gfz.dim_knb = Gfz_dim_knb;
blocks.Gfz.num_inb = Gfz_num_inb;
blocks.Gfz.num_jnb = Gfz_num_jnb;
blocks.Gfz.num_knb = Gfz_num_knb;
blocks.Gfz.dim_s3b = Gfz_dim_s3b;
blocks.Gfz.num_s3b = Gfz_num_s3b;
/* Ghost Shared Grid - Gfz */
threads_x = (dom[rank].Gfz.inb+2) *((dom[rank].Gfz.inb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfz.inb+2) >= MAX_THREADS_DIM);
threads_y = (dom[rank].Gfz.jnb+2) *((dom[rank].Gfz.jnb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfz.jnb+2) >= MAX_THREADS_DIM);
threads_z = (dom[rank].Gfz.knb+2) *((dom[rank].Gfz.knb+2) < MAX_THREADS_DIM)
+ MAX_THREADS_DIM *((dom[rank].Gfz.knb+2) >= MAX_THREADS_DIM);
blocks_x = (int) ceil((real) dom[rank].Gfz.inb / (real) (threads_x - 2));
blocks_y = (int) ceil((real) dom[rank].Gfz.jnb / (real) (threads_y - 2));
blocks_z = (int) ceil((real) dom[rank].Gfz.knb / (real) (threads_z - 2));
// Create tmp variables
dim3 Gfz_dim_inb_s(threads_y, threads_z);
dim3 Gfz_dim_jnb_s(threads_z, threads_x);
dim3 Gfz_dim_knb_s(threads_x, threads_y);
dim3 Gfz_num_inb_s(blocks_y, blocks_z);
dim3 Gfz_num_jnb_s(blocks_z, blocks_x);
dim3 Gfz_num_knb_s(blocks_x, blocks_y);
// Copy (by value) to structs
blocks.Gfz.dim_inb_s = Gfz_dim_inb_s;
blocks.Gfz.dim_jnb_s = Gfz_dim_jnb_s;
blocks.Gfz.dim_knb_s = Gfz_dim_knb_s;
blocks.Gfz.num_inb_s = Gfz_num_inb_s;
blocks.Gfz.num_jnb_s = Gfz_num_jnb_s;
blocks.Gfz.num_knb_s = Gfz_num_knb_s;
}
extern "C"
void cuda_dom_BC(void)
{
//printf("N%d >> Applying boundary conditions to u_star.\n", rank);
// Check whether each subdom boundary is an external boundary, then
// apply the correct boundary conditions to all fields on that face
// Only apply boundary conditions on the inner [*n x *n] plane, not the
// [*nb x *nb] -- this ensures we don't set the points that don't contain
// any solution, and we also don't set points twice
/* WEST */
if (dom[rank].w == MPI_PROC_NULL) {
switch (bc.pW) {
case NEUMANN:
BC_p_W_N<<<blocks.Gcc.num_in, blocks.Gcc.dim_in>>>(_p);
break;
}
switch (bc.uW) {
case DIRICHLET:
BC_u_W_D<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u, bc.uWD);
break;
case NEUMANN:
BC_u_W_N<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u);
break;
}
switch (bc.vW) {
case DIRICHLET:
BC_v_W_D<<<blocks.Gfy.num_in, blocks.Gfy.dim_in>>>(_v, bc.vWD);
break;
case NEUMANN:
BC_v_W_N<<<blocks.Gfy.num_in, blocks.Gfy.dim_in>>>(_v);
break;
}
switch (bc.wW) {
case DIRICHLET:
BC_w_W_D<<<blocks.Gfz.num_in, blocks.Gfz.dim_in>>>(_w, bc.wWD);
break;
case NEUMANN:
BC_w_W_N<<<blocks.Gfz.num_in, blocks.Gfz.dim_in>>>(_w);
break;
}
}
/* EAST */
if (dom[rank].e == MPI_PROC_NULL) {
switch (bc.pE) {
case NEUMANN:
BC_p_E_N<<<blocks.Gcc.num_in, blocks.Gcc.dim_in>>>(_p);
}
switch (bc.uE) {
case DIRICHLET:
BC_u_E_D<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u, bc.uED);
break;
case NEUMANN:
BC_u_E_N<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u);
break;
}
switch (bc.vE) {
case DIRICHLET:
BC_v_E_D<<<blocks.Gfy.num_in, blocks.Gfy.dim_in>>>(_v, bc.vED);
break;
case NEUMANN:
BC_v_E_N<<<blocks.Gfy.num_in, blocks.Gfy.dim_in>>>(_v);
break;
}
switch (bc.wE) {
case DIRICHLET:
BC_w_E_D<<<blocks.Gfz.num_in, blocks.Gfz.dim_in>>>(_w, bc.wED);
break;
case NEUMANN:
BC_w_E_N<<<blocks.Gfz.num_in, blocks.Gfz.dim_in>>>(_w);
break;
}
}
/* SOUTH */
if (dom[rank].s == MPI_PROC_NULL) {
switch (bc.pS) {
case NEUMANN:
BC_p_S_N<<<blocks.Gcc.num_jn, blocks.Gcc.dim_jn>>>(_p);
}
switch (bc.uS) {
case DIRICHLET:
BC_u_S_D<<<blocks.Gfx.num_jn, blocks.Gfx.dim_jn>>>(_u, bc.uSD);
break;
case NEUMANN:
BC_u_S_N<<<blocks.Gfx.num_jn, blocks.Gfx.dim_jn>>>(_u);
break;
}
switch (bc.vS) {
case DIRICHLET:
BC_v_S_D<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v, bc.vSD);
break;
case NEUMANN:
BC_v_S_N<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v);
break;
}
switch (bc.wS) {
case DIRICHLET:
BC_w_S_D<<<blocks.Gfz.num_jn, blocks.Gfz.dim_jn>>>(_w, bc.wSD);
break;
case NEUMANN:
BC_w_S_N<<<blocks.Gfz.num_jn, blocks.Gfz.dim_jn>>>(_w);
break;
}
}
/* NORTH */
if (dom[rank].n == MPI_PROC_NULL) {
switch (bc.pN) {
case NEUMANN:
BC_p_N_N<<<blocks.Gcc.num_jn, blocks.Gcc.dim_jn>>>(_p);
}
switch (bc.uN) {
case DIRICHLET:
BC_u_N_D<<<blocks.Gfx.num_jn, blocks.Gfx.dim_jn>>>(_u, bc.uND);
break;
case NEUMANN:
BC_u_N_N<<<blocks.Gfx.num_jn, blocks.Gfx.dim_jn>>>(_u);
break;
}
switch (bc.vN) {
case DIRICHLET:
BC_v_N_D<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v, bc.vND);
break;
case NEUMANN:
BC_v_N_N<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v);
break;
}
switch (bc.wN) {
case DIRICHLET:
BC_w_N_D<<<blocks.Gfz.num_jn, blocks.Gfz.dim_jn>>>(_w, bc.wND);
break;
case NEUMANN:
BC_w_N_N<<<blocks.Gfz.num_jn, blocks.Gfz.dim_jn>>>(_w);
break;
}
}
/* BOTTOM */
if (dom[rank].b == MPI_PROC_NULL) {
switch (bc.pB) {
case NEUMANN:
BC_p_B_N<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(_p);
}
switch (bc.uB) {
case DIRICHLET:
BC_u_B_D<<<blocks.Gfx.num_kn, blocks.Gfx.dim_kn>>>(_u, bc.uBD);
break;
case NEUMANN:
BC_u_B_N<<<blocks.Gfx.num_kn, blocks.Gfx.dim_kn>>>(_u);
break;
}
switch (bc.vB) {
case DIRICHLET:
BC_v_B_D<<<blocks.Gfy.num_kn, blocks.Gfy.dim_kn>>>(_v, bc.vBD);
break;
case NEUMANN:
BC_v_B_N<<<blocks.Gfy.num_kn, blocks.Gfy.dim_kn>>>(_v);
break;
}
switch (bc.wB) {
case DIRICHLET:
BC_w_B_D<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w, bc.wBD);
break;
case NEUMANN:
BC_w_B_N<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w);
break;
}
}
/* TOP */
if (dom[rank].t == MPI_PROC_NULL) {
switch (bc.pT) {
case NEUMANN:
BC_p_T_N<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(_p);
}
switch (bc.uT) {
case DIRICHLET:
BC_u_T_D<<<blocks.Gfx.num_kn, blocks.Gfx.dim_kn>>>(_u, bc.uTD);
break;
case NEUMANN:
BC_u_T_N<<<blocks.Gfx.num_kn, blocks.Gfx.dim_kn>>>(_u);
break;
}
switch (bc.vT) {
case DIRICHLET:
BC_v_T_D<<<blocks.Gfy.num_kn, blocks.Gfy.dim_kn>>>(_v, bc.vTD);
break;
case NEUMANN:
BC_v_T_N<<<blocks.Gfy.num_kn, blocks.Gfy.dim_kn>>>(_v);
break;
}
switch (bc.wT) {
case DIRICHLET:
BC_w_T_D<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w, bc.wTD);
break;
case NEUMANN:
BC_w_T_N<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w);
break;
}
}
}
extern "C"
void cuda_dom_pull(void)
{
// Pull domain data from device to host
checkCudaErrors(cudaMemcpy(p, _p, dom[rank].Gcc.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(u, _u, dom[rank].Gfx.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(v, _v, dom[rank].Gfy.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(w, _w, dom[rank].Gfz.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
}
extern "C"
void cuda_dom_pull_phase(void)
{
checkCudaErrors(cudaMemcpy(phase, _phase, dom[rank].Gcc.s3b * sizeof(int),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(phase_shell, _phase_shell, dom[rank].Gcc.s3b * sizeof(int),
cudaMemcpyDeviceToHost));
}
extern "C"
void cuda_dom_pull_debug(void)
{
//printf("N%d >> Pulling dom device->host (debug)\n", rank);
checkCudaErrors(cudaMemcpy(phi, _phi, dom[rank].Gcc.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(u_star, _u_star, dom[rank].Gfx.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(v_star, _v_star, dom[rank].Gfy.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(w_star, _w_star, dom[rank].Gfz.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(conv_u, _conv_u, dom[rank].Gfx.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(conv_v, _conv_v, dom[rank].Gfy.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(conv_w, _conv_w, dom[rank].Gfz.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(diff_u, _diff_u, dom[rank].Gfx.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(diff_v, _diff_v, dom[rank].Gfy.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(diff_w, _diff_w, dom[rank].Gfz.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(flag_u, _flag_u, dom[rank].Gfx.s3b * sizeof(int),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(flag_v, _flag_v, dom[rank].Gfy.s3b * sizeof(int),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(flag_w, _flag_w, dom[rank].Gfz.s3b * sizeof(int),
cudaMemcpyDeviceToHost));
//if (NPARTS > 0) { // Already pulled in cuda_dom_pull()
// checkCudaErrors(cudaMemcpy(phase, _phase, dom[rank].Gcc.s3b * sizeof(int),
// cudaMemcpyDeviceToHost));
// checkCudaErrors(cudaMemcpy(phase_shell, _phase_shell, dom[rank].Gcc.s3b * sizeof(int),
// cudaMemcpyDeviceToHost));
//}
}
extern "C"
void cuda_dom_pull_restart(void) {
checkCudaErrors(cudaMemcpy(p0, _p0, dom[rank].Gcc.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(u0, _u0, dom[rank].Gfx.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(v0, _v0, dom[rank].Gfy.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(w0, _w0, dom[rank].Gfz.s3b * sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(conv0_u, _conv0_u, dom[rank].Gfx.s3b *sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(conv0_v, _conv0_v, dom[rank].Gfy.s3b *sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(conv0_w, _conv0_w, dom[rank].Gfz.s3b *sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(diff0_u, _diff0_u, dom[rank].Gfx.s3b *sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(diff0_v, _diff0_v, dom[rank].Gfy.s3b *sizeof(real),
cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(diff0_w, _diff0_w, dom[rank].Gfz.s3b *sizeof(real),
cudaMemcpyDeviceToHost));
}
extern "C"
void cuda_self_exchange_i(real *array)
{
self_exchange_Gcc_i<<<blocks.Gcc.num_in, blocks.Gcc.dim_in>>>(array);
}
extern "C"
void cuda_self_exchange_j(real *array)
{
self_exchange_Gcc_j<<<blocks.Gcc.num_jn, blocks.Gcc.dim_jn>>>(array);
}
extern "C"
void cuda_self_exchange_k(real *array)
{
self_exchange_Gcc_k<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(array);
}
extern "C"
void cuda_pack_planes_Gcc(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
pack_planes_Gcc_east<<<blocks.Gcc.num_in, blocks.Gcc.dim_in>>>(array,
_send_Gcc_e);
if (dom[rank].w != MPI_PROC_NULL)
pack_planes_Gcc_west<<<blocks.Gcc.num_in, blocks.Gcc.dim_in>>>(array,
_send_Gcc_w);
if (dom[rank].n != MPI_PROC_NULL)
pack_planes_Gcc_north<<<blocks.Gcc.num_jn, blocks.Gcc.dim_jn>>>(array,
_send_Gcc_n);
if (dom[rank].s != MPI_PROC_NULL)
pack_planes_Gcc_south<<<blocks.Gcc.num_jn, blocks.Gcc.dim_jn>>>(array,
_send_Gcc_s);
if (dom[rank].t != MPI_PROC_NULL)
pack_planes_Gcc_top<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(array,
_send_Gcc_t);
if (dom[rank].b != MPI_PROC_NULL)
pack_planes_Gcc_bottom<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(array,
_send_Gcc_b);
}
extern "C"
void cuda_pack_planes_Gfx(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
pack_planes_Gfx_east<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(array,
_send_Gfx_e);
if (dom[rank].w != MPI_PROC_NULL)
pack_planes_Gfx_west<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(array,
_send_Gfx_w);
if (dom[rank].n != MPI_PROC_NULL)
pack_planes_Gfx_north<<<blocks.Gfx.num_jn, blocks.Gfx.dim_jn>>>(array,
_send_Gfx_n);
if (dom[rank].s != MPI_PROC_NULL)
pack_planes_Gfx_south<<<blocks.Gfx.num_jn, blocks.Gfx.dim_jn>>>(array,
_send_Gfx_s);
if (dom[rank].t != MPI_PROC_NULL)
pack_planes_Gfx_top<<<blocks.Gfx.num_kn, blocks.Gfx.dim_kn>>>(array,
_send_Gfx_t);
if (dom[rank].b != MPI_PROC_NULL)
pack_planes_Gfx_bottom<<<blocks.Gfx.num_kn, blocks.Gfx.dim_kn>>>(array,
_send_Gfx_b);
}
extern "C"
void cuda_pack_planes_Gfy(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
pack_planes_Gfy_east<<<blocks.Gfy.num_in, blocks.Gfy.dim_in>>>(array,
_send_Gfy_e);
if (dom[rank].w != MPI_PROC_NULL)
pack_planes_Gfy_west<<<blocks.Gfy.num_in, blocks.Gfy.dim_in>>>(array,
_send_Gfy_w);
if (dom[rank].n != MPI_PROC_NULL)
pack_planes_Gfy_north<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(array,
_send_Gfy_n);
if (dom[rank].s != MPI_PROC_NULL)
pack_planes_Gfy_south<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(array,
_send_Gfy_s);
if (dom[rank].t != MPI_PROC_NULL)
pack_planes_Gfy_top<<<blocks.Gfy.num_kn, blocks.Gfy.dim_kn>>>(array,
_send_Gfy_t);
if (dom[rank].b != MPI_PROC_NULL)
pack_planes_Gfy_bottom<<<blocks.Gfy.num_kn, blocks.Gfy.dim_kn>>>(array,
_send_Gfy_b);
}
extern "C"
void cuda_pack_planes_Gfz(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
pack_planes_Gfz_east<<<blocks.Gfz.num_in, blocks.Gfz.dim_in>>>(array,
_send_Gfz_e);
if (dom[rank].w != MPI_PROC_NULL)
pack_planes_Gfz_west<<<blocks.Gfz.num_in, blocks.Gfz.dim_in>>>(array,
_send_Gfz_w);
if (dom[rank].n != MPI_PROC_NULL)
pack_planes_Gfz_north<<<blocks.Gfz.num_jn, blocks.Gfz.dim_jn>>>(array,
_send_Gfz_n);
if (dom[rank].s != MPI_PROC_NULL)
pack_planes_Gfz_south<<<blocks.Gfz.num_jn, blocks.Gfz.dim_jn>>>(array,
_send_Gfz_s);
if (dom[rank].t != MPI_PROC_NULL)
pack_planes_Gfz_top<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(array,
_send_Gfz_t);
if (dom[rank].b != MPI_PROC_NULL)
pack_planes_Gfz_bottom<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(array,
_send_Gfz_b);
}
extern "C"
void cuda_unpack_planes_Gcc(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
unpack_planes_Gcc_east<<<blocks.Gcc.num_in, blocks.Gcc.dim_in>>>(array,
_recv_Gcc_e);
if (dom[rank].w != MPI_PROC_NULL)
unpack_planes_Gcc_west<<<blocks.Gcc.num_in, blocks.Gcc.dim_in>>>(array,
_recv_Gcc_w);
if (dom[rank].n != MPI_PROC_NULL)
unpack_planes_Gcc_north<<<blocks.Gcc.num_jn, blocks.Gcc.dim_jn>>>(array,
_recv_Gcc_n);
if (dom[rank].s != MPI_PROC_NULL)
unpack_planes_Gcc_south<<<blocks.Gcc.num_jn, blocks.Gcc.dim_jn>>>(array,
_recv_Gcc_s);
if (dom[rank].t != MPI_PROC_NULL)
unpack_planes_Gcc_top<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(array,
_recv_Gcc_t);
if (dom[rank].b != MPI_PROC_NULL)
unpack_planes_Gcc_bottom<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(array,
_recv_Gcc_b);
}
extern "C"
void cuda_unpack_planes_Gfx(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
unpack_planes_Gfx_east<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(array,
_recv_Gfx_e);
if (dom[rank].w != MPI_PROC_NULL)
unpack_planes_Gfx_west<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(array,
_recv_Gfx_w);
if (dom[rank].n != MPI_PROC_NULL)
unpack_planes_Gfx_north<<<blocks.Gfx.num_jn, blocks.Gfx.dim_jn>>>(array,
_recv_Gfx_n);
if (dom[rank].s != MPI_PROC_NULL)
unpack_planes_Gfx_south<<<blocks.Gfx.num_jn, blocks.Gfx.dim_jn>>>(array,
_recv_Gfx_s);
if (dom[rank].t != MPI_PROC_NULL)
unpack_planes_Gfx_top<<<blocks.Gfx.num_kn, blocks.Gfx.dim_kn>>>(array,
_recv_Gfx_t);
if (dom[rank].b != MPI_PROC_NULL)
unpack_planes_Gfx_bottom<<<blocks.Gfx.num_kn, blocks.Gfx.dim_kn>>>(array,
_recv_Gfx_b);
}
extern "C"
void cuda_unpack_planes_Gfy(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
unpack_planes_Gfy_east<<<blocks.Gfy.num_in, blocks.Gfy.dim_in>>>(array,
_recv_Gfy_e);
if (dom[rank].w != MPI_PROC_NULL)
unpack_planes_Gfy_west<<<blocks.Gfy.num_in, blocks.Gfy.dim_in>>>(array,
_recv_Gfy_w);
if (dom[rank].n != MPI_PROC_NULL)
unpack_planes_Gfy_north<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(array,
_recv_Gfy_n);
if (dom[rank].s != MPI_PROC_NULL)
unpack_planes_Gfy_south<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(array,
_recv_Gfy_s);
if (dom[rank].t != MPI_PROC_NULL)
unpack_planes_Gfy_top<<<blocks.Gfy.num_kn, blocks.Gfy.dim_kn>>>(array,
_recv_Gfy_t);
if (dom[rank].b != MPI_PROC_NULL)
unpack_planes_Gfy_bottom<<<blocks.Gfy.num_kn, blocks.Gfy.dim_kn>>>(array,
_recv_Gfy_b);
}
extern "C"
void cuda_unpack_planes_Gfz(real *array)
{
if (dom[rank].e != MPI_PROC_NULL)
unpack_planes_Gfz_east<<<blocks.Gfz.num_in, blocks.Gfz.dim_in>>>(array,
_recv_Gfz_e);
if (dom[rank].w != MPI_PROC_NULL)
unpack_planes_Gfz_west<<<blocks.Gfz.num_in, blocks.Gfz.dim_in>>>(array,
_recv_Gfz_w);
if (dom[rank].n != MPI_PROC_NULL)
unpack_planes_Gfz_north<<<blocks.Gfz.num_jn, blocks.Gfz.dim_jn>>>(array,
_recv_Gfz_n);
if (dom[rank].s != MPI_PROC_NULL)
unpack_planes_Gfz_south<<<blocks.Gfz.num_jn, blocks.Gfz.dim_jn>>>(array,
_recv_Gfz_s);
if (dom[rank].t != MPI_PROC_NULL)
unpack_planes_Gfz_top<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(array,
_recv_Gfz_t);
if (dom[rank].b != MPI_PROC_NULL)
unpack_planes_Gfz_bottom<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(array,
_recv_Gfz_b);
}
extern "C"
void cuda_find_dt(void)
{
// Only want max values over the computational domain, not ghost domain
// Copy to new array and square the value. Then find max of that result
// and take sqrt
real *utmp;
real *vtmp;
real *wtmp;
cudaMalloc((void**) &utmp, sizeof(real)*dom[rank].Gfx.s3);
cudaMalloc((void**) &vtmp, sizeof(real)*dom[rank].Gfy.s3);
cudaMalloc((void**) &wtmp, sizeof(real)*dom[rank].Gfz.s3);
copy_u_square_noghost<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u, utmp);
copy_v_square_noghost<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v, vtmp);
copy_w_square_noghost<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w, wtmp);
// device pointers to utmp, vtmp, wtmp
thrust::device_ptr<real> t_umax(utmp);
thrust::device_ptr<real> t_vmax(vtmp);
thrust::device_ptr<real> t_wmax(wtmp);
real u_max = thrust::reduce(t_umax, t_umax + dom[rank].Gfx.s3, 0.,
thrust::maximum<real>());
real v_max = thrust::reduce(t_vmax, t_vmax + dom[rank].Gfy.s3, 0.,
thrust::maximum<real>());
real w_max = thrust::reduce(t_wmax, t_wmax + dom[rank].Gfz.s3, 0.,
thrust::maximum<real>());
u_max = sqrt(u_max);
v_max = sqrt(v_max);
w_max = sqrt(w_max);
cudaFree(utmp);
cudaFree(vtmp);
cudaFree(wtmp);
// find dt on each subdomain
dt = u_max/dom[rank].dx + 2.*nu/(dom[rank].dx * dom[rank].dx);
dt += v_max/dom[rank].dy + 2.*nu/(dom[rank].dy * dom[rank].dy);
dt += w_max/dom[rank].dz + 2.*nu/(dom[rank].dz * dom[rank].dz);
dt = CFL/dt;
// MPI reduce to find minimum timestep over all ranks
MPI_Allreduce(MPI_IN_PLACE, &dt, 1, mpi_real, MPI_MIN, MPI_COMM_WORLD);
/* An alternative method is to find max(u,v,w) over all domains, and then
* calculate dt. This will be <= the dt as it is currently calculated.
*/
}
extern "C"
void cuda_compute_forcing(void)
{
// reset forcing arrays
forcing_reset_x<<<blocks.Gfx.num_inb, blocks.Gfx.dim_inb>>>(_f_x);
forcing_reset_y<<<blocks.Gfy.num_jnb, blocks.Gfy.dim_jnb>>>(_f_y);
forcing_reset_z<<<blocks.Gfz.num_knb, blocks.Gfz.dim_knb>>>(_f_z);
// linearly accelerate pressure gradient from zero
real delta = ttime - p_bc_tdelay;
if (delta >= 0) {
if (gradP.xa == 0) {
gradP.x = gradP.xm;
} else if (fabs(delta*gradP.xa) > fabs(gradP.xm)) {
gradP.x = gradP.xm;
} else {
gradP.x = delta*gradP.xa;
}
if (gradP.ya == 0) {
gradP.y = gradP.ym;
} else if (fabs(delta*gradP.ya) > fabs(gradP.ym)) {
gradP.y = gradP.ym;
} else {
gradP.y = delta*gradP.ya;
}
// Turn off if PID controller is on
if (!(Kp > 0 || Ki > 0 || Kd > 0)) {
if (gradP.za == 0) {
gradP.z = gradP.zm;
} else if (fabs(delta*gradP.za) > fabs(gradP.zm)) {
gradP.z = gradP.zm;
} else {
gradP.z = delta*gradP.za;
}
}
}
gradP.z = gradP.z * cos(osci_f*ttime);
// linearly accelerate gravitational acceleration from zero
delta = ttime - g_bc_tdelay;
if (delta >= 0) {
if (g.xa == 0) {
g.x = g.xm;
} else if (fabs(delta*g.xa) > fabs(g.xm)) {
g.x = g.xm;
} else {
g.x = delta*g.xa;
}
if (g.ya == 0) {
g.y = g.ym;
} else if (fabs(delta*g.ya) > fabs(g.ym)) {
g.y = g.ym;
} else {
g.y = delta*g.ya;
}
if (g.za == 0) {
g.z = g.zm;
} else if (fabs(delta*g.za) > fabs(g.zm)) {
g.z = g.zm;
} else {
g.z = delta*g.za;
}
}
delta = ttime - p_bc_tdelay;
// PID controller
if (delta >= 0) {
if(Kp > 0 || Ki > 0 || Kd > 0) {
/* Init execution config */
// Ghost cells
int ty = bins.Gcc.jnb * (bins.Gcc.jnb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jnb >= MAX_THREADS_DIM);
int tz = bins.Gcc.knb * (bins.Gcc.knb < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.knb >= MAX_THREADS_DIM);
int by = (int) ceil((real) bins.Gcc.jnb / (real) ty);
int bz = (int) ceil((real) bins.Gcc.knb / (real) tz);
dim3 bin_num_inb(by, bz);
dim3 bin_dim_inb(ty, tz);
// No ghost
ty = bins.Gcc.jn * (bins.Gcc.jn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.jn >= MAX_THREADS_DIM);
tz = bins.Gcc.kn * (bins.Gcc.kn < MAX_THREADS_DIM)
+ MAX_THREADS_DIM * (bins.Gcc.kn >= MAX_THREADS_DIM);
by = (int) ceil((real) bins.Gcc.jn / (real) ty);
bz = (int) ceil((real) bins.Gcc.kn / (real) tz);
dim3 bin_num_in(by, bz);
dim3 bin_dim_in(ty, tz);
// Thread over nparts
int t_nparts = nparts * (nparts < MAX_THREADS_1D)
+ MAX_THREADS_1D * (nparts >= MAX_THREADS_1D);
int b_nparts = (int) ceil((real) nparts / (real) t_nparts);
dim3 dim_nparts(t_nparts);
dim3 num_nparts(b_nparts);
/* Allocate memory */
checkCudaErrors(cudaMalloc(&_part_ind, nparts * sizeof(int)));
checkCudaErrors(cudaMalloc(&_part_bin, nparts * sizeof(int)));
thrust::device_ptr<int> t_part_ind(_part_ind);
thrust::device_ptr<int> t_part_bin(_part_bin);
checkCudaErrors(cudaMemset(_bin_start, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_end, -1, bins.Gcc.s3b * sizeof(int)));
checkCudaErrors(cudaMemset(_bin_count, 0, bins.Gcc.s3b * sizeof(int)));
thrust::device_ptr<int> t_bin_count(_bin_count);
real *_wdot;
checkCudaErrors(cudaMalloc(&_wdot, bins.Gcc.s3 * sizeof(real)));
thrust::device_ptr<real> t_wdot(_wdot);
if (nparts > 0) {
/* Find each particle's bin */
bin_fill_i<<<num_nparts, dim_nparts>>>(_part_ind, _part_bin, _parts, nparts,
_DOM);
/* Sort _part_ind by _part_bin (sort key by value) */
if (nparts > 1) {
thrust::sort_by_key(t_part_bin, t_part_bin + nparts, t_part_ind);
}
/* Find start and ending index of each bin */
int smem_size = (nparts + 1) * sizeof(int);
find_bin_start_end<<<b_nparts, t_nparts, smem_size>>>(_bin_start, _bin_end,
_part_bin, nparts);
/* Find number of particles in each bin */
count_bin_parts_i<<<bin_num_inb, bin_dim_inb>>>(_bin_start, _bin_end,
_bin_count);
/* Pull wdot to an array for each bin */
pull_wdot<<<bin_num_in, bin_dim_in>>>(_wdot, _parts, _bin_start,
_bin_count, _part_ind);
} else { // nparts <= 0
checkCudaErrors(cudaMemset(_part_ind, -1, nparts * sizeof(int)));
checkCudaErrors(cudaMemset(_part_bin, -1, nparts * sizeof(int)));
checkCudaErrors(cudaMemset(_wdot, 0., nparts * sizeof(real)));
}
real acc_z = thrust::reduce(t_wdot, t_wdot + bins.Gcc.s3, 0., thrust::plus<real>());
MPI_Allreduce(MPI_IN_PLACE, &acc_z, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
acc_z /= (real) NPARTS;
pid_int = pid_int + acc_z*dt;
gradP.z = gradP.z
+ (Kp*acc_z + Ki*pid_int/ttime + Kd*(acc_z-pid_back))*rho_avg;
pid_back = acc_z;
checkCudaErrors(cudaFree(_wdot));
checkCudaErrors(cudaFree(_part_ind));
checkCudaErrors(cudaFree(_part_bin));
}
}
// forcing
forcing_add_x_const<<<blocks.Gfx.num_inb,blocks.Gfx.dim_inb>>>(-gradP.x/rho_f,
_f_x);
forcing_add_y_const<<<blocks.Gfy.num_jnb,blocks.Gfy.dim_jnb>>>(-gradP.y/rho_f,
_f_y);
forcing_add_z_const<<<blocks.Gfz.num_knb,blocks.Gfz.dim_knb>>>(-gradP.z/rho_f,
_f_z);
}
extern "C"
void cuda_compute_turb_forcing(void)
{
if (init_cond == TURBULENT) {
/* Calculate current kinetic energy */
real *utmp;
real *vtmp;
real *wtmp;
cudaMalloc((void**) &utmp, sizeof(real)*dom[rank].Gfx.s3);
cudaMalloc((void**) &vtmp, sizeof(real)*dom[rank].Gfy.s3);
cudaMalloc((void**) &wtmp, sizeof(real)*dom[rank].Gfz.s3);
// Square entries
copy_u_square_noghost<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u, utmp);
copy_v_square_noghost<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v, vtmp);
copy_w_square_noghost<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w, wtmp);
// device pointers to utmp, vtmp, wtmp
thrust::device_ptr<real> t_utmp(utmp);
thrust::device_ptr<real> t_vtmp(vtmp);
thrust::device_ptr<real> t_wtmp(wtmp);
// Sum fields
// -- Sum should not double count staggered velocities at subdomain and
// periodic interfaces. For now, just don't loop over those points.
// -- This assumes then that we're using periodic boundary conditions
real su2 = thrust::reduce(t_utmp, t_utmp + dom[rank].Gfx.s3 - dom[rank].Gfx.s2_i,
0., thrust::plus<real>());
real sv2 = thrust::reduce(t_vtmp, t_vtmp + dom[rank].Gfy.s3 - dom[rank].Gfy.s2_j,
0., thrust::plus<real>());
real sw2 = thrust::reduce(t_wtmp, t_wtmp + dom[rank].Gfz.s3 - dom[rank].Gfz.s2_k,
0., thrust::plus<real>());
// Sum results
real k = 0.5 * (su2 + sv2 + sw2);
// Find total energy
MPI_Allreduce(MPI_IN_PLACE, &k, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
// Average
k /= DOM.Gcc.s3;
/* Find mean u,v,w velocity */
// Copy to array with no ghost cells
copy_u_noghost<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u, utmp);
copy_v_noghost<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v, vtmp);
copy_w_noghost<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w, wtmp);
// Sum
// Sum should not double count staggered vels at bouundaries
real umean = thrust::reduce(t_utmp, t_utmp + dom[rank].Gfx.s3 - dom[rank].Gfx.s2_i,
0., thrust::plus<real>());
real vmean = thrust::reduce(t_vtmp, t_vtmp + dom[rank].Gfy.s3 - dom[rank].Gfy.s2_j,
0., thrust::plus<real>());
real wmean = thrust::reduce(t_wtmp, t_wtmp + dom[rank].Gfz.s3 - dom[rank].Gfz.s2_k,
0., thrust::plus<real>());
// Reduce over all ranks
MPI_Allreduce(MPI_IN_PLACE, &umean, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(MPI_IN_PLACE, &vmean, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
MPI_Allreduce(MPI_IN_PLACE, &wmean, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
// Average
umean /= DOM.Gfx.s3;
vmean /= DOM.Gfy.s3;
wmean /= DOM.Gfz.s3;
// Calculate forcing
real turb_force = turbA * turb_k0 / k;
// Add forcing to velocity field
forcing_add_x_field<<<blocks.Gfx.num_inb, blocks.Gfx.dim_inb>>>(turb_force,
_u, _f_x);
forcing_add_y_field<<<blocks.Gfy.num_jnb, blocks.Gfy.dim_jnb>>>(turb_force,
_v, _f_y);
forcing_add_z_field<<<blocks.Gfz.num_knb, blocks.Gfz.dim_knb>>>(turb_force,
_w, _f_z);
// Subtract mean to get perturbation
forcing_add_x_const<<<blocks.Gfx.num_inb, blocks.Gfx.dim_inb>>>(-turb_force*umean,
_f_x);
forcing_add_y_const<<<blocks.Gfy.num_jnb, blocks.Gfy.dim_jnb>>>(-turb_force*vmean,
_f_y);
forcing_add_z_const<<<blocks.Gfz.num_knb, blocks.Gfz.dim_knb>>>(-turb_force*wmean,
_f_z);
// Free
cudaFree(utmp);
cudaFree(vtmp);
cudaFree(wtmp);
/* Dissipation Rate */
real *_eps;
cudaMalloc((void**) &_eps, sizeof(real) * dom[rank].Gcc.s3);
calc_dissipation<<<blocks.Gcc.num_in, blocks.Gcc.dim_in>>>(_u, _v, _w, _eps);
thrust::device_ptr<real> t_eps(_eps);
real eps = thrust::reduce(t_eps, t_eps + dom[rank].Gcc.s3,
0., thrust::plus<real>());
MPI_Allreduce(MPI_IN_PLACE, &eps, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
eps *= nu / DOM.Gcc.s3;
cudaFree(_eps);
// Record this
char rname[FILE_NAME_SIZE] = "turb.rec";
recorder_turb(rname, k, eps);
}
}
extern "C"
void cuda_U_star(void)
{
calc_u_star<<<blocks.Gfx.num_inb_s, blocks.Gfx.dim_inb_s>>>(rho_f, nu, _u0,
_v0, _w0, _p0, _f_x, _diff0_u, _conv0_u, _diff_u, _conv_u, _u_star, dt0,
dt, _phase);
calc_v_star<<<blocks.Gfy.num_jnb_s, blocks.Gfy.dim_jnb_s>>>(rho_f, nu, _u0,
_v0, _w0, _p0, _f_y, _diff0_v, _conv0_v, _diff_v, _conv_v, _v_star, dt0,
dt, _phase);
calc_w_star<<<blocks.Gfy.num_knb_s, blocks.Gfz.dim_knb_s>>>(rho_f, nu, _u0,
_v0, _w0, _p0, _f_z, _diff0_w, _conv0_w, _diff_w, _conv_w, _w_star, dt0,
dt, _phase);
}
extern "C"
void cuda_dom_BC_star(void)
{
// west
if (dom[rank].w == MPI_PROC_NULL) {
// u
switch (bc.uW) {
case DIRICHLET:
BC_u_W_D<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u_star, bc.uWD);
break;
case NEUMANN:
BC_u_W_N<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u_star);
break;
}
// v
switch (bc.vW) {
case DIRICHLET:
BC_v_W_D<<<blocks.Gfy.num_in, blocks.Gfy.dim_in>>>(_v_star, bc.vWD);
break;
case NEUMANN:
BC_v_W_N<<<blocks.Gfy.num_in, blocks.Gfy.dim_in>>>(_v_star);
break;
}
// w
switch (bc.wW) {
case DIRICHLET:
BC_w_W_D<<<blocks.Gfz.num_in, blocks.Gfz.dim_in>>>(_w_star, bc.wWD);
break;
case NEUMANN:
BC_w_W_N<<<blocks.Gfz.num_in, blocks.Gfz.dim_in>>>(_w_star);
break;
}
}
// east
if (dom[rank].e == MPI_PROC_NULL) {
// u
switch (bc.uE) {
case DIRICHLET:
BC_u_E_D<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u_star, bc.uED);
break;
case NEUMANN:
BC_u_E_N<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u_star);
break;
}
// v
switch (bc.vE) {
case DIRICHLET:
BC_v_E_D<<<blocks.Gfy.num_in, blocks.Gfy.dim_in>>>(_v_star, bc.vED);
break;
case NEUMANN:
BC_v_E_N<<<blocks.Gfy.num_in, blocks.Gfy.dim_in>>>(_v_star);
break;
}
// w
switch (bc.wE) {
case DIRICHLET:
BC_w_E_D<<<blocks.Gfz.num_in, blocks.Gfz.dim_in>>>(_w_star, bc.wED);
break;
case NEUMANN:
BC_w_E_N<<<blocks.Gfz.num_in, blocks.Gfz.dim_in>>>(_w_star);
break;
}
}
// south
if (dom[rank].s == MPI_PROC_NULL) {
// u
switch (bc.uS) {
case DIRICHLET:
BC_u_S_D<<<blocks.Gfx.num_jn, blocks.Gfx.dim_jn>>>(_u_star, bc.uSD);
break;
case NEUMANN:
BC_u_S_N<<<blocks.Gfx.num_jn, blocks.Gfx.dim_jn>>>(_u_star);
break;
}
// v
switch (bc.vS) {
case DIRICHLET:
BC_v_S_D<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v_star, bc.vSD);
break;
case NEUMANN:
BC_v_S_N<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v_star);
break;
}
// w
switch (bc.wS) {
case DIRICHLET:
BC_w_S_D<<<blocks.Gfz.num_jn, blocks.Gfz.dim_jn>>>(_w_star, bc.wSD);
break;
case NEUMANN:
BC_w_S_N<<<blocks.Gfz.num_jn, blocks.Gfz.dim_jn>>>(_w_star);
break;
}
}
// north
if (dom[rank].n == MPI_PROC_NULL) {
// u
switch (bc.uN) {
case DIRICHLET:
BC_u_N_D<<<blocks.Gfx.num_jn, blocks.Gfx.dim_jn>>>(_u_star, bc.uND);
break;
case NEUMANN:
BC_u_N_N<<<blocks.Gfx.num_jn, blocks.Gfx.dim_jn>>>(_u_star);
break;
}
// v
switch (bc.vN) {
case DIRICHLET:
BC_v_N_D<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v_star, bc.vND);
break;
case NEUMANN:
BC_v_N_N<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v_star);
break;
}
// w
switch (bc.wN) {
case DIRICHLET:
BC_w_N_D<<<blocks.Gfz.num_jn, blocks.Gfz.dim_jn>>>(_w_star, bc.wND);
break;
case NEUMANN:
BC_w_N_N<<<blocks.Gfz.num_jn, blocks.Gfz.dim_jn>>>(_w_star);
break;
}
}
// bottom
if (dom[rank].b == MPI_PROC_NULL) {
// u
switch (bc.uB) {
case DIRICHLET:
BC_u_B_D<<<blocks.Gfx.num_kn, blocks.Gfx.dim_kn>>>(_u_star, bc.uBD);
break;
case NEUMANN:
BC_u_B_N<<<blocks.Gfx.num_kn, blocks.Gfx.dim_kn>>>(_u_star);
break;
}
// v
switch (bc.vB) {
case DIRICHLET:
BC_v_B_D<<<blocks.Gfy.num_kn, blocks.Gfy.dim_kn>>>(_v_star, bc.vBD);
break;
case NEUMANN:
BC_v_B_N<<<blocks.Gfy.num_kn, blocks.Gfy.dim_kn>>>(_v_star);
break;
}
// w
switch (bc.wB) {
case DIRICHLET:
BC_w_B_D<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w_star, bc.wBD);
break;
case NEUMANN:
BC_w_B_N<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w_star);
break;
}
}
// top
if (dom[rank].t == MPI_PROC_NULL) {
// u
switch (bc.uT) {
case DIRICHLET:
BC_u_T_D<<<blocks.Gfx.num_kn, blocks.Gfx.dim_kn>>>(_u_star, bc.uTD);
break;
case NEUMANN:
BC_u_T_N<<<blocks.Gfx.num_kn, blocks.Gfx.dim_kn>>>(_u_star);
break;
}
// v
switch (bc.vT) {
case DIRICHLET:
BC_v_T_D<<<blocks.Gfy.num_kn, blocks.Gfy.dim_kn>>>(_v_star, bc.vTD);
break;
case NEUMANN:
BC_v_T_N<<<blocks.Gfy.num_kn, blocks.Gfy.dim_kn>>>(_v_star);
break;
}
// w
switch (bc.wT) {
case DIRICHLET:
BC_w_T_D<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w_star, bc.wTD);
break;
case NEUMANN:
BC_w_T_N<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w_star);
break;
}
}
}
extern "C"
void cuda_solvability(void)
{
//printf("N%d >> Enforcing solvability...\n", rank);
/* Calculate difference from zero on each domain, then MPI_Allreduce that
* value. It would be better to define an MPI_COMM for the edge cells,
* but that's an optimization.
*/
/* Differences from zero on each plane */
real eps_xs = 0.;
real eps_xe = 0.;
real eps_ys = 0.;
real eps_ye = 0.;
real eps_zs = 0.;
real eps_ze = 0.;
real eps[3]; // [x, y, z]
// local reduction, then global reduction
if (dom[rank].I == DOM.Is) {
/* Temporary storage for reduction */
real *u_star_tmp;
cudaMalloc((void**) &u_star_tmp, dom[rank].Gfx.s2_i * sizeof(real));
/* Calculate x-face integral (is) */
surf_int_xs<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u_star, u_star_tmp);
/* Reduction */
thrust::device_ptr<real> t_us_tmp(u_star_tmp);
eps_xs = thrust::reduce(t_us_tmp, t_us_tmp + dom[rank].Gfx.s2_i, 0.,
thrust::plus<real>());
eps_xs *= dom[rank].dy * dom[rank].dz;
/* clean up */
cudaFree(u_star_tmp);
}
if (dom[rank].I == DOM.Ie) {
real *u_star_tmp;
cudaMalloc((void**) &u_star_tmp, dom[rank].Gfx.s2_i * sizeof(real));
surf_int_xe<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u_star, u_star_tmp);
thrust::device_ptr<real> t_us_tmp(u_star_tmp);
eps_xe = thrust::reduce(t_us_tmp, t_us_tmp + dom[rank].Gfx.s2_i, 0.,
thrust::plus<real>());
eps_xe *= dom[rank].dy * dom[rank].dz;
cudaFree(u_star_tmp);
}
if (dom[rank].J == DOM.Js) {
real *v_star_tmp;
cudaMalloc((void**) &v_star_tmp, dom[rank].Gfy.s2_j * sizeof(real));
surf_int_ys<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v_star, v_star_tmp);
thrust::device_ptr<real> t_vs_tmp(v_star_tmp);
eps_ys = thrust::reduce(t_vs_tmp, t_vs_tmp + dom[rank].Gfy.s2_j, 0.,
thrust::plus<real>());
eps_ys *= dom[rank].dz * dom[rank].dx;
cudaFree(v_star_tmp);
}
if (dom[rank].J == DOM.Je) {
real *v_star_tmp;
cudaMalloc((void**) &v_star_tmp, dom[rank].Gfy.s2_j * sizeof(real));
surf_int_ye<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v_star, v_star_tmp);
thrust::device_ptr<real> t_vs_tmp(v_star_tmp);
eps_ye = thrust::reduce(t_vs_tmp, t_vs_tmp + dom[rank].Gfy.s2_j, 0.,
thrust::plus<real>());
eps_ye *= dom[rank].dz * dom[rank].dx;
cudaFree(v_star_tmp);
}
if (dom[rank].K == DOM.Ks) {
real *w_star_tmp;
cudaMalloc((void**) &w_star_tmp, dom[rank].Gfz.s2_k * sizeof(real));
surf_int_zs<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w_star, w_star_tmp);
thrust::device_ptr<real> t_ws_tmp(w_star_tmp);
eps_zs = thrust::reduce(t_ws_tmp, t_ws_tmp + dom[rank].Gfz.s2_k, 0.,
thrust::plus<real>());
eps_zs *= dom[rank].dx * dom[rank].dy;
cudaFree(w_star_tmp);
}
if (dom[rank].K == DOM.Ke) {
real *w_star_tmp;
cudaMalloc((void**) &w_star_tmp, dom[rank].Gfz.s2_k * sizeof(real));
surf_int_ze<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w_star, w_star_tmp);
thrust::device_ptr<real> t_ws_tmp(w_star_tmp);
eps_ze = thrust::reduce(t_ws_tmp, t_ws_tmp + dom[rank].Gfz.s2_k, 0.,
thrust::plus<real>());
eps_ze *= dom[rank].dx * dom[rank].dy;
cudaFree(w_star_tmp);
}
/* Find difference in each direction */
eps[0] = eps_xe - eps_xs;
eps[1] = eps_ye - eps_ys;
eps[2] = eps_ze - eps_zs;
/* MPI_Allreduce */
MPI_Allreduce(MPI_IN_PLACE, &eps, 3, mpi_real, MPI_SUM, MPI_COMM_WORLD);
/* subtract eps from outflow plane */
real sum;
switch (out_plane) {
case WEST:
if (dom[rank].I == DOM.Is) {
sum = (eps[0] + eps[1] + eps[2])/(DOM.yl * DOM.zl);
plane_eps_x_W<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u_star, sum);
}
break;
case EAST:
if (dom[rank].I == DOM.Ie) {
sum = (eps[0] + eps[1] + eps[2])/(DOM.yl * DOM.zl);
plane_eps_x_E<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u_star, sum);
}
break;
case SOUTH:
if (dom[rank].J == DOM.Js) {
sum = (eps[0] + eps[1] + eps[2])/(DOM.zl * DOM.xl);
plane_eps_y_S<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v_star, sum);
}
break;
case NORTH:
if (dom[rank].J == DOM.Je) {
sum = (eps[0] + eps[1] + eps[2])/(DOM.zl * DOM.xl);
plane_eps_y_N<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v_star, sum);
}
break;
case BOTTOM:
if (dom[rank].K == DOM.Ks) {
sum = (eps[0] + eps[1] + eps[2])/(DOM.xl * DOM.yl);
plane_eps_z_B<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w_star, sum);
}
break;
case TOP:
if (dom[rank].K == DOM.Ke) {
sum = (eps[0] + eps[1] + eps[2])/(DOM.xl * DOM.yl);
plane_eps_z_T<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w_star, sum);
}
break;
case HOMOGENEOUS:
// spread over entire domain
real sum_x = 0.5*eps[0]/(DOM.yl * DOM.zl);
real sum_y = 0.5*eps[1]/(DOM.zl * DOM.xl);
real sum_z = 0.5*eps[2]/(DOM.xl * DOM.yl);
if (dom[rank].I == DOM.Is)
plane_eps_x_W<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u_star, sum_x);
if (dom[rank].I == DOM.Ie)
plane_eps_x_E<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u_star, sum_x);
if (dom[rank].J == DOM.Js)
plane_eps_y_S<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v_star, sum_y);
if (dom[rank].J == DOM.Je)
plane_eps_y_N<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v_star, sum_y);
if (dom[rank].K == DOM.Ks)
plane_eps_z_B<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w_star, sum_z);
if (dom[rank].K == DOM.Ke)
plane_eps_z_T<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w_star, sum_z);
break;
}
}
extern "C"
void cuda_project(void)
{
project_u<<<blocks.Gfx.num_in, blocks.Gfx.dim_in>>>(_u_star, _phi, rho_f, dt,
_u, 1. / dom[rank].dx, _flag_u);
project_v<<<blocks.Gfy.num_jn, blocks.Gfy.dim_jn>>>(_v_star, _phi, rho_f, dt,
_v, 1. / dom[rank].dy, _flag_v);
project_w<<<blocks.Gfz.num_kn, blocks.Gfz.dim_kn>>>(_w_star, _phi, rho_f, dt,
_w, 1. / dom[rank].dz, _flag_w);
}
extern "C"
void cuda_update_p()
{
/* Calculate laplacian of phi and update */
real *_Lp;
cudaMalloc((void**) &_Lp, sizeof(real)*dom[rank].Gcc.s3b);
update_p_laplacian<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(_Lp, _phi);
update_p<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(_Lp, _p0, _p, _phi, nu,
dt, _phase);
cudaFree(_Lp);
/* set mean pressure to zero */
real *_p_mean;
cudaMalloc((void**) &_p_mean, sizeof(real)*dom[rank].Gcc.s3);
copy_p_p_noghost<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(_p_mean, _p);
thrust::device_ptr<real> t_p_mean(_p_mean);
real pmean = thrust::reduce(t_p_mean, t_p_mean + dom[rank].Gcc.s3, 0.,
thrust::plus<real>());
MPI_Allreduce(MPI_IN_PLACE, &pmean, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
pmean /= (real) DOM.Gcc.s3;
// numerical reproducibility? + associativity of floating point addition
cudaFree(_p_mean);
forcing_add_c_const<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(-pmean, _p);
}
extern "C"
void cuda_dom_BC_p(real *array)
{
// Can do this with an if/else, not switch/case
// if (bc.pW == NEUMANN)...
// Could also do this with dom[rank].I == DOM.Is
if (dom[rank].w == MPI_PROC_NULL) { // WEST
switch (bc.pW) {
case NEUMANN:
BC_p_W_N<<<blocks.Gcc.num_in, blocks.Gcc.dim_in>>>(array);
break;
}
}
if (dom[rank].e == MPI_PROC_NULL) { // EAST
switch (bc.pE) {
case NEUMANN:
BC_p_E_N<<<blocks.Gcc.num_in, blocks.Gcc.dim_in>>>(array);
break;
}
}
if (dom[rank].s == MPI_PROC_NULL) { // SOUTH
switch (bc.pS) {
case NEUMANN:
BC_p_S_N<<<blocks.Gcc.num_jn, blocks.Gcc.dim_jn>>>(array);
break;
}
}
if (dom[rank].n == MPI_PROC_NULL) { // NORTH
switch (bc.pN) {
case NEUMANN:
BC_p_N_N<<<blocks.Gcc.num_jn, blocks.Gcc.dim_jn>>>(array);
break;
}
}
if (dom[rank].b == MPI_PROC_NULL) { // BOTTOM
switch (bc.pB) {
case NEUMANN:
BC_p_B_N<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(array);
break;
}
}
if (dom[rank].t == MPI_PROC_NULL) { // TOP
switch (bc.pT) {
case NEUMANN:
BC_p_T_N<<<blocks.Gcc.num_kn, blocks.Gcc.dim_kn>>>(array);
break;
}
}
}
extern "C"
void cuda_store_u(void)
{
cudaMemcpy(_conv0_u, _conv_u, dom[rank].Gfx.s3b*sizeof(real),
cudaMemcpyDeviceToDevice);
cudaMemcpy(_conv0_v, _conv_v, dom[rank].Gfy.s3b*sizeof(real),
cudaMemcpyDeviceToDevice);
cudaMemcpy(_conv0_w, _conv_w, dom[rank].Gfz.s3b*sizeof(real),
cudaMemcpyDeviceToDevice);
cudaMemcpy(_diff0_u, _diff_u, dom[rank].Gfx.s3b*sizeof(real),
cudaMemcpyDeviceToDevice);
cudaMemcpy(_diff0_v, _diff_v, dom[rank].Gfy.s3b*sizeof(real),
cudaMemcpyDeviceToDevice);
cudaMemcpy(_diff0_w, _diff_w, dom[rank].Gfz.s3b*sizeof(real),
cudaMemcpyDeviceToDevice);
cudaMemcpy(_p0, _p, dom[rank].Gcc.s3b*sizeof(real),
cudaMemcpyDeviceToDevice);
cudaMemcpy(_u0, _u, dom[rank].Gfx.s3b*sizeof(real),
cudaMemcpyDeviceToDevice);
cudaMemcpy(_v0, _v, dom[rank].Gfy.s3b*sizeof(real),
cudaMemcpyDeviceToDevice);
cudaMemcpy(_w0, _w, dom[rank].Gfz.s3b*sizeof(real),
cudaMemcpyDeviceToDevice);
}
extern "C"
void cuda_dom_free(void)
{
// Free cuda memory on host
checkCudaErrors(cudaFreeHost(p));
checkCudaErrors(cudaFreeHost(p0));
checkCudaErrors(cudaFreeHost(u));
checkCudaErrors(cudaFreeHost(v));
checkCudaErrors(cudaFreeHost(w));
checkCudaErrors(cudaFreeHost(u0));
checkCudaErrors(cudaFreeHost(v0));
checkCudaErrors(cudaFreeHost(w0));
checkCudaErrors(cudaFreeHost(conv_u));
checkCudaErrors(cudaFreeHost(conv_v));
checkCudaErrors(cudaFreeHost(conv_w));
checkCudaErrors(cudaFreeHost(conv0_u));
checkCudaErrors(cudaFreeHost(conv0_v));
checkCudaErrors(cudaFreeHost(conv0_w));
checkCudaErrors(cudaFreeHost(diff_u));
checkCudaErrors(cudaFreeHost(diff_v));
checkCudaErrors(cudaFreeHost(diff_w));
checkCudaErrors(cudaFreeHost(diff0_u));
checkCudaErrors(cudaFreeHost(diff0_v));
checkCudaErrors(cudaFreeHost(diff0_w));
checkCudaErrors(cudaFreeHost(f_x));
checkCudaErrors(cudaFreeHost(f_y));
checkCudaErrors(cudaFreeHost(f_z));
checkCudaErrors(cudaFreeHost(u_star));
checkCudaErrors(cudaFreeHost(v_star));
checkCudaErrors(cudaFreeHost(w_star));
checkCudaErrors(cudaFreeHost(flag_u));
checkCudaErrors(cudaFreeHost(flag_v));
checkCudaErrors(cudaFreeHost(flag_w));
checkCudaErrors(cudaFreeHost(phi));
// Free cuda memory on device
checkCudaErrors(cudaFree(_DOM));
checkCudaErrors(cudaFree(_bc));
checkCudaErrors(cudaFree(_p));
checkCudaErrors(cudaFree(_p0));
checkCudaErrors(cudaFree(_phi));
checkCudaErrors(cudaFree(_phinoghost));
checkCudaErrors(cudaFree(_invM));
checkCudaErrors(cudaFree(_u));
checkCudaErrors(cudaFree(_v));
checkCudaErrors(cudaFree(_w));
checkCudaErrors(cudaFree(_u0));
checkCudaErrors(cudaFree(_v0));
checkCudaErrors(cudaFree(_w0));
checkCudaErrors(cudaFree(_conv_u));
checkCudaErrors(cudaFree(_conv_v));
checkCudaErrors(cudaFree(_conv_w));
checkCudaErrors(cudaFree(_conv0_u));
checkCudaErrors(cudaFree(_conv0_v));
checkCudaErrors(cudaFree(_conv0_w));
checkCudaErrors(cudaFree(_diff_u));
checkCudaErrors(cudaFree(_diff_v));
checkCudaErrors(cudaFree(_diff_w));
checkCudaErrors(cudaFree(_diff0_u));
checkCudaErrors(cudaFree(_diff0_v));
checkCudaErrors(cudaFree(_diff0_w));
checkCudaErrors(cudaFree(_f_x));
checkCudaErrors(cudaFree(_f_y));
checkCudaErrors(cudaFree(_f_z));
checkCudaErrors(cudaFree(_u_star));
checkCudaErrors(cudaFree(_v_star));
checkCudaErrors(cudaFree(_w_star));
checkCudaErrors(cudaFree(_flag_u));
checkCudaErrors(cudaFree(_flag_v));
checkCudaErrors(cudaFree(_flag_w));
checkCudaErrors(cudaFree(_rhs_p));
checkCudaErrors(cudaFree(_r_q));
checkCudaErrors(cudaFree(_z_q));
//checkCudaErrors(cudaFree(_rs_0));
checkCudaErrors(cudaFree(_p_q));
checkCudaErrors(cudaFree(_pb_q));
//checkCudaErrors(cudaFree(_s_q));
//checkCudaErrors(cudaFree(_sb_q));
checkCudaErrors(cudaFree(_Apb_q));
//checkCudaErrors(cudaFree(_Asb_q));
checkCudaErrors(cudaFree(_send_Gcc_e));
checkCudaErrors(cudaFree(_send_Gcc_w));
checkCudaErrors(cudaFree(_send_Gcc_n));
checkCudaErrors(cudaFree(_send_Gcc_s));
checkCudaErrors(cudaFree(_send_Gcc_t));
checkCudaErrors(cudaFree(_send_Gcc_b));
checkCudaErrors(cudaFree(_send_Gfx_e));
checkCudaErrors(cudaFree(_send_Gfx_w));
checkCudaErrors(cudaFree(_send_Gfx_n));
checkCudaErrors(cudaFree(_send_Gfx_s));
checkCudaErrors(cudaFree(_send_Gfx_t));
checkCudaErrors(cudaFree(_send_Gfx_b));
checkCudaErrors(cudaFree(_send_Gfy_e));
checkCudaErrors(cudaFree(_send_Gfy_w));
checkCudaErrors(cudaFree(_send_Gfy_n));
checkCudaErrors(cudaFree(_send_Gfy_s));
checkCudaErrors(cudaFree(_send_Gfy_t));
checkCudaErrors(cudaFree(_send_Gfy_b));
checkCudaErrors(cudaFree(_send_Gfz_e));
checkCudaErrors(cudaFree(_send_Gfz_w));
checkCudaErrors(cudaFree(_send_Gfz_n));
checkCudaErrors(cudaFree(_send_Gfz_s));
checkCudaErrors(cudaFree(_send_Gfz_t));
checkCudaErrors(cudaFree(_send_Gfz_b));
checkCudaErrors(cudaFree(_recv_Gcc_e));
checkCudaErrors(cudaFree(_recv_Gcc_w));
checkCudaErrors(cudaFree(_recv_Gcc_n));
checkCudaErrors(cudaFree(_recv_Gcc_s));
checkCudaErrors(cudaFree(_recv_Gcc_t));
checkCudaErrors(cudaFree(_recv_Gcc_b));
checkCudaErrors(cudaFree(_recv_Gfx_e));
checkCudaErrors(cudaFree(_recv_Gfx_w));
checkCudaErrors(cudaFree(_recv_Gfx_n));
checkCudaErrors(cudaFree(_recv_Gfx_s));
checkCudaErrors(cudaFree(_recv_Gfx_t));
checkCudaErrors(cudaFree(_recv_Gfx_b));
checkCudaErrors(cudaFree(_recv_Gfy_e));
checkCudaErrors(cudaFree(_recv_Gfy_w));
checkCudaErrors(cudaFree(_recv_Gfy_n));
checkCudaErrors(cudaFree(_recv_Gfy_s));
checkCudaErrors(cudaFree(_recv_Gfy_t));
checkCudaErrors(cudaFree(_recv_Gfy_b));
checkCudaErrors(cudaFree(_recv_Gfz_e));
checkCudaErrors(cudaFree(_recv_Gfz_w));
checkCudaErrors(cudaFree(_recv_Gfz_n));
checkCudaErrors(cudaFree(_recv_Gfz_s));
checkCudaErrors(cudaFree(_recv_Gfz_t));
checkCudaErrors(cudaFree(_recv_Gfz_b));
// Reset devices
checkCudaErrors(cudaDeviceReset());
}
// Miscellaneous functions
extern "C"
void cuda_wall_shear_stress()
{
real *_dudy;
cudaMalloc(&_dudy, dom[rank].Gfx.s2_j * sizeof(real));
cudaMemset(_dudy, 0., dom[rank].Gfx.s2_j);
thrust::device_ptr<real> t_dudy(_dudy);
real dudy_s = 0.;
real dudy_n = 0.;
// On south face
if (dom[rank].J == DOM.Js) {
calc_dudy<<<blocks.Gfx.num_jn, blocks.Gfx.dim_jn>>>(_u, _dudy,
dom[rank].Gfx._jsb);
dudy_s = thrust::reduce(t_dudy, t_dudy + dom[rank].Gfx.s2_j, 0.,
thrust::plus<real>());
} else {
dudy_s = 0.;
}
MPI_Allreduce(MPI_IN_PLACE, &dudy_s, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
dudy_s /= DOM.Gfx.s2_j;
// On north face
if (dom[rank].J == DOM.Je) {
calc_dudy<<<blocks.Gfx.num_jn, blocks.Gfx.dim_jn>>>(_u, _dudy,
dom[rank].Gfx._je);
dudy_n = thrust::reduce(t_dudy, t_dudy + dom[rank].Gfx.s2_j, 0.,
thrust::plus<real>());
} else {
dudy_n = 0.;
}
MPI_Allreduce(MPI_IN_PLACE, &dudy_n, 1, mpi_real, MPI_SUM, MPI_COMM_WORLD);
dudy_n /= DOM.Gfx.s2_j;
// Open file for writing
if (rank == 0) {
char fname[FILE_NAME_SIZE];
sprintf(fname, "%s/%s/wss.dat", ROOT_DIR, OUTPUT_DIR);
FILE *file;
if (stepnum == 1) {
file = fopen(fname, "w");
if (file == NULL) {
fprintf(stderr, "Could not open file %s\n", fname);
exit(EXIT_FAILURE);
}
fprintf(file, "%-9s", "stepnum");
fprintf(file, "%-11s", "ttime");
fprintf(file, "%-11s", "wss-s");
fprintf(file, "%-11s", "wss-n");
} else {
file = fopen(fname, "a");
if (file == NULL) {
fprintf(stderr, "Could not open file %s\n", fname);
exit(EXIT_FAILURE);
}
}
fprintf(file, "\n");
fprintf(file, "%-9d", stepnum);
fprintf(file, "%-11.3e", ttime);
fprintf(file, "%-11.3e", rho_f*nu*dudy_s);
fprintf(file, "%-11.3e", rho_f*nu*dudy_n);
fclose(file);
}
// Free
cudaFree(_dudy);
}
|
d5ae1dabad0105f9191ff6cde143ebf223bcab0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudagpu.cuh"
CudaGPU::CudaGPU(int devNum)
{
id = devNum;
printf("Starting CUDA device query...\n");
int deviceCount = 0;
CUDA_CHECK(hipGetDeviceCount(&deviceCount));
CUDA_CHECK(hipSetDevice(id));
CUDA_CHECK(hipGetDeviceProperties(&prop, id));
if (deviceCount == 0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("Detected %d CUDA Capable device(s), choosed device %d\n", deviceCount, id);
}
(cuMemGetInfo(&free_mem, &total_mem));
}
CudaGPU::~CudaGPU()
{
hipDeviceReset();
}
void CudaGPU::setDeviceID(int val)
{
id = val;
}
int CudaGPU::getDeviceID()
{
return id;
}
hipDeviceProp_t CudaGPU::getProperties()
{
return prop;
}
int CudaGPU::checkMemory(size_t size, bool print)
{
if(size != 0 && print)
printf("GPU free mem: (%.2f/%.2f) MBytes\n", (float)free_mem/(1024*1024), (float)total_mem/(1024*1024));
if(free_mem < size)
return 0;
free_mem = free_mem - size;
return 1;
}
| d5ae1dabad0105f9191ff6cde143ebf223bcab0a.cu | #include "cudagpu.cuh"
CudaGPU::CudaGPU(int devNum)
{
id = devNum;
printf("Starting CUDA device query...\n");
int deviceCount = 0;
CUDA_CHECK(cudaGetDeviceCount(&deviceCount));
CUDA_CHECK(cudaSetDevice(id));
CUDA_CHECK(cudaGetDeviceProperties(&prop, id));
if (deviceCount == 0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("Detected %d CUDA Capable device(s), choosed device %d\n", deviceCount, id);
}
(cuMemGetInfo(&free_mem, &total_mem));
}
CudaGPU::~CudaGPU()
{
cudaDeviceReset();
}
void CudaGPU::setDeviceID(int val)
{
id = val;
}
int CudaGPU::getDeviceID()
{
return id;
}
cudaDeviceProp CudaGPU::getProperties()
{
return prop;
}
int CudaGPU::checkMemory(size_t size, bool print)
{
if(size != 0 && print)
printf("GPU free mem: (%.2f/%.2f) MBytes\n", (float)free_mem/(1024*1024), (float)total_mem/(1024*1024));
if(free_mem < size)
return 0;
free_mem = free_mem - size;
return 1;
}
|
8c528f05f3ea2637876fb3e8db0bc4f00f0055f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
namespace HugeCTR {
namespace {
// memset liner data to the buffer
template <typename Type>
__global__ void memset_liner_kernel(Type *data, const Type start_value, const Type stride_value,
size_t n) {
size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < n) {
data[gid] = start_value + gid * stride_value;
}
}
// memset constant data to the buffer
template <typename Type>
__global__ void memset_const_kernel(Type *data, const Type value, long long n) {
size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < n) {
data[gid] = value;
}
}
// get hash_value by value_index from hash_table_value matrix
template <typename TypeValueIndex>
__global__ void get_hash_value_kernel(long long count, int embedding_vec_size,
const TypeValueIndex *value_index,
const float *hash_table_value, float *value_retrieved) {
int tid = threadIdx.x;
size_t bid = blockIdx.x;
if (bid < count && tid < embedding_vec_size) {
size_t index = value_index[bid]; // row number in the hash_table_value matrix
value_retrieved[bid * embedding_vec_size + tid] =
hash_table_value[index * embedding_vec_size + tid];
}
}
} // namespace
template <typename Type>
void SparseEmbeddingFunctors::memset_liner(Type *data, Type start_value, Type stride_value,
size_t n, hipStream_t stream) const {
const size_t block_size = 256;
const size_t grid_size = (n + block_size - 1) / block_size;
hipLaunchKernelGGL(( memset_liner_kernel), dim3(grid_size), dim3(block_size), 0, stream, data, start_value, stride_value, n);
}
void SparseEmbeddingFunctors::memset_const(size_t *data, size_t value, size_t n,
hipStream_t stream) const {
const size_t block_size = 256;
const size_t grid_size = (n + block_size - 1) / block_size;
hipLaunchKernelGGL(( memset_const_kernel), dim3(grid_size), dim3(block_size), 0, stream, data, value, n);
}
void SparseEmbeddingFunctors::get_hash_value(size_t count, size_t embedding_vec_size,
const size_t *value_index,
const float *hash_table_value, float *value_retrieved,
hipStream_t stream) const {
const size_t block_size = embedding_vec_size;
const size_t grid_size = count;
hipLaunchKernelGGL(( get_hash_value_kernel), dim3(grid_size), dim3(block_size), 0, stream,
count, embedding_vec_size, value_index, hash_table_value, value_retrieved);
}
template void SparseEmbeddingFunctors::memset_liner<unsigned int>(unsigned int *data,
unsigned int start_value,
unsigned int stride_value,
size_t n,
hipStream_t stream) const;
template void SparseEmbeddingFunctors::memset_liner<long long>(long long *data,
long long start_value,
long long stride_value, size_t n,
hipStream_t stream) const;
template void SparseEmbeddingFunctors::memset_liner<size_t>(size_t *data, size_t start_value,
size_t stride_value, size_t n,
hipStream_t stream) const;
} // namespace HugeCTR | 8c528f05f3ea2637876fb3e8db0bc4f00f0055f3.cu |
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
namespace HugeCTR {
namespace {
// memset liner data to the buffer
template <typename Type>
__global__ void memset_liner_kernel(Type *data, const Type start_value, const Type stride_value,
size_t n) {
size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < n) {
data[gid] = start_value + gid * stride_value;
}
}
// memset constant data to the buffer
template <typename Type>
__global__ void memset_const_kernel(Type *data, const Type value, long long n) {
size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < n) {
data[gid] = value;
}
}
// get hash_value by value_index from hash_table_value matrix
template <typename TypeValueIndex>
__global__ void get_hash_value_kernel(long long count, int embedding_vec_size,
const TypeValueIndex *value_index,
const float *hash_table_value, float *value_retrieved) {
int tid = threadIdx.x;
size_t bid = blockIdx.x;
if (bid < count && tid < embedding_vec_size) {
size_t index = value_index[bid]; // row number in the hash_table_value matrix
value_retrieved[bid * embedding_vec_size + tid] =
hash_table_value[index * embedding_vec_size + tid];
}
}
} // namespace
template <typename Type>
void SparseEmbeddingFunctors::memset_liner(Type *data, Type start_value, Type stride_value,
size_t n, cudaStream_t stream) const {
const size_t block_size = 256;
const size_t grid_size = (n + block_size - 1) / block_size;
memset_liner_kernel<<<grid_size, block_size, 0, stream>>>(data, start_value, stride_value, n);
}
void SparseEmbeddingFunctors::memset_const(size_t *data, size_t value, size_t n,
cudaStream_t stream) const {
const size_t block_size = 256;
const size_t grid_size = (n + block_size - 1) / block_size;
memset_const_kernel<<<grid_size, block_size, 0, stream>>>(data, value, n);
}
void SparseEmbeddingFunctors::get_hash_value(size_t count, size_t embedding_vec_size,
const size_t *value_index,
const float *hash_table_value, float *value_retrieved,
cudaStream_t stream) const {
const size_t block_size = embedding_vec_size;
const size_t grid_size = count;
get_hash_value_kernel<<<grid_size, block_size, 0, stream>>>(
count, embedding_vec_size, value_index, hash_table_value, value_retrieved);
}
template void SparseEmbeddingFunctors::memset_liner<unsigned int>(unsigned int *data,
unsigned int start_value,
unsigned int stride_value,
size_t n,
cudaStream_t stream) const;
template void SparseEmbeddingFunctors::memset_liner<long long>(long long *data,
long long start_value,
long long stride_value, size_t n,
cudaStream_t stream) const;
template void SparseEmbeddingFunctors::memset_liner<size_t>(size_t *data, size_t start_value,
size_t stride_value, size_t n,
cudaStream_t stream) const;
} // namespace HugeCTR |
3b448c4e2a37e8b3f9a81ea9e9bca2fdd3f7c59a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_CUDNN
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + weight_offset_ * g,
conv_descs_[i],
top_descs_[i], top_data + top_offset_ * g,
CUDNN_RESULT_NO_ACCUMULATE));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
Dtype alpha = 1.;
CUDNN_CHECK(cudnnAddTensor4d(handle_[g], CUDNN_ADD_SAME_C, &alpha,
bias_desc_, bias_data + bias_offset_ * g,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff);
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff);
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
top_descs_[i], top_diff + top_offset_ * g,
bias_desc_, bias_diff + bias_offset_ * g,
CUDNN_RESULT_ACCUMULATE));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g],
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
filter_desc_, weight_diff + weight_offset_ * g,
CUDNN_RESULT_ACCUMULATE));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g],
filter_desc_, weight + weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bottom_descs_[i], bottom_diff + bottom_offset_ * g,
CUDNN_RESULT_NO_ACCUMULATE));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, );
}
}
INSTANTIATE_CLASS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
| 3b448c4e2a37e8b3f9a81ea9e9bca2fdd3f7c59a.cu | #ifdef USE_CUDNN
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
__global__ void sync_conv_groups() { }
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
for (int i = 0; i < bottom.size(); ++i) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* top_data = top[i]->mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
// Forward through cuDNN in parallel over groups.
for (int g = 0; g < this->group_; g++) {
// Filters.
CUDNN_CHECK(cudnnConvolutionForward(handle_[g],
bottom_descs_[i], bottom_data + bottom_offset_ * g,
filter_desc_, weight + weight_offset_ * g,
conv_descs_[i],
top_descs_[i], top_data + top_offset_ * g,
CUDNN_RESULT_NO_ACCUMULATE));
// Bias.
if (this->bias_term_) {
const Dtype* bias_data = this->blobs_[1]->gpu_data();
Dtype alpha = 1.;
CUDNN_CHECK(cudnnAddTensor4d(handle_[g], CUDNN_ADD_SAME_C, &alpha,
bias_desc_, bias_data + bias_offset_ * g,
top_descs_[i], top_data + top_offset_ * g));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
template <typename Dtype>
void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = NULL;
Dtype* weight_diff = NULL;
if (this->param_propagate_down_[0]) {
weight = this->blobs_[0]->gpu_data();
weight_diff = this->blobs_[0]->mutable_gpu_diff();
caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff);
}
Dtype* bias_diff = NULL;
if (this->bias_term_ && this->param_propagate_down_[1]) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
caffe_gpu_set(this->blobs_[1]->count(), Dtype(0), bias_diff);
}
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
// Backward through cuDNN in parallel over groups and gradients.
for (int g = 0; g < this->group_; g++) {
// Gradient w.r.t. bias.
if (this->bias_term_ && this->param_propagate_down_[1]) {
CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g],
top_descs_[i], top_diff + top_offset_ * g,
bias_desc_, bias_diff + bias_offset_ * g,
CUDNN_RESULT_ACCUMULATE));
}
// Gradient w.r.t. weights.
if (this->param_propagate_down_[0]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
CUDNN_CHECK(cudnnConvolutionBackwardFilter(handle_[1*this->group_ + g],
bottom_descs_[i], bottom_data + bottom_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
filter_desc_, weight_diff + weight_offset_ * g,
CUDNN_RESULT_ACCUMULATE));
}
// Gradient w.r.t. bottom data.
if (propagate_down[i]) {
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
CUDNN_CHECK(cudnnConvolutionBackwardData(handle_[2*this->group_ + g],
filter_desc_, weight + weight_offset_ * g,
top_descs_[i], top_diff + top_offset_ * g,
conv_descs_[i],
bottom_descs_[i], bottom_diff + bottom_offset_ * g,
CUDNN_RESULT_NO_ACCUMULATE));
}
}
// Synchronize the work across groups, each of which went into its own
// stream, by launching an empty kernel into the default (null) stream.
// NOLINT_NEXT_LINE(whitespace/operators)
sync_conv_groups<<<1, 1>>>();
}
}
INSTANTIATE_CLASS(CuDNNConvolutionLayer);
} // namespace caffe
#endif
|
c9153f7681f8b6f8023fc8402bd2ad7d261c6c4c.hip | // !!! This is a file automatically generated by hipify!!!
#include "Ogolne.h"
#ifndef FUNCTIONS_CU
#define FUNCTIONS_CU
void initialPermutations(int** hostArray, int populationSize, int instanceSize) {
const int maxInt = MAXINT;
//initial permutations
for(int i = 0; i < populationSize; i++) {
for(int j = 0; j < instanceSize; j++) {
hostArray[i][j] = j;
}
//hostArray[i][instanceSize] = maxInt; //poczatkowa inicjalizacja
//hostArray[i][instanceSize + 1] = 0;
// FisherYates
for(int j = instanceSize - 1; j > 0; j--) {
int x = rand() % (j + 1);
int tmp = hostArray[i][x];
hostArray[i][x] = hostArray[i][j];
hostArray[i][j] = tmp;
}
}
}
void createCuda2DArrayInt(int**& hostPtr, int**& devicePtr, int** data, int rows, int columns) {
hostPtr = (int**)malloc((rows) * sizeof(int*));
for(int i = 0; i < rows; i++) {
hipMalloc((void**)&hostPtr[i], columns * sizeof(int));
if(data != NULL) {
hipMemcpy(hostPtr[i], &data[i][0], columns * sizeof(int), hipMemcpyHostToDevice);
}
}
hipMalloc((void ***)&devicePtr, rows * sizeof(int*));
hipMemcpy(devicePtr, hostPtr, rows * sizeof(int*), hipMemcpyHostToDevice);
}
void createCuda2DArrayFloat(float**& hostPtr, float**& devicePtr, float** data, int rows, int columns) {
hostPtr = (float**)malloc((rows) * sizeof(float*));
for(int i = 0; i < rows; i++) {
hipMalloc((void**)&hostPtr[i], columns * sizeof(float));
if(data != NULL) {
hipMemcpy(hostPtr[i], &data[i][0], columns * sizeof(float), hipMemcpyHostToDevice);
}
}
hipMalloc((void ***)&devicePtr, rows * sizeof(float*));
hipMemcpy(devicePtr, hostPtr, rows * sizeof(float*), hipMemcpyHostToDevice);
}
void destroyCuda2DArray(int**& hostPtr, int**& devicePtr, int rows) {
for (int i = 0; i < rows; i++) {
hipFree(hostPtr[i]);
}
hipFree(devicePtr);
free(hostPtr);
}
int testRouteArrayReversed(int** deviceArray, int rows, int columns, char* message) {
bool* test = (bool*)malloc(rows * sizeof(bool));
for(int i = 0 ; i < rows ; i++){
test[i] = false;
}
int** hostArray = (int**)malloc(rows * sizeof(int*));
for(int i = 0; i < rows; i++) {
hostArray[i] = (int*)malloc(columns * sizeof(int));
}
for(int i = 0; i < rows; i++) {
hipMemcpy(hostArray[i], deviceArray[i], columns * sizeof(int), hipMemcpyDeviceToHost);
}
for(int i = 0 ; i < columns; i++) {
/*printf( "\n\n");
for(int k = 0; k < rows; k++) {
printf( "%d -> ", hostArray[k][i]);
}*/
for(int j = 0 ; j < rows; j++) {
/*if(test[hostArray[j][i]] == false) {
test[hostArray[j][i]] = true;
} else {
fprintf(stderr, "\nRow: %d, Col: %d, Val: %d, Message: %s\n", j, i, hostArray[j][i], message);
for(int k = 0; k < rows; k++) {
fprintf(stderr, "%d -> ", hostArray[k][i]);
}
return 1;
}*/
printf("%d -> ", hostArray[j][i]);
}
return 1;
printf("___________________________________\n\n");
for(int i = 0 ; i < rows ; i++){
test[i] = false;
}
}
for(int i = 0; i < rows; i++) {
free(hostArray[i]);
}
free(hostArray);
free(test);
return 1;
}
int testRouteArray(int** deviceArray, int rows, int columns, char* message) {
bool* test = (bool*)malloc(columns * sizeof(bool));
for(int i = 0 ; i < columns ; i++){
test[i] = false;
}
int** hostArray = (int**)malloc(rows * sizeof(int*));
for(int i = 0; i < rows; i++) {
hostArray[i] = (int*)malloc(columns * sizeof(int));
}
for(int i = 0; i < rows; i++) {
hipMemcpy(hostArray[i], deviceArray[i], columns * sizeof(int), hipMemcpyDeviceToHost);
}
for(int i = 0 ; i < rows; i++) {
for(int j = 0 ; j < columns; j++) {
if(test[hostArray[i][j]] == false) {
test[hostArray[i][j]] = true;
} else {
fprintf(stderr, "\nRow: %d, Col: %d, Val: %d, Message: %s\n", i, j, hostArray[i][j], message);
for(int k = 0; k < columns; k++) {
fprintf(stderr, "%d -> ", hostArray[i][k]);
}
getchar();
return 1;
}
}
for(int i = 0 ; i < columns ; i++) {
test[i] = false;
}
}
free(test);
for(int i = 0; i < rows; i++) {
free(hostArray[i]);
}
free(hostArray);
}
#endif | c9153f7681f8b6f8023fc8402bd2ad7d261c6c4c.cu | #include "Ogolne.h"
#ifndef FUNCTIONS_CU
#define FUNCTIONS_CU
void initialPermutations(int** hostArray, int populationSize, int instanceSize) {
const int maxInt = MAXINT;
//initial permutations
for(int i = 0; i < populationSize; i++) {
for(int j = 0; j < instanceSize; j++) {
hostArray[i][j] = j;
}
//hostArray[i][instanceSize] = maxInt; //poczatkowa inicjalizacja
//hostArray[i][instanceSize + 1] = 0;
// Fisher–Yates
for(int j = instanceSize - 1; j > 0; j--) {
int x = rand() % (j + 1);
int tmp = hostArray[i][x];
hostArray[i][x] = hostArray[i][j];
hostArray[i][j] = tmp;
}
}
}
void createCuda2DArrayInt(int**& hostPtr, int**& devicePtr, int** data, int rows, int columns) {
hostPtr = (int**)malloc((rows) * sizeof(int*));
for(int i = 0; i < rows; i++) {
cudaMalloc((void**)&hostPtr[i], columns * sizeof(int));
if(data != NULL) {
cudaMemcpy(hostPtr[i], &data[i][0], columns * sizeof(int), cudaMemcpyHostToDevice);
}
}
cudaMalloc((void ***)&devicePtr, rows * sizeof(int*));
cudaMemcpy(devicePtr, hostPtr, rows * sizeof(int*), cudaMemcpyHostToDevice);
}
void createCuda2DArrayFloat(float**& hostPtr, float**& devicePtr, float** data, int rows, int columns) {
hostPtr = (float**)malloc((rows) * sizeof(float*));
for(int i = 0; i < rows; i++) {
cudaMalloc((void**)&hostPtr[i], columns * sizeof(float));
if(data != NULL) {
cudaMemcpy(hostPtr[i], &data[i][0], columns * sizeof(float), cudaMemcpyHostToDevice);
}
}
cudaMalloc((void ***)&devicePtr, rows * sizeof(float*));
cudaMemcpy(devicePtr, hostPtr, rows * sizeof(float*), cudaMemcpyHostToDevice);
}
void destroyCuda2DArray(int**& hostPtr, int**& devicePtr, int rows) {
for (int i = 0; i < rows; i++) {
cudaFree(hostPtr[i]);
}
cudaFree(devicePtr);
free(hostPtr);
}
int testRouteArrayReversed(int** deviceArray, int rows, int columns, char* message) {
bool* test = (bool*)malloc(rows * sizeof(bool));
for(int i = 0 ; i < rows ; i++){
test[i] = false;
}
int** hostArray = (int**)malloc(rows * sizeof(int*));
for(int i = 0; i < rows; i++) {
hostArray[i] = (int*)malloc(columns * sizeof(int));
}
for(int i = 0; i < rows; i++) {
cudaMemcpy(hostArray[i], deviceArray[i], columns * sizeof(int), cudaMemcpyDeviceToHost);
}
for(int i = 0 ; i < columns; i++) {
/*printf( "\n\n");
for(int k = 0; k < rows; k++) {
printf( "%d -> ", hostArray[k][i]);
}*/
for(int j = 0 ; j < rows; j++) {
/*if(test[hostArray[j][i]] == false) {
test[hostArray[j][i]] = true;
} else {
fprintf(stderr, "\nRow: %d, Col: %d, Val: %d, Message: %s\n", j, i, hostArray[j][i], message);
for(int k = 0; k < rows; k++) {
fprintf(stderr, "%d -> ", hostArray[k][i]);
}
return 1;
}*/
printf("%d -> ", hostArray[j][i]);
}
return 1;
printf("___________________________________\n\n");
for(int i = 0 ; i < rows ; i++){
test[i] = false;
}
}
for(int i = 0; i < rows; i++) {
free(hostArray[i]);
}
free(hostArray);
free(test);
return 1;
}
int testRouteArray(int** deviceArray, int rows, int columns, char* message) {
bool* test = (bool*)malloc(columns * sizeof(bool));
for(int i = 0 ; i < columns ; i++){
test[i] = false;
}
int** hostArray = (int**)malloc(rows * sizeof(int*));
for(int i = 0; i < rows; i++) {
hostArray[i] = (int*)malloc(columns * sizeof(int));
}
for(int i = 0; i < rows; i++) {
cudaMemcpy(hostArray[i], deviceArray[i], columns * sizeof(int), cudaMemcpyDeviceToHost);
}
for(int i = 0 ; i < rows; i++) {
for(int j = 0 ; j < columns; j++) {
if(test[hostArray[i][j]] == false) {
test[hostArray[i][j]] = true;
} else {
fprintf(stderr, "\nRow: %d, Col: %d, Val: %d, Message: %s\n", i, j, hostArray[i][j], message);
for(int k = 0; k < columns; k++) {
fprintf(stderr, "%d -> ", hostArray[i][k]);
}
getchar();
return 1;
}
}
for(int i = 0 ; i < columns ; i++) {
test[i] = false;
}
}
free(test);
for(int i = 0; i < rows; i++) {
free(hostArray[i]);
}
free(hostArray);
}
#endif |
f0c1b382e8b1009862eff2a019f515b67781490a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tests/utilities/cudf_gtest.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/copy_if_else.cuh>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/type_lists.hpp>
#include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/column_utilities.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
template <typename T>
struct CopyTest : public cudf::test::BaseFixture {};
TYPED_TEST_CASE(CopyTest, cudf::test::FixedWidthTypes);
// to keep names shorter
#define wrapper cudf::test::fixed_width_column_wrapper
using bool_wrapper = wrapper<cudf::experimental::bool8>;
TYPED_TEST(CopyTest, CopyIfElseTestShort)
{
using T = TypeParam;
// short one. < 1 warp/bitmask length
int num_els = 4;
bool mask[] = { 1, 0, 0, 0 };
bool_wrapper mask_w(mask, mask + num_els);
T lhs[] = { 5, 5, 5, 5 };
bool lhs_v[] = { 1, 1, 1, 1 };
wrapper<T> lhs_w(lhs, lhs + num_els, lhs_v);
T rhs[] = { 6, 6, 6, 6 };
bool rhs_v[] = { 1, 1, 1, 1 };
wrapper<T> rhs_w(rhs, rhs + num_els, rhs_v);
T expected[] = { 5, 6, 6, 6 };
// bool exp_v[] = { 1, 1, 1, 1 };
wrapper<T> expected_w(expected, expected + num_els);
auto out = cudf::experimental::copy_if_else(lhs_w, rhs_w, mask_w);
cudf::test::expect_columns_equal(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestManyNulls)
{
using T = TypeParam;
// bunch of nulls in output, non-aligned # of elements
int num_els = 7;
bool mask[] = { 1, 0, 0, 0, 0, 0, 1 };
bool_wrapper mask_w(mask, mask + num_els);
T lhs[] = { 5, 5, 5, 5, 5, 5, 5 };
bool lhs_v[] = { 1, 1, 1, 1, 1, 1, 1 };
wrapper<T> lhs_w(lhs, lhs + num_els, lhs_v);
T rhs[] = { 6, 6, 6, 6, 6, 6, 6 };
bool rhs_v[] = { 1, 0, 0, 0, 0, 0, 1 };
wrapper<T> rhs_w(rhs, rhs + num_els, rhs_v);
T expected[] = { 5, 6, 6, 6, 6, 6, 5 };
bool exp_v[] = { 1, 0, 0, 0, 0, 0, 1 };
wrapper<T> expected_w(expected, expected + num_els, exp_v);
auto out = cudf::experimental::copy_if_else(lhs_w, rhs_w, mask_w);
cudf::test::expect_columns_equal(out->view(), expected_w);
}
struct copy_if_else_tiny_grid_functor {
template <typename T, typename Filter>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
Filter filter,
rmm::mr::device_memory_resource *mr,
hipStream_t stream)
{
// output
std::unique_ptr<cudf::column> out = cudf::experimental::allocate_like(lhs, lhs.size(), cudf::experimental::mask_allocation_policy::RETAIN, mr);
// device views
auto lhs_dv = cudf::column_device_view::create(lhs);
auto rhs_dv = cudf::column_device_view::create(rhs);
auto out_dv = cudf::mutable_column_device_view::create(*out);
// call the kernel with an artificially small grid
hipLaunchKernelGGL(( cudf::experimental::detail::copy_if_else_kernel<32, T, Filter, false>), dim3(1), dim3(32), 0, stream,
*lhs_dv, *rhs_dv, filter, *out_dv, nullptr);
return out;
}
};
std::unique_ptr<cudf::column> tiny_grid_launch(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& boolean_mask)
{
auto bool_mask_device_p = cudf::column_device_view::create(boolean_mask);
cudf::column_device_view bool_mask_device = *bool_mask_device_p;
auto filter = [bool_mask_device] __device__ (cudf::size_type i) { return bool_mask_device.element<cudf::experimental::bool8>(i); };
return cudf::experimental::type_dispatcher(lhs.type(),
copy_if_else_tiny_grid_functor{},
lhs,
rhs,
filter,
rmm::mr::get_default_resource(),
(hipStream_t)0);
}
TYPED_TEST(CopyTest, CopyIfElseTestTinyGrid)
{
using T = TypeParam;
// make sure we span at least 2 warps
int num_els = 64;
bool mask[] = { 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
bool_wrapper mask_w(mask, mask + num_els);
T lhs[] = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 };
wrapper<T> lhs_w(lhs, lhs + num_els);
T rhs[] = { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 };
wrapper<T> rhs_w(rhs, rhs + num_els);
T expected[] = { 5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 };
wrapper<T> expected_w(expected, expected + num_els);
auto out = tiny_grid_launch(lhs_w, rhs_w, mask_w);
cudf::test::expect_columns_equal(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestLong)
{
using T = TypeParam;
// make sure we span at least 2 warps
int num_els = 64;
bool mask[] = { 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
bool_wrapper mask_w(mask, mask + num_els);
T lhs[] = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 };
bool lhs_v[] = { 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
wrapper<T> lhs_w(lhs, lhs + num_els, lhs_v);
T rhs[] = { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 };
bool rhs_v[] = { 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
wrapper<T> rhs_w(rhs, rhs + num_els, rhs_v);
T expected[] = { 5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 };
bool exp_v[] = { 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
wrapper<T> expected_w(expected, expected + num_els, exp_v);
auto out = cudf::experimental::copy_if_else(lhs_w, rhs_w, mask_w);
cudf::test::expect_columns_equal(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestEmptyInputs)
{
using T = TypeParam;
int num_els = 0;
bool mask[] = {};
bool_wrapper mask_w(mask, mask + num_els);
T lhs[] = {};
wrapper<T> lhs_w(lhs, lhs + num_els);
T rhs[] = {};
wrapper<T> rhs_w(rhs, rhs + num_els);
T expected[] = {};
wrapper<T> expected_w(expected, expected + num_els);
auto out = cudf::experimental::copy_if_else(lhs_w, rhs_w, mask_w);
cudf::test::expect_columns_equal(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = { 1, 0, 1, 1 };
bool_wrapper mask_w(mask, mask + num_els);
T lhs[] = { 5, 5, 5, 5 };
wrapper<T> lhs_w(lhs, lhs + num_els);
T rhs[] = { 6, 6, 6, 6 };
wrapper<T> rhs_w(rhs, rhs + num_els, mask);
T expected[] = { 5, 6, 5, 5 };
wrapper<T> expected_w(expected, expected + num_els, mask);
auto out = cudf::experimental::copy_if_else(lhs_w, rhs_w, mask_w);
cudf::test::expect_columns_equal(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseBadInputLength)
{
using T = TypeParam;
int num_els = 4;
// mask length mismatch
{
bool mask[] = { 1, 1, 1, 1 };
bool_wrapper mask_w(mask, mask + 3);
T lhs[] = { 5, 5, 5, 5 };
wrapper<T> lhs_w(lhs, lhs + num_els, mask);
T rhs[] = { 6, 6, 6, 6 };
wrapper<T> rhs_w(rhs, rhs + num_els, mask);
EXPECT_THROW( cudf::experimental::copy_if_else(lhs_w, rhs_w, mask_w),
cudf::logic_error);
}
// column length mismatch
{
bool mask[] = { 1, 1, 1, 1 };
bool_wrapper mask_w(mask, mask + num_els);
T lhs[] = { 5, 5, 5 };
wrapper<T> lhs_w(lhs, lhs + 3, mask);
T rhs[] = { 6, 6, 6, 6 };
wrapper<T> rhs_w(rhs, rhs + num_els, mask);
EXPECT_THROW( cudf::experimental::copy_if_else(lhs_w, rhs_w, mask_w),
cudf::logic_error);
}
}
struct CopyTestUntyped : public cudf::test::BaseFixture {};
TEST_F(CopyTestUntyped, CopyIfElseTypeMismatch)
{
int num_els = 4;
bool mask[] = { 1, 1, 1, 1 };
bool_wrapper mask_w(mask, mask + num_els);
cudf::column _mask(mask_w);
float lhs[] = { 5, 5, 5, 5 };
wrapper<float> lhs_w(lhs, lhs + num_els, mask);
cudf::column _lhs(lhs_w);
int rhs[] = { 6, 6, 6, 6 };
wrapper<int> rhs_w(rhs, rhs + num_els, mask);
cudf::column _rhs(rhs_w);
EXPECT_THROW( cudf::experimental::copy_if_else(_lhs, _rhs, _mask),
cudf::logic_error);
}
struct StringsCopyIfElseTest : public cudf::test::BaseFixture {};
struct filter_test_fn
{
__host__ __device__ bool operator()(cudf::size_type idx) const
{
return static_cast<bool>(idx % 2);
}
};
TEST_F(StringsCopyIfElseTest, CopyIfElse)
{
std::vector<const char*> h_strings1{ "eee", "bb", "", "aa", "bbb", "" };
cudf::test::strings_column_wrapper strings1( h_strings1.begin(), h_strings1.end() );
auto lhs = cudf::strings_column_view(strings1);
std::vector<const char*> h_strings2{ "zz", "", "yyy", "w", "", "ooo" };
cudf::test::strings_column_wrapper strings2( h_strings2.begin(), h_strings2.end() );
auto rhs = cudf::strings_column_view(strings2);
auto results = cudf::strings::detail::copy_if_else(lhs,rhs,filter_test_fn{});
std::vector<const char*> h_expected;
for( cudf::size_type idx=0; idx < static_cast<cudf::size_type>(h_strings1.size()); ++idx )
{
if( filter_test_fn()(idx) )
h_expected.push_back( h_strings1[idx] );
else
h_expected.push_back( h_strings2[idx] );
}
cudf::test::strings_column_wrapper expected( h_expected.begin(), h_expected.end());
cudf::test::expect_columns_equal(*results,expected);
}
| f0c1b382e8b1009862eff2a019f515b67781490a.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <tests/utilities/cudf_gtest.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/copy_if_else.cuh>
#include <tests/utilities/base_fixture.hpp>
#include <tests/utilities/type_lists.hpp>
#include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <tests/utilities/column_wrapper.hpp>
#include <tests/utilities/column_utilities.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
template <typename T>
struct CopyTest : public cudf::test::BaseFixture {};
TYPED_TEST_CASE(CopyTest, cudf::test::FixedWidthTypes);
// to keep names shorter
#define wrapper cudf::test::fixed_width_column_wrapper
using bool_wrapper = wrapper<cudf::experimental::bool8>;
TYPED_TEST(CopyTest, CopyIfElseTestShort)
{
using T = TypeParam;
// short one. < 1 warp/bitmask length
int num_els = 4;
bool mask[] = { 1, 0, 0, 0 };
bool_wrapper mask_w(mask, mask + num_els);
T lhs[] = { 5, 5, 5, 5 };
bool lhs_v[] = { 1, 1, 1, 1 };
wrapper<T> lhs_w(lhs, lhs + num_els, lhs_v);
T rhs[] = { 6, 6, 6, 6 };
bool rhs_v[] = { 1, 1, 1, 1 };
wrapper<T> rhs_w(rhs, rhs + num_els, rhs_v);
T expected[] = { 5, 6, 6, 6 };
// bool exp_v[] = { 1, 1, 1, 1 };
wrapper<T> expected_w(expected, expected + num_els);
auto out = cudf::experimental::copy_if_else(lhs_w, rhs_w, mask_w);
cudf::test::expect_columns_equal(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestManyNulls)
{
using T = TypeParam;
// bunch of nulls in output, non-aligned # of elements
int num_els = 7;
bool mask[] = { 1, 0, 0, 0, 0, 0, 1 };
bool_wrapper mask_w(mask, mask + num_els);
T lhs[] = { 5, 5, 5, 5, 5, 5, 5 };
bool lhs_v[] = { 1, 1, 1, 1, 1, 1, 1 };
wrapper<T> lhs_w(lhs, lhs + num_els, lhs_v);
T rhs[] = { 6, 6, 6, 6, 6, 6, 6 };
bool rhs_v[] = { 1, 0, 0, 0, 0, 0, 1 };
wrapper<T> rhs_w(rhs, rhs + num_els, rhs_v);
T expected[] = { 5, 6, 6, 6, 6, 6, 5 };
bool exp_v[] = { 1, 0, 0, 0, 0, 0, 1 };
wrapper<T> expected_w(expected, expected + num_els, exp_v);
auto out = cudf::experimental::copy_if_else(lhs_w, rhs_w, mask_w);
cudf::test::expect_columns_equal(out->view(), expected_w);
}
struct copy_if_else_tiny_grid_functor {
template <typename T, typename Filter>
std::unique_ptr<cudf::column> operator()(cudf::column_view const& lhs,
cudf::column_view const& rhs,
Filter filter,
rmm::mr::device_memory_resource *mr,
cudaStream_t stream)
{
// output
std::unique_ptr<cudf::column> out = cudf::experimental::allocate_like(lhs, lhs.size(), cudf::experimental::mask_allocation_policy::RETAIN, mr);
// device views
auto lhs_dv = cudf::column_device_view::create(lhs);
auto rhs_dv = cudf::column_device_view::create(rhs);
auto out_dv = cudf::mutable_column_device_view::create(*out);
// call the kernel with an artificially small grid
cudf::experimental::detail::copy_if_else_kernel<32, T, Filter, false><<<1, 32, 0, stream>>>(
*lhs_dv, *rhs_dv, filter, *out_dv, nullptr);
return out;
}
};
std::unique_ptr<cudf::column> tiny_grid_launch(cudf::column_view const& lhs, cudf::column_view const& rhs, cudf::column_view const& boolean_mask)
{
auto bool_mask_device_p = cudf::column_device_view::create(boolean_mask);
cudf::column_device_view bool_mask_device = *bool_mask_device_p;
auto filter = [bool_mask_device] __device__ (cudf::size_type i) { return bool_mask_device.element<cudf::experimental::bool8>(i); };
return cudf::experimental::type_dispatcher(lhs.type(),
copy_if_else_tiny_grid_functor{},
lhs,
rhs,
filter,
rmm::mr::get_default_resource(),
(cudaStream_t)0);
}
TYPED_TEST(CopyTest, CopyIfElseTestTinyGrid)
{
using T = TypeParam;
// make sure we span at least 2 warps
int num_els = 64;
bool mask[] = { 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
bool_wrapper mask_w(mask, mask + num_els);
T lhs[] = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 };
wrapper<T> lhs_w(lhs, lhs + num_els);
T rhs[] = { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 };
wrapper<T> rhs_w(rhs, rhs + num_els);
T expected[] = { 5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 };
wrapper<T> expected_w(expected, expected + num_els);
auto out = tiny_grid_launch(lhs_w, rhs_w, mask_w);
cudf::test::expect_columns_equal(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestLong)
{
using T = TypeParam;
// make sure we span at least 2 warps
int num_els = 64;
bool mask[] = { 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
bool_wrapper mask_w(mask, mask + num_els);
T lhs[] = { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 };
bool lhs_v[] = { 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
wrapper<T> lhs_w(lhs, lhs + num_els, lhs_v);
T rhs[] = { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 };
bool rhs_v[] = { 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
wrapper<T> rhs_w(rhs, rhs + num_els, rhs_v);
T expected[] = { 5, 6, 5, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5,
6, 6, 6, 6, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5 };
bool exp_v[] = { 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
wrapper<T> expected_w(expected, expected + num_els, exp_v);
auto out = cudf::experimental::copy_if_else(lhs_w, rhs_w, mask_w);
cudf::test::expect_columns_equal(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseTestEmptyInputs)
{
using T = TypeParam;
int num_els = 0;
bool mask[] = {};
bool_wrapper mask_w(mask, mask + num_els);
T lhs[] = {};
wrapper<T> lhs_w(lhs, lhs + num_els);
T rhs[] = {};
wrapper<T> rhs_w(rhs, rhs + num_els);
T expected[] = {};
wrapper<T> expected_w(expected, expected + num_els);
auto out = cudf::experimental::copy_if_else(lhs_w, rhs_w, mask_w);
cudf::test::expect_columns_equal(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseMixedInputValidity)
{
using T = TypeParam;
int num_els = 4;
bool mask[] = { 1, 0, 1, 1 };
bool_wrapper mask_w(mask, mask + num_els);
T lhs[] = { 5, 5, 5, 5 };
wrapper<T> lhs_w(lhs, lhs + num_els);
T rhs[] = { 6, 6, 6, 6 };
wrapper<T> rhs_w(rhs, rhs + num_els, mask);
T expected[] = { 5, 6, 5, 5 };
wrapper<T> expected_w(expected, expected + num_els, mask);
auto out = cudf::experimental::copy_if_else(lhs_w, rhs_w, mask_w);
cudf::test::expect_columns_equal(out->view(), expected_w);
}
TYPED_TEST(CopyTest, CopyIfElseBadInputLength)
{
using T = TypeParam;
int num_els = 4;
// mask length mismatch
{
bool mask[] = { 1, 1, 1, 1 };
bool_wrapper mask_w(mask, mask + 3);
T lhs[] = { 5, 5, 5, 5 };
wrapper<T> lhs_w(lhs, lhs + num_els, mask);
T rhs[] = { 6, 6, 6, 6 };
wrapper<T> rhs_w(rhs, rhs + num_els, mask);
EXPECT_THROW( cudf::experimental::copy_if_else(lhs_w, rhs_w, mask_w),
cudf::logic_error);
}
// column length mismatch
{
bool mask[] = { 1, 1, 1, 1 };
bool_wrapper mask_w(mask, mask + num_els);
T lhs[] = { 5, 5, 5 };
wrapper<T> lhs_w(lhs, lhs + 3, mask);
T rhs[] = { 6, 6, 6, 6 };
wrapper<T> rhs_w(rhs, rhs + num_els, mask);
EXPECT_THROW( cudf::experimental::copy_if_else(lhs_w, rhs_w, mask_w),
cudf::logic_error);
}
}
struct CopyTestUntyped : public cudf::test::BaseFixture {};
TEST_F(CopyTestUntyped, CopyIfElseTypeMismatch)
{
int num_els = 4;
bool mask[] = { 1, 1, 1, 1 };
bool_wrapper mask_w(mask, mask + num_els);
cudf::column _mask(mask_w);
float lhs[] = { 5, 5, 5, 5 };
wrapper<float> lhs_w(lhs, lhs + num_els, mask);
cudf::column _lhs(lhs_w);
int rhs[] = { 6, 6, 6, 6 };
wrapper<int> rhs_w(rhs, rhs + num_els, mask);
cudf::column _rhs(rhs_w);
EXPECT_THROW( cudf::experimental::copy_if_else(_lhs, _rhs, _mask),
cudf::logic_error);
}
struct StringsCopyIfElseTest : public cudf::test::BaseFixture {};
struct filter_test_fn
{
__host__ __device__ bool operator()(cudf::size_type idx) const
{
return static_cast<bool>(idx % 2);
}
};
TEST_F(StringsCopyIfElseTest, CopyIfElse)
{
std::vector<const char*> h_strings1{ "eee", "bb", "", "aa", "bbb", "ééé" };
cudf::test::strings_column_wrapper strings1( h_strings1.begin(), h_strings1.end() );
auto lhs = cudf::strings_column_view(strings1);
std::vector<const char*> h_strings2{ "zz", "", "yyy", "w", "ééé", "ooo" };
cudf::test::strings_column_wrapper strings2( h_strings2.begin(), h_strings2.end() );
auto rhs = cudf::strings_column_view(strings2);
auto results = cudf::strings::detail::copy_if_else(lhs,rhs,filter_test_fn{});
std::vector<const char*> h_expected;
for( cudf::size_type idx=0; idx < static_cast<cudf::size_type>(h_strings1.size()); ++idx )
{
if( filter_test_fn()(idx) )
h_expected.push_back( h_strings1[idx] );
else
h_expected.push_back( h_strings2[idx] );
}
cudf::test::strings_column_wrapper expected( h_expected.begin(), h_expected.end());
cudf::test::expect_columns_equal(*results,expected);
}
|
0c4ab76c26ec2abb0b8d544bf8f68e0fc282715d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "compute/scalarproduct/scalarproduct_internal.h"
namespace magmadnn {
namespace internal {
template <typename T>
__global__ void kernel_scalarproduct_full_device(T alpha, T *arr, T *out, unsigned int arr_size) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < arr_size; i += stride) {
out[i] = alpha * arr[i];
}
}
template <typename T>
void scalarproduct_full_device(T alpha, Tensor<T> *x, Tensor<T> *out) {
hipLaunchKernelGGL(( kernel_scalarproduct_full_device), dim3(1), dim3(x->get_size()), 0, 0, alpha, x->get_ptr(), out->get_ptr(), x->get_size());
}
template void scalarproduct_full_device(int alpha, Tensor<int> *x, Tensor<int> *out);
template void scalarproduct_full_device(float alpha, Tensor<float> *x, Tensor<float> *out);
template void scalarproduct_full_device(double alpha, Tensor<double> *x, Tensor<double> *out);
template <typename T>
void scalarproduct_full_device(hipStream_t custream, T alpha, Tensor<T> *x, Tensor<T> *out) {
hipLaunchKernelGGL(( kernel_scalarproduct_full_device)
, dim3(1), dim3(x->get_size()), 0, custream,
alpha, x->get_ptr(), out->get_ptr(), x->get_size());
}
template void scalarproduct_full_device(hipStream_t custream, int alpha, Tensor<int> *x, Tensor<int> *out);
template void scalarproduct_full_device(hipStream_t custream, float alpha, Tensor<float> *x, Tensor<float> *out);
template void scalarproduct_full_device(hipStream_t custream, double alpha, Tensor<double> *x, Tensor<double> *out);
} // namespace internal
} // namespace magmadnn
| 0c4ab76c26ec2abb0b8d544bf8f68e0fc282715d.cu |
#include "compute/scalarproduct/scalarproduct_internal.h"
namespace magmadnn {
namespace internal {
template <typename T>
__global__ void kernel_scalarproduct_full_device(T alpha, T *arr, T *out, unsigned int arr_size) {
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < arr_size; i += stride) {
out[i] = alpha * arr[i];
}
}
template <typename T>
void scalarproduct_full_device(T alpha, Tensor<T> *x, Tensor<T> *out) {
kernel_scalarproduct_full_device<<<1, x->get_size()>>>(alpha, x->get_ptr(), out->get_ptr(), x->get_size());
}
template void scalarproduct_full_device(int alpha, Tensor<int> *x, Tensor<int> *out);
template void scalarproduct_full_device(float alpha, Tensor<float> *x, Tensor<float> *out);
template void scalarproduct_full_device(double alpha, Tensor<double> *x, Tensor<double> *out);
template <typename T>
void scalarproduct_full_device(cudaStream_t custream, T alpha, Tensor<T> *x, Tensor<T> *out) {
kernel_scalarproduct_full_device
<<<1, x->get_size(), 0, custream>>>
(alpha, x->get_ptr(), out->get_ptr(), x->get_size());
}
template void scalarproduct_full_device(cudaStream_t custream, int alpha, Tensor<int> *x, Tensor<int> *out);
template void scalarproduct_full_device(cudaStream_t custream, float alpha, Tensor<float> *x, Tensor<float> *out);
template void scalarproduct_full_device(cudaStream_t custream, double alpha, Tensor<double> *x, Tensor<double> *out);
} // namespace internal
} // namespace magmadnn
|
2ddd3361694190285b5592c46ffbbb993cf56af3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by GS <sgazeos@gmail.com> on 3/21/2018.
//
#include <array/ResultSet.h>
#include <exceptions/cuda_exception.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <ops/declarable/helpers/matrix_diag_part.h>
#include <execution/cuda/LaunchDims.h>
namespace sd {
namespace ops {
namespace helpers {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// put diagonals from input batched matrices to output batched vectors
template <typename T>
static SD_KERNEL void matrixDiagPartKernel(void const* inputBuffer, void* outputBuffer, sd::LongType numTads,
sd::LongType inputLength, const sd::LongType* tadOnlyInputShapeInfo,
const sd::LongType* tadInputOffsets,
const sd::LongType* tadOnlyOutputShapeInfo,
const sd::LongType* tadOutputOffsets) {
if(blockIdx.x >= numTads)
return;
auto outputBuffer2 = reinterpret_cast<T*>(outputBuffer);
auto inputBuffer2 = reinterpret_cast<T const*>(inputBuffer);
int totalThreads = blockDim.x;
for (sd::LongType i = blockIdx.x; i < numTads; i += gridDim.x) {
auto yOffset = tadInputOffsets[i];
auto xOffset = tadOutputOffsets[i];
for (sd::LongType j = threadIdx.x; j < inputLength; j += totalThreads) {
sd::LongType coords[2] = {j, j};
sd::LongType tadOffset = shape::getOffset(tadOnlyInputShapeInfo, coords);
*(reinterpret_cast<T*>(outputBuffer) + xOffset + shape::getIndexOffset(j, tadOnlyOutputShapeInfo)) =
*(reinterpret_cast<T const*>(inputBuffer) + yOffset + tadOffset);
}
}
}
//////////////////////////////////////////////////////////////////////////
// Returns a batched matrix tensor with new batched diagonal values.
// for detailed explanations please take a look on web page:
// https://www.tensorflow.org/api_docs/python/tf/matrix_set_diag
//
template <typename T>
static sd::Status _matrixDiagPart(sd::LaunchContext* context, const NDArray* input, NDArray* output) {
auto stream = context->getCudaStream();
auto listOut = output->allTensorsAlongDimension({output->rankOf() - 1});
auto listDiag = input->allTensorsAlongDimension({input->rankOf() - 2, input->rankOf() - 1});
if (listOut.size() != listDiag.size()) {
sd_printf("matrix_diag_part: Input matrix has wrong shape.", "");
return sd::Status::VALIDATION;
}
sd::LongType lastDimension = sd::math::sd_min(input->sizeAt(-2), input->sizeAt(-1));
sd::LongType dims = output->rankOf() - 1;
std::vector<sd::LongType> *dimsToExclude = ShapeUtils::evalDimsToExclude(output->rankOf(), 1,&dims);
const sd::LongType numTads =
ShapeUtils::getNumOfSubArrs(input->shapeInfo(),*dimsToExclude);
std::vector<sd::LongType> outputDims({output->rankOf() - 1});
std::vector<sd::LongType> inputDims({input->rankOf() - 2, input->rankOf() - 1});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), &inputDims);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), &outputDims);
if (!output->isActualOnDeviceSide()) input->syncToDevice();
if (!input->isActualOnDeviceSide()) input->syncToDevice();
dim3 launchDims = getLaunchDims("matrixDiag");
hipLaunchKernelGGL(( matrixDiagPartKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream,
input->specialBuffer(), output->specialBuffer(),numTads, lastDimension, packX->specialShapeInfo(),
packX->specialOffsets(), packZ->specialShapeInfo(), packZ->specialOffsets());
delete dimsToExclude;
return sd::Status::OK;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// caller for _matrixDiagPart
//
sd::Status matrixDiagPart(sd::LaunchContext* context, const NDArray* input, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), return _matrixDiagPart, (context, input, output), SD_COMMON_TYPES);
}
BUILD_SINGLE_TEMPLATE(template sd::Status _matrixDiagPart,
(sd::LaunchContext * context, const NDArray* input, NDArray* output), SD_COMMON_TYPES);
} // namespace helpers
} // namespace ops
} // namespace sd
| 2ddd3361694190285b5592c46ffbbb993cf56af3.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by GS <sgazeos@gmail.com> on 3/21/2018.
//
#include <array/ResultSet.h>
#include <exceptions/cuda_exception.h>
#include <helpers/ConstantTadHelper.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <ops/declarable/helpers/matrix_diag_part.h>
#include <execution/cuda/LaunchDims.h>
namespace sd {
namespace ops {
namespace helpers {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// put diagonals from input batched matrices to output batched vectors
template <typename T>
static SD_KERNEL void matrixDiagPartKernel(void const* inputBuffer, void* outputBuffer, sd::LongType numTads,
sd::LongType inputLength, const sd::LongType* tadOnlyInputShapeInfo,
const sd::LongType* tadInputOffsets,
const sd::LongType* tadOnlyOutputShapeInfo,
const sd::LongType* tadOutputOffsets) {
if(blockIdx.x >= numTads)
return;
auto outputBuffer2 = reinterpret_cast<T*>(outputBuffer);
auto inputBuffer2 = reinterpret_cast<T const*>(inputBuffer);
int totalThreads = blockDim.x;
for (sd::LongType i = blockIdx.x; i < numTads; i += gridDim.x) {
auto yOffset = tadInputOffsets[i];
auto xOffset = tadOutputOffsets[i];
for (sd::LongType j = threadIdx.x; j < inputLength; j += totalThreads) {
sd::LongType coords[2] = {j, j};
sd::LongType tadOffset = shape::getOffset(tadOnlyInputShapeInfo, coords);
*(reinterpret_cast<T*>(outputBuffer) + xOffset + shape::getIndexOffset(j, tadOnlyOutputShapeInfo)) =
*(reinterpret_cast<T const*>(inputBuffer) + yOffset + tadOffset);
}
}
}
//////////////////////////////////////////////////////////////////////////
// Returns a batched matrix tensor with new batched diagonal values.
// for detailed explanations please take a look on web page:
// https://www.tensorflow.org/api_docs/python/tf/matrix_set_diag
//
template <typename T>
static sd::Status _matrixDiagPart(sd::LaunchContext* context, const NDArray* input, NDArray* output) {
auto stream = context->getCudaStream();
auto listOut = output->allTensorsAlongDimension({output->rankOf() - 1});
auto listDiag = input->allTensorsAlongDimension({input->rankOf() - 2, input->rankOf() - 1});
if (listOut.size() != listDiag.size()) {
sd_printf("matrix_diag_part: Input matrix has wrong shape.", "");
return sd::Status::VALIDATION;
}
sd::LongType lastDimension = sd::math::sd_min(input->sizeAt(-2), input->sizeAt(-1));
sd::LongType dims = output->rankOf() - 1;
std::vector<sd::LongType> *dimsToExclude = ShapeUtils::evalDimsToExclude(output->rankOf(), 1,&dims);
const sd::LongType numTads =
ShapeUtils::getNumOfSubArrs(input->shapeInfo(),*dimsToExclude);
std::vector<sd::LongType> outputDims({output->rankOf() - 1});
std::vector<sd::LongType> inputDims({input->rankOf() - 2, input->rankOf() - 1});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), &inputDims);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), &outputDims);
if (!output->isActualOnDeviceSide()) input->syncToDevice();
if (!input->isActualOnDeviceSide()) input->syncToDevice();
dim3 launchDims = getLaunchDims("matrixDiag");
matrixDiagPartKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(
input->specialBuffer(), output->specialBuffer(),numTads, lastDimension, packX->specialShapeInfo(),
packX->specialOffsets(), packZ->specialShapeInfo(), packZ->specialOffsets());
delete dimsToExclude;
return sd::Status::OK;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// caller for _matrixDiagPart
//
sd::Status matrixDiagPart(sd::LaunchContext* context, const NDArray* input, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), return _matrixDiagPart, (context, input, output), SD_COMMON_TYPES);
}
BUILD_SINGLE_TEMPLATE(template sd::Status _matrixDiagPart,
(sd::LaunchContext * context, const NDArray* input, NDArray* output), SD_COMMON_TYPES);
} // namespace helpers
} // namespace ops
} // namespace sd
|
f547eafd3b06b86f8b87724963d4f5e7b7e88d1b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void aKernel()
{
int idx = threadIdx.x;
int r1, r2, res_diff;
__shared__ int arr[512];
arr[idx] = idx;
printf("A: Thread %5d, value %5d\n", idx, arr[idx]);
__syncthreads();
r1 = arr[idx];
if (idx < 511) {
int temp = arr[idx + 1];
__syncthreads();
arr[idx] = temp;
}
r2 = arr[idx];
res_diff = r2 - r1;
printf("B: Thread %5d, value %5d, diff=%5d\n", idx, arr[idx], res_diff);
}
int main()
{
hipLaunchKernelGGL(( aKernel), dim3(1), dim3(512), 0, 0, );
return 0;
}
| f547eafd3b06b86f8b87724963d4f5e7b7e88d1b.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void aKernel()
{
int idx = threadIdx.x;
int r1, r2, res_diff;
__shared__ int arr[512];
arr[idx] = idx;
printf("A: Thread %5d, value %5d\n", idx, arr[idx]);
__syncthreads();
r1 = arr[idx];
if (idx < 511) {
int temp = arr[idx + 1];
__syncthreads();
arr[idx] = temp;
}
r2 = arr[idx];
res_diff = r2 - r1;
printf("B: Thread %5d, value %5d, diff=%5d\n", idx, arr[idx], res_diff);
}
int main()
{
aKernel<<<1, 512>>> ();
return 0;
}
|
25a229f0d406385af084a97d4e834b4bff4b8e39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void add(int* a, int* b, int* c)
{
*c = *a + *b;
}
int main(void)
{
int a, b, c; // host copies of data
int *d_a, *d_b, *d_c; // devices coipes of data
int size = sizeof(int);
printf("Integer size is %d bytes\n", size);
// Allocate space for device copies of data
hipMalloc((void**) &d_a, size);
hipMalloc((void**) &d_b, size);
hipMalloc((void**) &d_c, size);
// Setup integers
a = 2;
b = 7;
printf("a=%d; b=%d\n", a, b);
// Copy inputs to device
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
// Launch add() on device
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);
// Copy result to host
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
// Cleanup
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Print out results
printf("a + b = c\n");
printf("%d + %d = %d\n", a, b, c);
return 0;
}
| 25a229f0d406385af084a97d4e834b4bff4b8e39.cu | #include <stdio.h>
__global__ void add(int* a, int* b, int* c)
{
*c = *a + *b;
}
int main(void)
{
int a, b, c; // host copies of data
int *d_a, *d_b, *d_c; // devices coipes of data
int size = sizeof(int);
printf("Integer size is %d bytes\n", size);
// Allocate space for device copies of data
cudaMalloc((void**) &d_a, size);
cudaMalloc((void**) &d_b, size);
cudaMalloc((void**) &d_c, size);
// Setup integers
a = 2;
b = 7;
printf("a=%d; b=%d\n", a, b);
// Copy inputs to device
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
// Launch add() on device
add<<<1,1>>>(d_a, d_b, d_c);
// Copy result to host
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
// Cleanup
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Print out results
printf("a + b = c\n");
printf("%d + %d = %d\n", a, b, c);
return 0;
}
|
a3661992a8e2230467a40dd0b5b9956524914870.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ex ess or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/device_uvector.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <benchmark/benchmark.h>
#include <hip/hip_runtime_api.h>
static void BM_UvectorSizeConstruction(benchmark::State& state)
{
rmm::mr::cuda_memory_resource cuda_mr{};
rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> mr{&cuda_mr};
rmm::mr::set_current_device_resource(&mr);
for (auto _ : state) { // NOLINT(clang-analyzer-deadcode.DeadStores)
rmm::device_uvector<int32_t> vec(state.range(0), rmm::cuda_stream_view{});
hipDeviceSynchronize();
}
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
rmm::mr::set_current_device_resource(nullptr);
}
BENCHMARK(BM_UvectorSizeConstruction)
->RangeMultiplier(10) // NOLINT
->Range(10'000, 1'000'000'000) // NOLINT
->Unit(benchmark::kMicrosecond);
static void BM_ThrustVectorSizeConstruction(benchmark::State& state)
{
rmm::mr::cuda_memory_resource cuda_mr{};
rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> mr{&cuda_mr};
rmm::mr::set_current_device_resource(&mr);
for (auto _ : state) { // NOLINT(clang-analyzer-deadcode.DeadStores)
rmm::device_vector<int32_t> vec(state.range(0));
hipDeviceSynchronize();
}
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
rmm::mr::set_current_device_resource(nullptr);
}
BENCHMARK(BM_ThrustVectorSizeConstruction)
->RangeMultiplier(10) // NOLINT
->Range(10'000, 1'000'000'000) // NOLINT
->Unit(benchmark::kMicrosecond);
BENCHMARK_MAIN();
| a3661992a8e2230467a40dd0b5b9956524914870.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either ex ess or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/device_uvector.hpp>
#include <rmm/device_vector.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/per_device_resource.hpp>
#include <rmm/mr/device/pool_memory_resource.hpp>
#include <benchmark/benchmark.h>
#include <cuda_runtime_api.h>
static void BM_UvectorSizeConstruction(benchmark::State& state)
{
rmm::mr::cuda_memory_resource cuda_mr{};
rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> mr{&cuda_mr};
rmm::mr::set_current_device_resource(&mr);
for (auto _ : state) { // NOLINT(clang-analyzer-deadcode.DeadStores)
rmm::device_uvector<int32_t> vec(state.range(0), rmm::cuda_stream_view{});
cudaDeviceSynchronize();
}
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
rmm::mr::set_current_device_resource(nullptr);
}
BENCHMARK(BM_UvectorSizeConstruction)
->RangeMultiplier(10) // NOLINT
->Range(10'000, 1'000'000'000) // NOLINT
->Unit(benchmark::kMicrosecond);
static void BM_ThrustVectorSizeConstruction(benchmark::State& state)
{
rmm::mr::cuda_memory_resource cuda_mr{};
rmm::mr::pool_memory_resource<rmm::mr::cuda_memory_resource> mr{&cuda_mr};
rmm::mr::set_current_device_resource(&mr);
for (auto _ : state) { // NOLINT(clang-analyzer-deadcode.DeadStores)
rmm::device_vector<int32_t> vec(state.range(0));
cudaDeviceSynchronize();
}
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()));
rmm::mr::set_current_device_resource(nullptr);
}
BENCHMARK(BM_ThrustVectorSizeConstruction)
->RangeMultiplier(10) // NOLINT
->Range(10'000, 1'000'000'000) // NOLINT
->Unit(benchmark::kMicrosecond);
BENCHMARK_MAIN();
|
0f67fb41432d4cdcf2ab28b1f0b4c8d760034f22.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <time.h>
#include <sys/time.h>
#include <vector>
#include <string>
#include <iostream>
#include <gtest/gtest.h>
#include <utilities/legacy/error_utils.hpp>
#include <cuspatial/soa_readers.hpp>
#include <cuspatial/hausdorff.hpp>
#include <utility/utility.hpp>
#include "hausdorff_util.h"
#include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <tests/utilities/legacy/cudf_test_fixtures.h>
struct HausdorffCompare : public GdfTest
{
gdf_column pnt_x,pnt_y,cnt;
size_t free_mem = 0, total_mem = 0;
void set_initialize(const char *point_fn, const char *cnt_fn)
{
hipMemGetInfo(&free_mem, &total_mem);
std::cout<<"GPU total_mem="<<total_mem<<std::endl;
std::cout<<"beginning GPU free_mem="<<free_mem<<std::endl;
struct timeval t0,t1;
gettimeofday(&t0, nullptr);
auto points=cuspatial::read_xy_points_soa(point_fn);
pnt_x=points.first;
pnt_y=points.second;
cnt=cuspatial::read_uint32_soa(cnt_fn);
gettimeofday(&t1, nullptr);
float data_load_time=cuspatial::calc_time("point/cnt data loading time=", t0,t1);
CUDF_EXPECTS(pnt_x.size>0 && pnt_y.size>0 && cnt.size>=0,"invalid # of points/trajectories");
CUDF_EXPECTS(pnt_x.size==pnt_y.size, "x and y columns must have the same size");
CUDF_EXPECTS(pnt_y.size >=cnt.size ,"a point set must have at least one point");
}
};
#if 0 // disable until data files are available
TEST_F(HausdorffCompare, hausdorfftest)
{
//currently using hard coded paths; to be updated
std::string point_fn =std::string("/home/jianting/trajcode/locust256.coor");
std::string cnt_fn =std::string("/home/jianting/trajcode/locust256.objcnt");
//initializaiton
this->set_initialize(point_fn.c_str(),cnt_fn.c_str());
//run cuspatial::directed_hausdorff_distance twice
struct timeval t0,t1;
gettimeofday(&t0, nullptr);
gdf_column dist=cuspatial::directed_hausdorff_distance(this->pnt_x,this->pnt_y, this->cnt);
gettimeofday(&t1, nullptr);
float gpu_hausdorff_time=cuspatial::calc_time("GPU Hausdorff Distance time......",t0,t1);
int set_size=this->cnt.size;
int num_pair=dist.size;
assert(num_pair==set_size*set_size);
std::cout<<"num_pair="<<num_pair<<std::endl;
//transfer data to CPU and run on CPU
int num_pnt=this->pnt_x.size;
double *x_c=new double[num_pnt];
double *y_c=new double[num_pnt];
uint32_t *cnt_c=new uint32_t[set_size];
assert(x_c!=nullptr && y_c!=nullptr && cnt_c!=nullptr);
hipMemcpy(x_c,this->pnt_x.data ,num_pnt*sizeof(double) , hipMemcpyDeviceToHost);
hipMemcpy(y_c,this->pnt_y.data ,num_pnt*sizeof(double) , hipMemcpyDeviceToHost);
hipMemcpy(cnt_c,this->cnt.data ,set_size*sizeof(uint32_t) , hipMemcpyDeviceToHost);
//test only the first subset_size pairs on CPUs
int subset_size=100;
double *dist_c=nullptr;
hausdorff_test_sequential<double>(subset_size,x_c,y_c,cnt_c,dist_c);
assert(dist_c!=nullptr);
double *dist_h=new double[num_pair];
hipMemcpy(dist_h,dist.data ,num_pair*sizeof(double) , hipMemcpyDeviceToHost);
//verify the CPU results are the same as the two GPU results
int diff_cnt=0 ;
for(int i=0;i<subset_size;i++)
{
for(int j=0;j<subset_size;j++)
{
int p1=i*subset_size+j;
int p2=i*set_size+j;
if(fabs(dist_c[p1]-dist_h[p2])>0.00001)
{
//std::cout<<"diff:("<<i<<","<<j<<") "<<dist_c[p1]<<" "<<dist_h[p2]<<std::endl;
diff_cnt++;
}
}
}
if(diff_cnt==0)
std::cout<<"GPU and CPU results are identical...................OK"<<std::endl;
else
std::cout<<"# of GPU and CPU diffs="<<diff_cnt<<std::endl;
hipMemGetInfo(&this->free_mem, &this->total_mem);
std::cout<<"ending GPU free mem "<<this->free_mem<<std::endl;
}
#endif
| 0f67fb41432d4cdcf2ab28b1f0b4c8d760034f22.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <time.h>
#include <sys/time.h>
#include <vector>
#include <string>
#include <iostream>
#include <gtest/gtest.h>
#include <utilities/legacy/error_utils.hpp>
#include <cuspatial/soa_readers.hpp>
#include <cuspatial/hausdorff.hpp>
#include <utility/utility.hpp>
#include "hausdorff_util.h"
#include <tests/utilities/legacy/cudf_test_utils.cuh>
#include <tests/utilities/legacy/cudf_test_fixtures.h>
struct HausdorffCompare : public GdfTest
{
gdf_column pnt_x,pnt_y,cnt;
size_t free_mem = 0, total_mem = 0;
void set_initialize(const char *point_fn, const char *cnt_fn)
{
cudaMemGetInfo(&free_mem, &total_mem);
std::cout<<"GPU total_mem="<<total_mem<<std::endl;
std::cout<<"beginning GPU free_mem="<<free_mem<<std::endl;
struct timeval t0,t1;
gettimeofday(&t0, nullptr);
auto points=cuspatial::read_xy_points_soa(point_fn);
pnt_x=points.first;
pnt_y=points.second;
cnt=cuspatial::read_uint32_soa(cnt_fn);
gettimeofday(&t1, nullptr);
float data_load_time=cuspatial::calc_time("point/cnt data loading time=", t0,t1);
CUDF_EXPECTS(pnt_x.size>0 && pnt_y.size>0 && cnt.size>=0,"invalid # of points/trajectories");
CUDF_EXPECTS(pnt_x.size==pnt_y.size, "x and y columns must have the same size");
CUDF_EXPECTS(pnt_y.size >=cnt.size ,"a point set must have at least one point");
}
};
#if 0 // disable until data files are available
TEST_F(HausdorffCompare, hausdorfftest)
{
//currently using hard coded paths; to be updated
std::string point_fn =std::string("/home/jianting/trajcode/locust256.coor");
std::string cnt_fn =std::string("/home/jianting/trajcode/locust256.objcnt");
//initializaiton
this->set_initialize(point_fn.c_str(),cnt_fn.c_str());
//run cuspatial::directed_hausdorff_distance twice
struct timeval t0,t1;
gettimeofday(&t0, nullptr);
gdf_column dist=cuspatial::directed_hausdorff_distance(this->pnt_x,this->pnt_y, this->cnt);
gettimeofday(&t1, nullptr);
float gpu_hausdorff_time=cuspatial::calc_time("GPU Hausdorff Distance time......",t0,t1);
int set_size=this->cnt.size;
int num_pair=dist.size;
assert(num_pair==set_size*set_size);
std::cout<<"num_pair="<<num_pair<<std::endl;
//transfer data to CPU and run on CPU
int num_pnt=this->pnt_x.size;
double *x_c=new double[num_pnt];
double *y_c=new double[num_pnt];
uint32_t *cnt_c=new uint32_t[set_size];
assert(x_c!=nullptr && y_c!=nullptr && cnt_c!=nullptr);
cudaMemcpy(x_c,this->pnt_x.data ,num_pnt*sizeof(double) , cudaMemcpyDeviceToHost);
cudaMemcpy(y_c,this->pnt_y.data ,num_pnt*sizeof(double) , cudaMemcpyDeviceToHost);
cudaMemcpy(cnt_c,this->cnt.data ,set_size*sizeof(uint32_t) , cudaMemcpyDeviceToHost);
//test only the first subset_size pairs on CPUs
int subset_size=100;
double *dist_c=nullptr;
hausdorff_test_sequential<double>(subset_size,x_c,y_c,cnt_c,dist_c);
assert(dist_c!=nullptr);
double *dist_h=new double[num_pair];
cudaMemcpy(dist_h,dist.data ,num_pair*sizeof(double) , cudaMemcpyDeviceToHost);
//verify the CPU results are the same as the two GPU results
int diff_cnt=0 ;
for(int i=0;i<subset_size;i++)
{
for(int j=0;j<subset_size;j++)
{
int p1=i*subset_size+j;
int p2=i*set_size+j;
if(fabs(dist_c[p1]-dist_h[p2])>0.00001)
{
//std::cout<<"diff:("<<i<<","<<j<<") "<<dist_c[p1]<<" "<<dist_h[p2]<<std::endl;
diff_cnt++;
}
}
}
if(diff_cnt==0)
std::cout<<"GPU and CPU results are identical...................OK"<<std::endl;
else
std::cout<<"# of GPU and CPU diffs="<<diff_cnt<<std::endl;
cudaMemGetInfo(&this->free_mem, &this->total_mem);
std::cout<<"ending GPU free mem "<<this->free_mem<<std::endl;
}
#endif
|
e52fd5a833f4a4753b5071c51d79c94d3453ac8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "header.h"
#define FILTER_PARAM (2.0f / (INNER_PI * INNER_PI ))
__global__ void convolution(float* PICTURE, float* RESULT, PictureParameter picParam){
extern __shared__ float shared_line[];
int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y;
int Row = by * blockDim.y + ty, Col = bx * blockDim.x + tx;
int share_mem_index;
int pic_height = gridDim.y * blockDim.y;
double weight;
float V2, SID2V2, PRE_U;
int FILTER_RADIUS = picParam.nu /2;
/* zero padding for left and right */
shared_line[tx] = 0;
shared_line[tx + FILTER_RADIUS * 2] = 0;
__syncthreads();
V2 = (Row - pic_height / 2) * picParam.dpv;
V2 = V2 * V2;
SID2V2 = picParam.sid * picParam.sid + V2;
int pixel_index = Row * picParam.nu + Col;
if (bx == 0){//left
PRE_U = (float)(blockDim.x - tx) * picParam.dpu;
weight = (double)(picParam.sid / sqrt(SID2V2 + PRE_U * PRE_U));
shared_line[blockDim.x + tx] = PICTURE[pixel_index] * weight;
PRE_U = (float) tx * picParam.dpu;
weight = (double)(picParam.sid / sqrt(SID2V2 + PRE_U * PRE_U));
shared_line[blockDim.x * 2 + tx] = PICTURE[pixel_index + blockDim.x] * weight;
}else if (bx == 1){//right
PRE_U = (float)tx * picParam.dpu;
weight = (double)(picParam.sid / sqrt(SID2V2 + PRE_U * PRE_U));
shared_line[blockDim.x + tx] = PICTURE[pixel_index - blockDim.x] * weight;
PRE_U = (float)(blockDim.x - tx) * picParam.dpu;
weight = (double)(picParam.sid / sqrt(SID2V2 + PRE_U * PRE_U));
shared_line[blockDim.x * 2 + tx] = PICTURE[pixel_index] * weight;
}
__syncthreads();
share_mem_index = blockDim.x * (bx + 1) + tx;
float temp = 0.0f, filter = 0.0f;
/*becuase every bank will be accessed by the adjecent thread, there is 2-way bank conflict
if you write:
temp += (shared_line[share_mem_index - j] + shared_line[share_mem_index + j]* filter);
*/
#pragma unroll
for (int j = 1; j < (PROJECTION_SIZE / 2); j++){
filter = FILTER_PARAM / (1.0f - 4.0f * j * j);
temp += shared_line[share_mem_index - j] * filter;
temp += shared_line[share_mem_index + j] * filter;
}
temp += shared_line[share_mem_index] * FILTER_PARAM; //own point
RESULT[pixel_index] = temp;
}
| e52fd5a833f4a4753b5071c51d79c94d3453ac8d.cu | #include "header.h"
#define FILTER_PARAM (2.0f / (INNER_PI * INNER_PI ))
__global__ void convolution(float* PICTURE, float* RESULT, PictureParameter picParam){
extern __shared__ float shared_line[];
int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y;
int Row = by * blockDim.y + ty, Col = bx * blockDim.x + tx;
int share_mem_index;
int pic_height = gridDim.y * blockDim.y;
double weight;
float V2, SID2V2, PRE_U;
int FILTER_RADIUS = picParam.nu /2;
/* zero padding for left and right */
shared_line[tx] = 0;
shared_line[tx + FILTER_RADIUS * 2] = 0;
__syncthreads();
V2 = (Row - pic_height / 2) * picParam.dpv;
V2 = V2 * V2;
SID2V2 = picParam.sid * picParam.sid + V2;
int pixel_index = Row * picParam.nu + Col;
if (bx == 0){//left
PRE_U = (float)(blockDim.x - tx) * picParam.dpu;
weight = (double)(picParam.sid / sqrt(SID2V2 + PRE_U * PRE_U));
shared_line[blockDim.x + tx] = PICTURE[pixel_index] * weight;
PRE_U = (float) tx * picParam.dpu;
weight = (double)(picParam.sid / sqrt(SID2V2 + PRE_U * PRE_U));
shared_line[blockDim.x * 2 + tx] = PICTURE[pixel_index + blockDim.x] * weight;
}else if (bx == 1){//right
PRE_U = (float)tx * picParam.dpu;
weight = (double)(picParam.sid / sqrt(SID2V2 + PRE_U * PRE_U));
shared_line[blockDim.x + tx] = PICTURE[pixel_index - blockDim.x] * weight;
PRE_U = (float)(blockDim.x - tx) * picParam.dpu;
weight = (double)(picParam.sid / sqrt(SID2V2 + PRE_U * PRE_U));
shared_line[blockDim.x * 2 + tx] = PICTURE[pixel_index] * weight;
}
__syncthreads();
share_mem_index = blockDim.x * (bx + 1) + tx;
float temp = 0.0f, filter = 0.0f;
/*becuase every bank will be accessed by the adjecent thread, there is 2-way bank conflict
if you write:
temp += (shared_line[share_mem_index - j] + shared_line[share_mem_index + j]* filter);
*/
#pragma unroll
for (int j = 1; j < (PROJECTION_SIZE / 2); j++){
filter = FILTER_PARAM / (1.0f - 4.0f * j * j);
temp += shared_line[share_mem_index - j] * filter;
temp += shared_line[share_mem_index + j] * filter;
}
temp += shared_line[share_mem_index] * FILTER_PARAM; //own point
RESULT[pixel_index] = temp;
}
|
ffd07165fe1d7f1e16c645448ef4ac3fc89b89d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Defines.h"
#include "cucommon.cuh"
#include <iostream>
void CUDA_CHECK_ERR(unsigned lineNumber, const char* fileName) {
hipError_t err = hipGetLastError();
if (err) std::cout << "Error " << err << " on line " << lineNumber << " of " << fileName << ": " << hipGetErrorString(err) << std::endl;
}
float getElapsed(hipEvent_t start, hipEvent_t stop) {
float elapsed;
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
__device__ int2 convert(int asize, int Qpx, float pin) {
float frac; float round;
//TODO add the 1 afterward?
frac = modf((pin+1)*asize, &round);
return make_int2(int(round), int(frac*Qpx));
}
__device__ void atomicAddWrap(float* address, float val)
{
#ifdef __NOATOMIC
*address+=val;
#else
#ifdef __CASATOMIC
float old_v, new_v;
do {
old_v = *address;
new_v = old_v + val;
} while (atomicCAS((unsigned *) address, __float_as_int(old_v), __float_as_int(new_v)) != __float_as_int(old_v));
#else
atomicAdd(address, val);
#endif
#endif
}
__device__ void atomicAddWrap(double* address, double val)
{
#ifdef __NOATOMIC
*address+=val;
#else
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
#endif
}
__device__ double make_zero(double2* in) { return (double)0.0;}
__device__ float make_zero(float2* in) { return (float)0.0;}
template <int gcf_dim, class CmplxType>
__global__ void
__launch_bounds__(256, 8)
grid_kernel(CmplxType* out, CmplxType* in, CmplxType* in_vals, size_t npts,
size_t img_dim, CmplxType* gcf) {
//TODO remove hard-coded 32
CmplxType __shared__ inbuff[32];
#ifdef __COMPUTE_GCF
double T = gcf[0].x;
double w = gcf[0].y;
float p1 = 2*3.1415926*w;
float p2 = p1*T;
#endif
for (int n = 32*blockIdx.x; n<npts; n+= 32*gridDim.x) {
__syncthreads();
int raw_idx = threadIdx.x+blockDim.x*threadIdx.y;
if (raw_idx < 32) inbuff[raw_idx]= in[n+raw_idx];
//if (threadIdx.x<32 && threadIdx.y==blockDim.y-1) invalbuff[threadIdx.x]=in_vals[n+threadIdx.x];
__syncthreads();
for (int q=threadIdx.y;q<32&&n+q<npts;q+=blockDim.y) {
CmplxType inn = inbuff[q];
int sub_x = floor(GCF_GRID*(inn.x-floor(inn.x)));
int sub_y = floor(GCF_GRID*(inn.y-floor(inn.y)));
int main_x = floor(inn.x);
int main_y = floor(inn.y);
auto sum_r = make_zero(out);
auto sum_i = make_zero(out);
for(int a = -(int)threadIdx.x+gcf_dim/2;a>-gcf_dim/2;a-=blockDim.x)
for(int b = gcf_dim/2;b>-gcf_dim/2;b--)
{
//auto this_img = img[main_x+a+img_dim*(main_y+b)];
//auto r1 = this_img.x;
//auto i1 = this_img.y;
if (main_x+a < 0 || main_y+b < 0 ||
main_x+a >= IMG_SIZE || main_y+b >= IMG_SIZE) {
} else {
//auto this_gcf = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
// gcf_dim*b+a]);
//auto r2 = this_gcf.x;
//auto i2 = this_gcf.y;
#ifdef __COMPUTE_GCF
//double phase = 2*3.1415926*w*(1-T*sqrt((main_x-inn.x)*(main_x-inn.x)+(main_y-inn.y)*(main_y-inn.y)));
//double r2 = sin(phase);
//double i2 = cos(phase);
float xsquare = (main_x-inn.x+sub_x*1.0/8.0);
float ysquare = (main_y-inn.y+sub_y*1.0/8.0);
xsquare *= xsquare;
ysquare *= ysquare;
float phase = p1 - p2*sqrt(xsquare + ysquare);
float r2,i2;
sincos(phase, &r2, &i2);
#else
auto r2 = gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].x;
auto i2 = gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].y;
#endif
//#pragma unroll
for (int p=0;p<POLARIZATIONS;p++) {
auto r1 = in_vals[(n+q)*POLARIZATIONS+p].x;
auto i1 = in_vals[(n+q)*POLARIZATIONS+p].y;
#ifdef DEBUG1
atomicAddWrap(&out[main_x+a+IMG_SIZE*(main_y+b)+p*IMG_SIZE*IMG_SIZE].x, 1.0);
atomicAddWrap(&out[main_x+a+IMG_SIZE*(main_y+b)+p*IMG_SIZE*IMG_SIZE].y, n+q);
#else
atomicAddWrap(&out[main_x+a+img_dim*(main_y+b)+p*img_dim*img_dim].x, r1*r2 - i1*i2);
atomicAddWrap(&out[main_x+a+img_dim*(main_y+b)+p*img_dim*img_dim].y, r1*i2 + r2*i1);
//out[main_x+a+img_dim*(main_y+b)].x += r1*r2 - i1*i2;
//out[main_x+a+img_dim*(main_y+b)].y += r1*i2 + r2*i1;
#endif
} //p
}
} //b
} //q
} //n
}
template <int gcf_dim, class CmplxType>
__global__ void
//__launch_bounds__(256, 6)
grid_kernel_basic(CmplxType* out, CmplxType* in, CmplxType* in_vals, size_t npts,
size_t img_dim, CmplxType* gcf) {
//TODO remove hard-coded 32
#ifdef __COMPUTE_GCF
double T = gcf[0].x;
double w = gcf[0].y;
float p1 = 2*3.1415926*w;
float p2 = p1*T;
#endif
for (int n = 32*blockIdx.x; n<npts; n+= 32*gridDim.x) {
for (int q=threadIdx.y;q<32;q+=blockDim.y) {
CmplxType inn = in[n+q];
int sub_x = floor(GCF_GRID*(inn.x-floor(inn.x)));
int sub_y = floor(GCF_GRID*(inn.y-floor(inn.y)));
int main_x = floor(inn.x);
int main_y = floor(inn.y);
auto sum_r = make_zero(out);
auto sum_i = make_zero(out);
for(int a = -(int)threadIdx.x+gcf_dim/2;a>-gcf_dim/2;a-=blockDim.x)
for(int b = gcf_dim/2;b>-gcf_dim/2;b--)
{
//auto this_img = img[main_x+a+img_dim*(main_y+b)];
//auto r1 = this_img.x;
//auto i1 = this_img.y;
auto r1 = in_vals[n+q].x;
auto i1 = in_vals[n+q].y;
if (main_x+a < 0 || main_y+b < 0 ||
main_x+a >= IMG_SIZE || main_y+b >= IMG_SIZE) {
r1=i1=0.0;
} else {
//auto this_gcf = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
// gcf_dim*b+a]);
//auto r2 = this_gcf.x;
//auto i2 = this_gcf.y;
#ifdef __COMPUTE_GCF
//double phase = 2*3.1415926*w*(1-T*sqrt((main_x-inn.x)*(main_x-inn.x)+(main_y-inn.y)*(main_y-inn.y)));
//double r2 = sin(phase);
//double i2 = cos(phase);
float xsquare = (main_x-inn.x+sub_x*1.0/8.0);
float ysquare = (main_y-inn.y+sub_y*1.0/8.0);
xsquare *= xsquare;
ysquare *= ysquare;
float phase = p1 - p2*sqrt(xsquare + ysquare);
float r2,i2;
sincos(phase, &r2, &i2);
#else
auto r2 = gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].x;
auto i2 = gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].y;
#endif
#ifdef DEBUG1
atomicAddWrap(&out[main_x+a+img_dim*(main_y+b)].x, n+q);
atomicAddWrap(&out[main_x+a+img_dim*(main_y+b)].y, gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x)+gcf_dim*b+a].y);
#else
atomicAddWrap(&out[main_x+a+img_dim*(main_y+b)].x, r1*r2 - i1*i2);
atomicAddWrap(&out[main_x+a+img_dim*(main_y+b)].y, r1*i2 + r2*i1);
#endif
}
}
#if 0
for(int s = blockDim.x < 16 ? blockDim.x : 16; s>0;s/=2) {
sum_r += __shfl_down(sum_r,s);
sum_i += __shfl_down(sum_i,s);
}
CmplxType tmp;
tmp.x = sum_r;
tmp.y = sum_i;
if (threadIdx.x == 0) {
out[n+q] = tmp;
}
#endif
}
}
}
template <int gcf_dim, class CmplxType>
__global__ void
//__launch_bounds__(256, 6)
grid_kernel_small_gcf(CmplxType* out, CmplxType* in, CmplxType* in_vals, size_t npts,
size_t img_dim, CmplxType* gcf) {
//TODO remove hard-coded 32
#ifdef __COMPUTE_GCF
double T = gcf[0].x;
double w = gcf[0].y;
float p1 = 2*3.1415926*w;
float p2 = p1*T;
#endif
for (int n = 32*blockIdx.x; n<npts; n+= 32*gridDim.x) {
for (int q=threadIdx.y;q<32;q+=blockDim.y) {
CmplxType inn = in[n+q];
int sub_x = floor(GCF_GRID*(inn.x-floor(inn.x)));
int sub_y = floor(GCF_GRID*(inn.y-floor(inn.y)));
int main_x = floor(inn.x);
int main_y = floor(inn.y);
auto sum_r = make_zero(out);
auto sum_i = make_zero(out);
int a = -gcf_dim/2 + (int)threadIdx.x%gcf_dim;
for(int b = -gcf_dim/2+(int)threadIdx.x/gcf_dim;b<gcf_dim/2;b+=blockDim.x/gcf_dim)
{
//auto this_img = img[main_x+a+img_dim*(main_y+b)];
//auto r1 = this_img.x;
//auto i1 = this_img.y;
auto r1 = in_vals[n+q].x;
auto i1 = in_vals[n+q].y;
if (main_x+a < 0 || main_y+b < 0 ||
main_x+a >= IMG_SIZE || main_y+b >= IMG_SIZE) {
r1=i1=0.0;
}
//auto this_gcf = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
// gcf_dim*b+a]);
//auto r2 = this_gcf.x;
//auto i2 = this_gcf.y;
#ifdef __COMPUTE_GCF
//double phase = 2*3.1415926*w*(1-T*sqrt((main_x-inn.x)*(main_x-inn.x)+(main_y-inn.y)*(main_y-inn.y)));
//double r2 = sin(phase);
//double i2 = cos(phase);
float xsquare = (main_x-inn.x+sub_x*1.0/GCF_GRID);
float ysquare = (main_y-inn.y+sub_y*1.0/GCF_GRID);
xsquare *= xsquare;
ysquare *= ysquare;
float phase = p1 - p2*sqrt(xsquare + ysquare);
float r2,i2;
sincos(phase, &r2, &i2);
#else
auto r2 = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].x);
auto i2 = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].y);
#endif
out[main_x+a+img_dim*(main_y+b)].x += r1*r2 - i1*i2;
out[main_x+a+img_dim*(main_y+b)].y += r1*i2 + r2*i1;
}
#if 0
for(int s = blockDim.x < 16 ? blockDim.x : 16; s>0;s/=2) {
sum_r += __shfl_down(sum_r,s);
sum_i += __shfl_down(sum_i,s);
}
CmplxType tmp;
tmp.x = sum_r;
tmp.y = sum_i;
if (threadIdx.x == 0) {
out[n+q] = tmp;
}
#endif
}
}
}
__device__ void warp_reduce(double &in, int sz = 16) {
if (16<sz) sz=16;
for(int s = sz; s>0;s/=2) {
in += __shfl_down(in,s);
}
}
__device__ void warp_reduce(float &in, int sz = 16) {
if (16<sz) sz=16;
for(int s = sz; s>0;s/=2) {
in += __shfl_down(in,s);
}
}
__device__ void warp_reduce2(float &in, int sz = 32) {
if (32<sz) sz=32;
for(int s=1; s<sz; s*=2) {
in += __shfl_down(in,s);
}
}
__device__ void warp_reduce2(double &in, int sz = 32) {
if (32<sz) sz=32;
for(int s=1; s<sz; s*=2) {
in += __shfl_down(in,s);
}
}
template <class CmplxType>
__global__ void vis2ints(CmplxType *vis_in, int2* vis_out, int npts) {
for (int q=threadIdx.x+blockIdx.x*blockDim.x;
q<npts;
q+=gridDim.x*blockDim.x) {
CmplxType inn = vis_in[q];
int main_y = floor(inn.y);
int sub_y = floor(GCF_GRID*(inn.y-main_y));
int main_x = floor(inn.x);
int sub_x = floor(GCF_GRID*(inn.x-main_x));
vis_out[q].x = main_x*GCF_GRID+sub_x;
vis_out[q].y = main_y*GCF_GRID+sub_y;
}
}
//Make sure visibilities are sorted by main_x/blocksize then main_y/blocksize
// blockgrid should be (img_dim+blocksize-1)/blocksize
__global__ void set_bookmarks(int2* vis_in, int npts, int blocksize, int blockgrid, int* bookmarks) {
for (int q=threadIdx.x+blockIdx.x*blockDim.x;q<=npts;q+=gridDim.x*blockDim.x) {
int2 this_vis = vis_in[q];
int2 last_vis = vis_in[q-1];
int main_x = this_vis.x/GCF_GRID/blocksize;
int main_x_last = last_vis.x/GCF_GRID/blocksize;
int main_y = this_vis.y/GCF_GRID/blocksize;
int main_y_last = last_vis.y/GCF_GRID/blocksize;
if (0==q) {
main_y_last=0;
main_x_last=-1;
}
if (npts==q) main_x = main_y = blockgrid;
if (main_x != main_x_last || main_y != main_y_last) {
for (int z=main_y_last*blockgrid+main_x_last+1;
z<=main_y*blockgrid+main_x; z++) {
bookmarks[z] = q;
}
}
}
}
template <int gcf_dim, class CmplxType>
__global__ void
#if POLARIZATIONS == 1
__launch_bounds__(1024, 2)
#else
__launch_bounds__(GCF_DIM*GCF_DIM/4/4/GCF_STRIPES/PTS, 12)
#endif
grid_kernel_gather(CmplxType* out, int2* in, CmplxType* in_vals, size_t npts,
int img_dim, CmplxType* gcf, int* bookmarks, int yoff) {
int2 __shared__ inbuff[32];
CmplxType __shared__ invalbuff[POLARIZATIONS][32+32/POLARIZATIONS];
const int bm_dim = (img_dim+gcf_dim-1)/gcf_dim*2;
#ifdef __COMPUTE_GCF
double T = gcf[0].x;
double w = gcf[0].y;
float p1 = 2*3.1415926*w;
float p2 = p1*T;
#endif
int left = blockIdx.x*blockDim.x;
int top = blockIdx.y*blockDim.y*PTS*GCF_STRIPES;
int this_x = left+threadIdx.x;
int this_y = top+threadIdx.y+yoff;
//if (this_x >= img_dim) return;
//if (this_y >= img_dim) return;
CmplxType sum[POLARIZATIONS][PTS];
for (int p=0;p<PTS;p++) {
//#pragma unroll
for (int pz=0;pz<POLARIZATIONS;pz++) {
sum[pz][p] = out[this_x + this_y*img_dim+p*blockDim.y*img_dim+pz*img_dim*img_dim];
}
}
int half_gcf = gcf_dim/2;
int bm_x = left/half_gcf-1;
int bm_y = top/half_gcf-1;
for (int y=bm_y<0?0:bm_y;(y<bm_y+2+(blockDim.y+half_gcf-1)/half_gcf)&&(y<(img_dim+half_gcf-1)/half_gcf);y++) {
for (int x=bm_x<0?0:bm_x;(x<bm_x+2+(blockDim.x+half_gcf-1)/half_gcf)&&(x<(img_dim+half_gcf-1)/half_gcf);x++) {
int bm_start = bookmarks[y*bm_dim+x];
int bm_end = bookmarks[y*bm_dim+x+1];
for (int n=bm_start; n<= bm_end; n+=32) {
__syncthreads();
int raw_idx = threadIdx.x+blockDim.x*threadIdx.y;
if (raw_idx < 32) inbuff[raw_idx]= in[n+raw_idx];
else {
raw_idx -= 32;
if (raw_idx < 32*POLARIZATIONS) invalbuff[raw_idx%POLARIZATIONS][raw_idx/POLARIZATIONS]= in_vals[n*POLARIZATIONS+raw_idx];
}
//if (threadIdx.x<32 && threadIdx.y==blockDim.y-1) invalbuff[threadIdx.x]=in_vals[n+threadIdx.x];
__syncthreads();
for (int q = 0; q<32 && n+q < bm_end; q++) {
int2 inn = inbuff[q];
for (int p = 0; p < PTS; p++) {
int main_y = inn.y/GCF_GRID;
if (this_y + blockDim.y*p >= img_dim) continue;
int b = this_y + blockDim.y*p - main_y;
if (b > half_gcf || b <= - half_gcf) continue;
int main_x = inn.x/GCF_GRID;
int a = this_x - main_x;
if (a > half_gcf || a <= - half_gcf) continue;
#ifdef __COMPUTE_GCF
//double phase = 2*3.1415926*w*(1-T*sqrt((main_x-inn.x)*(main_x-inn.x)+(main_y-inn.y)*(main_y-inn.y)));
//double r2 = sin(phase);
//double i2 = cos(phase);
int sub_x = inn.x%GCF_GRID;
int sub_y = inn.y%GCF_GRID;
float xsquare = (main_x-inn.x+sub_x*1.0/GCF_GRID);
float ysquare = (main_y-inn.y+sub_y*1.0/GCF_GRID);
xsquare *= xsquare;
ysquare *= ysquare;
float phase = p1 - p2*sqrt(xsquare + ysquare);
float r2,i2;
sincos(phase, &r2, &i2);
#else
int sub_x = inn.x%GCF_GRID;
int sub_y = inn.y%GCF_GRID;
CmplxType ctmp = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a]);
auto r2 = ctmp.x;
auto i2 = ctmp.y;
//auto r2 = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
// gcf_dim*b+a].x);
//auto i2 = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
// gcf_dim*b+a].y);
#endif
//#pragma unroll
for (int pz=0;pz<POLARIZATIONS;pz++) {
CmplxType r1 = invalbuff[pz][q];
//CmplxType r1 = in_vals[p+POLARIZATIONS*(n+q)];
#ifdef DEBUG1
sum[pz][p].x += 1.0;
sum[pz][p].y += n+q;
#else
sum[pz][p].x += r1.x*r2 - r1.y*i2;
sum[pz][p].y += r1.x*i2 + r2*r1.y;
#endif
} //pz
} //p
} //q
} //n
} //x
} //y
for (int p=0;p<PTS;p++) {
if (this_y + blockDim.y*p >= img_dim) continue;
if (this_x >= img_dim) continue;
//#pragma unroll
for (int pz=0;pz<POLARIZATIONS;pz++) {
out[this_x + img_dim * (this_y+blockDim.y*p) + pz*img_dim*img_dim] = sum[pz][p];
}
}
}
template <int gcf_dim, class CmplxType>
__global__ void
__launch_bounds__(GCF_DIM*BLOCK_Y, 4)
grid_kernel_window(CmplxType* out, int2* in, CmplxType* in_vals, size_t npts,
int img_dim, CmplxType* gcf) {
#ifdef __COMPUTE_GCF
double T = gcf[0].x;
double w = gcf[0].y;
float p1 = 2*3.1415926*w;
float p2 = p1*T;
#endif
int2 __shared__ inbuff[32];
CmplxType __shared__ invalbuff[POLARIZATIONS][32+32/POLARIZATIONS];
CmplxType sum[POLARIZATIONS];
CmplxType r1;
int half_gcf = gcf_dim/2;
int local_npt = (npts+gridDim.x-1)/gridDim.x; //number of points assigned to this block
in += local_npt*blockIdx.x;
in_vals += local_npt*blockIdx.x*POLARIZATIONS;
int last_idx = -INT_MAX;
size_t gcf_y = threadIdx.y + blockIdx.y*blockDim.y;
if (blockIdx.x==gridDim.x-1) local_npt = npts-local_npt*blockIdx.x;
for (int n=0; n<local_npt; n+=32) {
__syncthreads();
int raw_idx = threadIdx.x+blockDim.x*threadIdx.y;
if (raw_idx < 32) inbuff[raw_idx]= in[n+raw_idx];
else {
raw_idx -= 32;
if (raw_idx < 32*POLARIZATIONS) invalbuff[raw_idx%POLARIZATIONS][raw_idx/POLARIZATIONS]= in_vals[n*POLARIZATIONS+raw_idx];
}
//shm[threadIdx.x][threadIdx.y].x = 0.00;
//shm[threadIdx.x][threadIdx.y].y = 0.00;
__syncthreads();
for (int q = 0; q<32 && n+q < local_npt; q++) {
int2 inn = inbuff[q];
int main_y = inn.y/GCF_GRID;
int main_x = inn.x/GCF_GRID;
//TODO adjust to favor the high side
int this_x = gcf_dim*((main_x+half_gcf-(int)threadIdx.x)/gcf_dim)+(int)threadIdx.x;
int this_y;
this_y = gcf_dim*((main_y+half_gcf-gcf_y)/gcf_dim)+gcf_y;
if (main_x+half_gcf < threadIdx.x || this_x >= img_dim ||
main_y+half_gcf < gcf_y || this_y >= img_dim) {
//TODO pad instead?
} else {
int this_idx = this_x + img_dim * this_y;
prof_trigger(0);
if (last_idx != this_idx) {
prof_trigger(1);
if (last_idx != -INT_MAX) {
//#pragma unroll
for (int pz=0;pz<POLARIZATIONS;pz++) {
atomicAddWrap(&out[last_idx+pz*img_dim*img_dim].x, sum[pz].x);
atomicAddWrap(&out[last_idx+pz*img_dim*img_dim].y, sum[pz].y);
}
}
for (int pz=0;pz<POLARIZATIONS;pz++) sum[pz].x = sum[pz].y = 0.0;
last_idx = this_idx;
}
#ifdef __COMPUTE_GCF
//double phase = 2*3.1415926*w*(1-T*sqrt((main_x-inn.x)*(main_x-inn.x)+(main_y-inn.y)*(main_y-inn.y)));
//double r2 = sin(phase);
//double i2 = cos(phase);
int sub_x = inn.x%GCF_GRID;
int sub_y = inn.y%GCF_GRID;
float xsquare = (main_x-inn.x+sub_x*1.0/GCF_GRID);
float ysquare = (main_y-inn.y+sub_y*1.0/GCF_GRID);
xsquare *= xsquare;
ysquare *= ysquare;
float phase = p1 - p2*sqrt(xsquare + ysquare);
float r2,i2;
sincos(phase, &r2, &i2);
#else
int sub_x = inn.x%GCF_GRID;
int sub_y = inn.y%GCF_GRID;
int b = this_y - main_y;
int a = this_x - main_x;
auto r2 = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].x);
auto i2 = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].y);
#endif
//#pragma unroll
for (int pz=0;pz<POLARIZATIONS;pz++) {
r1 = invalbuff[pz][q];
//r1 = in_vals[POLARIZATIONS*(n+q)+pz];
#ifdef DEBUG1
sum[pz].x += 1.0;
sum[pz].y += n+q + blockIdx.x*((npts+gridDim.x-1)/gridDim.x);
#else
sum[pz].x += r1.x*r2 - r1.y*i2;
sum[pz].y += r1.x*i2 + r2*r1.y;
#endif
}
}
//reduce in two directions
//WARNING: Adjustments must be made if blockDim.y and blockDim.x are no
// powers of 2
//Reduce using shuffle first
} //q
} //n
if (last_idx != -INT_MAX) {
//#pragma unroll
for(int pz=0;pz<POLARIZATIONS;pz++) {
atomicAddWrap(&out[last_idx+pz*img_dim*img_dim].x, sum[pz].x);
atomicAddWrap(&out[last_idx+pz*img_dim*img_dim].y, sum[pz].y);
}
}
}
template <class CmplxType>
void gridGPU(CmplxType* out, CmplxType* in, CmplxType* in_vals, size_t npts, size_t img_dim,
CmplxType *gcf, size_t gcf_dim) {
//grid on the GPU
// out (out) - the output image
// in (in) - the input locations
// in_vals (in) - input values
// npts (in) - number of locations
// img_dim (in) - dimension of the image
// gcf (in) - the gridding convolution function
// gcf_dim (in) - dimension of the GCF
CmplxType *d_out, *d_in, *d_in_vals, *d_gcf;
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
CUDA_CHECK_ERR(__LINE__,__FILE__);
#ifdef __MANAGED
d_gcf = gcf;
std::cout << "d_out = out" << std::endl;
d_out = out;
d_in = in;
d_in_vals = in_vals;
sizeof(CmplxType) << std::endl;
#else
//img is padded to avoid overruns. Subtract to find the real head
//Pin CPU memory
hipHostRegister(out, sizeof(CmplxType)*(img_dim*img_dim+2*img_dim*gcf_dim+2*gcf_dim)*POLARIZATIONS, hipHostRegisterMapped);
hipHostRegister(gcf, sizeof(CmplxType)*GCF_GRID*GCF_GRID*gcf_dim*gcf_dim, hipHostRegisterMapped);
hipHostRegister(in, sizeof(CmplxType)*npts, hipHostRegisterMapped);
hipHostRegister(in_vals, sizeof(CmplxType)*npts*POLARIZATIONS, hipHostRegisterMapped);
//Allocate GPU memory
hipMalloc(&d_out, sizeof(CmplxType)*(img_dim*img_dim+2*img_dim*gcf_dim+2*gcf_dim)*POLARIZATIONS);
hipMalloc(&d_gcf, sizeof(CmplxType)*GCF_GRID*GCF_GRID*gcf_dim*gcf_dim);
hipMalloc(&d_in, sizeof(CmplxType)*npts);
hipMalloc(&d_in_vals, sizeof(CmplxType)*npts*POLARIZATIONS);
CUDA_CHECK_ERR(__LINE__,__FILE__);
//Copy in img, gcf and out
hipEventRecord(start);
hipMemcpy(d_gcf, gcf, sizeof(CmplxType)*64*gcf_dim*gcf_dim,
hipMemcpyHostToDevice);
hipMemcpy(d_in, in, sizeof(CmplxType)*npts,
hipMemcpyHostToDevice);
hipMemcpy(d_in_vals, in_vals, sizeof(CmplxType)*npts*POLARIZATIONS,
hipMemcpyHostToDevice);
CUDA_CHECK_ERR(__LINE__,__FILE__);
std::cout << "memcpy time: " << getElapsed(start, stop) << " ms." << std::endl;
//move d_img and d_gcf to remove padding
#endif
//offset gcf to point to the middle of the first GCF for cleaner code later
d_gcf += gcf_dim*(gcf_dim-1)/2-1;
CmplxType* d_out_unpad = d_out + img_dim*gcf_dim+gcf_dim;
#ifdef __GATHER
int2* in_ints;
int* bookmarks;
hipMalloc(&in_ints, sizeof(int2)*npts);
hipMalloc(&bookmarks, sizeof(int)*((img_dim/gcf_dim)*(img_dim/gcf_dim)*4+1));
hipLaunchKernelGGL(( vis2ints), dim3(4),dim3(256), 0, 0, d_in, in_ints, npts);
CUDA_CHECK_ERR(__LINE__,__FILE__);
hipLaunchKernelGGL(( set_bookmarks), dim3(4),dim3(256), 0, 0, in_ints, npts, gcf_dim/2, (img_dim+gcf_dim/2-1)/(gcf_dim/2),
bookmarks);
int2* h_ints = (int2*)malloc(sizeof(int2)*npts);
hipMemcpy(h_ints, in_ints, sizeof(int2)*npts, hipMemcpyDeviceToHost);
CUDA_CHECK_ERR(__LINE__,__FILE__);
hipMemset(d_out, 0, sizeof(CmplxType)*(img_dim*img_dim+2*img_dim*gcf_dim+2*gcf_dim)*POLARIZATIONS);
hipEventRecord(start);
for (int stripe=0;stripe<GCF_STRIPES;stripe++)
hipLaunchKernelGGL(( grid_kernel_gather<GCF_DIM>)
, dim3(dim3((img_dim+gcf_dim/4-1)/(gcf_dim/4), (img_dim+gcf_dim/4-1)/(gcf_dim/4))),
dim3(dim3(gcf_dim/4, gcf_dim/4/PTS/GCF_STRIPES)), 0, 0, // <-- Must not truncate here
// , dim3(img_dim+gcf_dim-1)/(gcf_dim), (img_dim+gcf_dim-1)/(gcf_dim)),
// dim3(gcf_dim, gcf_dim)>>>
(d_out_unpad,in_ints,d_in_vals,npts,img_dim,d_gcf,bookmarks,stripe*gcf_dim/4/GCF_STRIPES);
//std::cout<< "grid_kernel_gather<<<(" << (img_dim+gcf_dim-1)/gcf_dim << ", " << (img_dim+gcf_dim/4-1)/(gcf_dim/4) << "), (" << gcf_dim << ", " << gcf_dim/4 << ")>>>()" << std::endl;
CUDA_CHECK_ERR(__LINE__,__FILE__);
#else
#ifdef __MOVING_WINDOW
int2* in_ints;
hipMalloc(&in_ints, sizeof(int2)*npts);
hipLaunchKernelGGL(( vis2ints), dim3(4),dim3(256), 0, 0, d_in, in_ints, npts);
CUDA_CHECK_ERR(__LINE__,__FILE__);
hipMemset(d_out, 0, sizeof(CmplxType)*(img_dim*img_dim+2*img_dim*gcf_dim+2*gcf_dim)*POLARIZATIONS);
hipEventRecord(start);
hipLaunchKernelGGL(( grid_kernel_window<GCF_DIM>)
, dim3(dim3((npts+31)/32,GCF_DIM/BLOCK_Y)),dim3(dim3(GCF_DIM,BLOCK_Y)), 0, 0, d_out_unpad,in_ints,d_in_vals,npts,img_dim,d_gcf);
//vis2ints<<<dim3(npts/64,8),dim3(GCF_DIM,GCF_DIM/8)>>>(d_in, in_ints, npts);
#else
hipEventRecord(start);
hipMemset(d_out, 0, sizeof(CmplxType)*(img_dim*img_dim+2*img_dim*gcf_dim+2*gcf_dim)*POLARIZATIONS);
if (GCF_DIM < 32) {
hipLaunchKernelGGL(( grid_kernel_small_gcf<GCF_DIM>)
, dim3(npts/32),dim3(dim3(32,32)), 0, 0, d_out_unpad,d_in,d_in_vals,npts,img_dim,d_gcf);
} else {
hipLaunchKernelGGL(( grid_kernel<GCF_DIM>)
, dim3(npts/32),dim3(dim3(32,8)), 0, 0, d_out_unpad,d_in,d_in_vals,npts,img_dim,d_gcf);
}
#endif
#endif
float kernel_time = getElapsed(start,stop);
std::cout << "Processed " << npts << " complex points in " << kernel_time << " ms." << std::endl;
std::cout << npts / 1000000.0 / kernel_time * gcf_dim * gcf_dim * 8 * POLARIZATIONS << " Gflops" << std::endl;
CUDA_CHECK_ERR(__LINE__,__FILE__);
#ifdef __MANAGED
hipDeviceSynchronize();
#else
CUDA_CHECK_ERR(__LINE__,__FILE__);
hipMemcpy(out, d_out,
sizeof(CmplxType)*(img_dim*img_dim+2*img_dim*gcf_dim+2*gcf_dim)*POLARIZATIONS,
hipMemcpyDeviceToHost);
CUDA_CHECK_ERR(__LINE__,__FILE__);
//Unpin CPU memory
hipHostUnregister(gcf);
hipHostUnregister(out);
hipHostUnregister(in);
hipHostUnregister(in_vals);
//Restore d_img and d_gcf for deallocation
d_gcf -= gcf_dim*(gcf_dim-1)/2-1;
hipFree(d_out);
#ifdef __GATHER
hipFree(in_ints);
hipFree(bookmarks);
#endif
#endif
hipEventDestroy(start); hipEventDestroy(stop);
CUDA_CHECK_ERR(__LINE__,__FILE__);
}
template void gridGPU<double2>(double2* out, double2* in, double2* in_vals, size_t npts,
size_t img_dim, double2 *gcf, size_t gcf_dim);
template void gridGPU<float2>(float2* out, float2* in, float2* in_vals, size_t npts,
size_t img_dim, float2 *gcf, size_t gcf_dim);
| ffd07165fe1d7f1e16c645448ef4ac3fc89b89d9.cu | #include "Defines.h"
#include "cucommon.cuh"
#include <iostream>
void CUDA_CHECK_ERR(unsigned lineNumber, const char* fileName) {
cudaError_t err = cudaGetLastError();
if (err) std::cout << "Error " << err << " on line " << lineNumber << " of " << fileName << ": " << cudaGetErrorString(err) << std::endl;
}
float getElapsed(cudaEvent_t start, cudaEvent_t stop) {
float elapsed;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
__device__ int2 convert(int asize, int Qpx, float pin) {
float frac; float round;
//TODO add the 1 afterward?
frac = modf((pin+1)*asize, &round);
return make_int2(int(round), int(frac*Qpx));
}
__device__ void atomicAddWrap(float* address, float val)
{
#ifdef __NOATOMIC
*address+=val;
#else
#ifdef __CASATOMIC
float old_v, new_v;
do {
old_v = *address;
new_v = old_v + val;
} while (atomicCAS((unsigned *) address, __float_as_int(old_v), __float_as_int(new_v)) != __float_as_int(old_v));
#else
atomicAdd(address, val);
#endif
#endif
}
__device__ void atomicAddWrap(double* address, double val)
{
#ifdef __NOATOMIC
*address+=val;
#else
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
#endif
}
__device__ double make_zero(double2* in) { return (double)0.0;}
__device__ float make_zero(float2* in) { return (float)0.0;}
template <int gcf_dim, class CmplxType>
__global__ void
__launch_bounds__(256, 8)
grid_kernel(CmplxType* out, CmplxType* in, CmplxType* in_vals, size_t npts,
size_t img_dim, CmplxType* gcf) {
//TODO remove hard-coded 32
CmplxType __shared__ inbuff[32];
#ifdef __COMPUTE_GCF
double T = gcf[0].x;
double w = gcf[0].y;
float p1 = 2*3.1415926*w;
float p2 = p1*T;
#endif
for (int n = 32*blockIdx.x; n<npts; n+= 32*gridDim.x) {
__syncthreads();
int raw_idx = threadIdx.x+blockDim.x*threadIdx.y;
if (raw_idx < 32) inbuff[raw_idx]= in[n+raw_idx];
//if (threadIdx.x<32 && threadIdx.y==blockDim.y-1) invalbuff[threadIdx.x]=in_vals[n+threadIdx.x];
__syncthreads();
for (int q=threadIdx.y;q<32&&n+q<npts;q+=blockDim.y) {
CmplxType inn = inbuff[q];
int sub_x = floor(GCF_GRID*(inn.x-floor(inn.x)));
int sub_y = floor(GCF_GRID*(inn.y-floor(inn.y)));
int main_x = floor(inn.x);
int main_y = floor(inn.y);
auto sum_r = make_zero(out);
auto sum_i = make_zero(out);
for(int a = -(int)threadIdx.x+gcf_dim/2;a>-gcf_dim/2;a-=blockDim.x)
for(int b = gcf_dim/2;b>-gcf_dim/2;b--)
{
//auto this_img = img[main_x+a+img_dim*(main_y+b)];
//auto r1 = this_img.x;
//auto i1 = this_img.y;
if (main_x+a < 0 || main_y+b < 0 ||
main_x+a >= IMG_SIZE || main_y+b >= IMG_SIZE) {
} else {
//auto this_gcf = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
// gcf_dim*b+a]);
//auto r2 = this_gcf.x;
//auto i2 = this_gcf.y;
#ifdef __COMPUTE_GCF
//double phase = 2*3.1415926*w*(1-T*sqrt((main_x-inn.x)*(main_x-inn.x)+(main_y-inn.y)*(main_y-inn.y)));
//double r2 = sin(phase);
//double i2 = cos(phase);
float xsquare = (main_x-inn.x+sub_x*1.0/8.0);
float ysquare = (main_y-inn.y+sub_y*1.0/8.0);
xsquare *= xsquare;
ysquare *= ysquare;
float phase = p1 - p2*sqrt(xsquare + ysquare);
float r2,i2;
sincos(phase, &r2, &i2);
#else
auto r2 = gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].x;
auto i2 = gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].y;
#endif
//#pragma unroll
for (int p=0;p<POLARIZATIONS;p++) {
auto r1 = in_vals[(n+q)*POLARIZATIONS+p].x;
auto i1 = in_vals[(n+q)*POLARIZATIONS+p].y;
#ifdef DEBUG1
atomicAddWrap(&out[main_x+a+IMG_SIZE*(main_y+b)+p*IMG_SIZE*IMG_SIZE].x, 1.0);
atomicAddWrap(&out[main_x+a+IMG_SIZE*(main_y+b)+p*IMG_SIZE*IMG_SIZE].y, n+q);
#else
atomicAddWrap(&out[main_x+a+img_dim*(main_y+b)+p*img_dim*img_dim].x, r1*r2 - i1*i2);
atomicAddWrap(&out[main_x+a+img_dim*(main_y+b)+p*img_dim*img_dim].y, r1*i2 + r2*i1);
//out[main_x+a+img_dim*(main_y+b)].x += r1*r2 - i1*i2;
//out[main_x+a+img_dim*(main_y+b)].y += r1*i2 + r2*i1;
#endif
} //p
}
} //b
} //q
} //n
}
template <int gcf_dim, class CmplxType>
__global__ void
//__launch_bounds__(256, 6)
grid_kernel_basic(CmplxType* out, CmplxType* in, CmplxType* in_vals, size_t npts,
size_t img_dim, CmplxType* gcf) {
//TODO remove hard-coded 32
#ifdef __COMPUTE_GCF
double T = gcf[0].x;
double w = gcf[0].y;
float p1 = 2*3.1415926*w;
float p2 = p1*T;
#endif
for (int n = 32*blockIdx.x; n<npts; n+= 32*gridDim.x) {
for (int q=threadIdx.y;q<32;q+=blockDim.y) {
CmplxType inn = in[n+q];
int sub_x = floor(GCF_GRID*(inn.x-floor(inn.x)));
int sub_y = floor(GCF_GRID*(inn.y-floor(inn.y)));
int main_x = floor(inn.x);
int main_y = floor(inn.y);
auto sum_r = make_zero(out);
auto sum_i = make_zero(out);
for(int a = -(int)threadIdx.x+gcf_dim/2;a>-gcf_dim/2;a-=blockDim.x)
for(int b = gcf_dim/2;b>-gcf_dim/2;b--)
{
//auto this_img = img[main_x+a+img_dim*(main_y+b)];
//auto r1 = this_img.x;
//auto i1 = this_img.y;
auto r1 = in_vals[n+q].x;
auto i1 = in_vals[n+q].y;
if (main_x+a < 0 || main_y+b < 0 ||
main_x+a >= IMG_SIZE || main_y+b >= IMG_SIZE) {
r1=i1=0.0;
} else {
//auto this_gcf = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
// gcf_dim*b+a]);
//auto r2 = this_gcf.x;
//auto i2 = this_gcf.y;
#ifdef __COMPUTE_GCF
//double phase = 2*3.1415926*w*(1-T*sqrt((main_x-inn.x)*(main_x-inn.x)+(main_y-inn.y)*(main_y-inn.y)));
//double r2 = sin(phase);
//double i2 = cos(phase);
float xsquare = (main_x-inn.x+sub_x*1.0/8.0);
float ysquare = (main_y-inn.y+sub_y*1.0/8.0);
xsquare *= xsquare;
ysquare *= ysquare;
float phase = p1 - p2*sqrt(xsquare + ysquare);
float r2,i2;
sincos(phase, &r2, &i2);
#else
auto r2 = gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].x;
auto i2 = gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].y;
#endif
#ifdef DEBUG1
atomicAddWrap(&out[main_x+a+img_dim*(main_y+b)].x, n+q);
atomicAddWrap(&out[main_x+a+img_dim*(main_y+b)].y, gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x)+gcf_dim*b+a].y);
#else
atomicAddWrap(&out[main_x+a+img_dim*(main_y+b)].x, r1*r2 - i1*i2);
atomicAddWrap(&out[main_x+a+img_dim*(main_y+b)].y, r1*i2 + r2*i1);
#endif
}
}
#if 0
for(int s = blockDim.x < 16 ? blockDim.x : 16; s>0;s/=2) {
sum_r += __shfl_down(sum_r,s);
sum_i += __shfl_down(sum_i,s);
}
CmplxType tmp;
tmp.x = sum_r;
tmp.y = sum_i;
if (threadIdx.x == 0) {
out[n+q] = tmp;
}
#endif
}
}
}
template <int gcf_dim, class CmplxType>
__global__ void
//__launch_bounds__(256, 6)
grid_kernel_small_gcf(CmplxType* out, CmplxType* in, CmplxType* in_vals, size_t npts,
size_t img_dim, CmplxType* gcf) {
//TODO remove hard-coded 32
#ifdef __COMPUTE_GCF
double T = gcf[0].x;
double w = gcf[0].y;
float p1 = 2*3.1415926*w;
float p2 = p1*T;
#endif
for (int n = 32*blockIdx.x; n<npts; n+= 32*gridDim.x) {
for (int q=threadIdx.y;q<32;q+=blockDim.y) {
CmplxType inn = in[n+q];
int sub_x = floor(GCF_GRID*(inn.x-floor(inn.x)));
int sub_y = floor(GCF_GRID*(inn.y-floor(inn.y)));
int main_x = floor(inn.x);
int main_y = floor(inn.y);
auto sum_r = make_zero(out);
auto sum_i = make_zero(out);
int a = -gcf_dim/2 + (int)threadIdx.x%gcf_dim;
for(int b = -gcf_dim/2+(int)threadIdx.x/gcf_dim;b<gcf_dim/2;b+=blockDim.x/gcf_dim)
{
//auto this_img = img[main_x+a+img_dim*(main_y+b)];
//auto r1 = this_img.x;
//auto i1 = this_img.y;
auto r1 = in_vals[n+q].x;
auto i1 = in_vals[n+q].y;
if (main_x+a < 0 || main_y+b < 0 ||
main_x+a >= IMG_SIZE || main_y+b >= IMG_SIZE) {
r1=i1=0.0;
}
//auto this_gcf = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
// gcf_dim*b+a]);
//auto r2 = this_gcf.x;
//auto i2 = this_gcf.y;
#ifdef __COMPUTE_GCF
//double phase = 2*3.1415926*w*(1-T*sqrt((main_x-inn.x)*(main_x-inn.x)+(main_y-inn.y)*(main_y-inn.y)));
//double r2 = sin(phase);
//double i2 = cos(phase);
float xsquare = (main_x-inn.x+sub_x*1.0/GCF_GRID);
float ysquare = (main_y-inn.y+sub_y*1.0/GCF_GRID);
xsquare *= xsquare;
ysquare *= ysquare;
float phase = p1 - p2*sqrt(xsquare + ysquare);
float r2,i2;
sincos(phase, &r2, &i2);
#else
auto r2 = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].x);
auto i2 = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].y);
#endif
out[main_x+a+img_dim*(main_y+b)].x += r1*r2 - i1*i2;
out[main_x+a+img_dim*(main_y+b)].y += r1*i2 + r2*i1;
}
#if 0
for(int s = blockDim.x < 16 ? blockDim.x : 16; s>0;s/=2) {
sum_r += __shfl_down(sum_r,s);
sum_i += __shfl_down(sum_i,s);
}
CmplxType tmp;
tmp.x = sum_r;
tmp.y = sum_i;
if (threadIdx.x == 0) {
out[n+q] = tmp;
}
#endif
}
}
}
__device__ void warp_reduce(double &in, int sz = 16) {
if (16<sz) sz=16;
for(int s = sz; s>0;s/=2) {
in += __shfl_down(in,s);
}
}
__device__ void warp_reduce(float &in, int sz = 16) {
if (16<sz) sz=16;
for(int s = sz; s>0;s/=2) {
in += __shfl_down(in,s);
}
}
__device__ void warp_reduce2(float &in, int sz = 32) {
if (32<sz) sz=32;
for(int s=1; s<sz; s*=2) {
in += __shfl_down(in,s);
}
}
__device__ void warp_reduce2(double &in, int sz = 32) {
if (32<sz) sz=32;
for(int s=1; s<sz; s*=2) {
in += __shfl_down(in,s);
}
}
template <class CmplxType>
__global__ void vis2ints(CmplxType *vis_in, int2* vis_out, int npts) {
for (int q=threadIdx.x+blockIdx.x*blockDim.x;
q<npts;
q+=gridDim.x*blockDim.x) {
CmplxType inn = vis_in[q];
int main_y = floor(inn.y);
int sub_y = floor(GCF_GRID*(inn.y-main_y));
int main_x = floor(inn.x);
int sub_x = floor(GCF_GRID*(inn.x-main_x));
vis_out[q].x = main_x*GCF_GRID+sub_x;
vis_out[q].y = main_y*GCF_GRID+sub_y;
}
}
//Make sure visibilities are sorted by main_x/blocksize then main_y/blocksize
// blockgrid should be (img_dim+blocksize-1)/blocksize
__global__ void set_bookmarks(int2* vis_in, int npts, int blocksize, int blockgrid, int* bookmarks) {
for (int q=threadIdx.x+blockIdx.x*blockDim.x;q<=npts;q+=gridDim.x*blockDim.x) {
int2 this_vis = vis_in[q];
int2 last_vis = vis_in[q-1];
int main_x = this_vis.x/GCF_GRID/blocksize;
int main_x_last = last_vis.x/GCF_GRID/blocksize;
int main_y = this_vis.y/GCF_GRID/blocksize;
int main_y_last = last_vis.y/GCF_GRID/blocksize;
if (0==q) {
main_y_last=0;
main_x_last=-1;
}
if (npts==q) main_x = main_y = blockgrid;
if (main_x != main_x_last || main_y != main_y_last) {
for (int z=main_y_last*blockgrid+main_x_last+1;
z<=main_y*blockgrid+main_x; z++) {
bookmarks[z] = q;
}
}
}
}
template <int gcf_dim, class CmplxType>
__global__ void
#if POLARIZATIONS == 1
__launch_bounds__(1024, 2)
#else
__launch_bounds__(GCF_DIM*GCF_DIM/4/4/GCF_STRIPES/PTS, 12)
#endif
grid_kernel_gather(CmplxType* out, int2* in, CmplxType* in_vals, size_t npts,
int img_dim, CmplxType* gcf, int* bookmarks, int yoff) {
int2 __shared__ inbuff[32];
CmplxType __shared__ invalbuff[POLARIZATIONS][32+32/POLARIZATIONS];
const int bm_dim = (img_dim+gcf_dim-1)/gcf_dim*2;
#ifdef __COMPUTE_GCF
double T = gcf[0].x;
double w = gcf[0].y;
float p1 = 2*3.1415926*w;
float p2 = p1*T;
#endif
int left = blockIdx.x*blockDim.x;
int top = blockIdx.y*blockDim.y*PTS*GCF_STRIPES;
int this_x = left+threadIdx.x;
int this_y = top+threadIdx.y+yoff;
//if (this_x >= img_dim) return;
//if (this_y >= img_dim) return;
CmplxType sum[POLARIZATIONS][PTS];
for (int p=0;p<PTS;p++) {
//#pragma unroll
for (int pz=0;pz<POLARIZATIONS;pz++) {
sum[pz][p] = out[this_x + this_y*img_dim+p*blockDim.y*img_dim+pz*img_dim*img_dim];
}
}
int half_gcf = gcf_dim/2;
int bm_x = left/half_gcf-1;
int bm_y = top/half_gcf-1;
for (int y=bm_y<0?0:bm_y;(y<bm_y+2+(blockDim.y+half_gcf-1)/half_gcf)&&(y<(img_dim+half_gcf-1)/half_gcf);y++) {
for (int x=bm_x<0?0:bm_x;(x<bm_x+2+(blockDim.x+half_gcf-1)/half_gcf)&&(x<(img_dim+half_gcf-1)/half_gcf);x++) {
int bm_start = bookmarks[y*bm_dim+x];
int bm_end = bookmarks[y*bm_dim+x+1];
for (int n=bm_start; n<= bm_end; n+=32) {
__syncthreads();
int raw_idx = threadIdx.x+blockDim.x*threadIdx.y;
if (raw_idx < 32) inbuff[raw_idx]= in[n+raw_idx];
else {
raw_idx -= 32;
if (raw_idx < 32*POLARIZATIONS) invalbuff[raw_idx%POLARIZATIONS][raw_idx/POLARIZATIONS]= in_vals[n*POLARIZATIONS+raw_idx];
}
//if (threadIdx.x<32 && threadIdx.y==blockDim.y-1) invalbuff[threadIdx.x]=in_vals[n+threadIdx.x];
__syncthreads();
for (int q = 0; q<32 && n+q < bm_end; q++) {
int2 inn = inbuff[q];
for (int p = 0; p < PTS; p++) {
int main_y = inn.y/GCF_GRID;
if (this_y + blockDim.y*p >= img_dim) continue;
int b = this_y + blockDim.y*p - main_y;
if (b > half_gcf || b <= - half_gcf) continue;
int main_x = inn.x/GCF_GRID;
int a = this_x - main_x;
if (a > half_gcf || a <= - half_gcf) continue;
#ifdef __COMPUTE_GCF
//double phase = 2*3.1415926*w*(1-T*sqrt((main_x-inn.x)*(main_x-inn.x)+(main_y-inn.y)*(main_y-inn.y)));
//double r2 = sin(phase);
//double i2 = cos(phase);
int sub_x = inn.x%GCF_GRID;
int sub_y = inn.y%GCF_GRID;
float xsquare = (main_x-inn.x+sub_x*1.0/GCF_GRID);
float ysquare = (main_y-inn.y+sub_y*1.0/GCF_GRID);
xsquare *= xsquare;
ysquare *= ysquare;
float phase = p1 - p2*sqrt(xsquare + ysquare);
float r2,i2;
sincos(phase, &r2, &i2);
#else
int sub_x = inn.x%GCF_GRID;
int sub_y = inn.y%GCF_GRID;
CmplxType ctmp = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a]);
auto r2 = ctmp.x;
auto i2 = ctmp.y;
//auto r2 = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
// gcf_dim*b+a].x);
//auto i2 = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
// gcf_dim*b+a].y);
#endif
//#pragma unroll
for (int pz=0;pz<POLARIZATIONS;pz++) {
CmplxType r1 = invalbuff[pz][q];
//CmplxType r1 = in_vals[p+POLARIZATIONS*(n+q)];
#ifdef DEBUG1
sum[pz][p].x += 1.0;
sum[pz][p].y += n+q;
#else
sum[pz][p].x += r1.x*r2 - r1.y*i2;
sum[pz][p].y += r1.x*i2 + r2*r1.y;
#endif
} //pz
} //p
} //q
} //n
} //x
} //y
for (int p=0;p<PTS;p++) {
if (this_y + blockDim.y*p >= img_dim) continue;
if (this_x >= img_dim) continue;
//#pragma unroll
for (int pz=0;pz<POLARIZATIONS;pz++) {
out[this_x + img_dim * (this_y+blockDim.y*p) + pz*img_dim*img_dim] = sum[pz][p];
}
}
}
template <int gcf_dim, class CmplxType>
__global__ void
__launch_bounds__(GCF_DIM*BLOCK_Y, 4)
grid_kernel_window(CmplxType* out, int2* in, CmplxType* in_vals, size_t npts,
int img_dim, CmplxType* gcf) {
#ifdef __COMPUTE_GCF
double T = gcf[0].x;
double w = gcf[0].y;
float p1 = 2*3.1415926*w;
float p2 = p1*T;
#endif
int2 __shared__ inbuff[32];
CmplxType __shared__ invalbuff[POLARIZATIONS][32+32/POLARIZATIONS];
CmplxType sum[POLARIZATIONS];
CmplxType r1;
int half_gcf = gcf_dim/2;
int local_npt = (npts+gridDim.x-1)/gridDim.x; //number of points assigned to this block
in += local_npt*blockIdx.x;
in_vals += local_npt*blockIdx.x*POLARIZATIONS;
int last_idx = -INT_MAX;
size_t gcf_y = threadIdx.y + blockIdx.y*blockDim.y;
if (blockIdx.x==gridDim.x-1) local_npt = npts-local_npt*blockIdx.x;
for (int n=0; n<local_npt; n+=32) {
__syncthreads();
int raw_idx = threadIdx.x+blockDim.x*threadIdx.y;
if (raw_idx < 32) inbuff[raw_idx]= in[n+raw_idx];
else {
raw_idx -= 32;
if (raw_idx < 32*POLARIZATIONS) invalbuff[raw_idx%POLARIZATIONS][raw_idx/POLARIZATIONS]= in_vals[n*POLARIZATIONS+raw_idx];
}
//shm[threadIdx.x][threadIdx.y].x = 0.00;
//shm[threadIdx.x][threadIdx.y].y = 0.00;
__syncthreads();
for (int q = 0; q<32 && n+q < local_npt; q++) {
int2 inn = inbuff[q];
int main_y = inn.y/GCF_GRID;
int main_x = inn.x/GCF_GRID;
//TODO adjust to favor the high side
int this_x = gcf_dim*((main_x+half_gcf-(int)threadIdx.x)/gcf_dim)+(int)threadIdx.x;
int this_y;
this_y = gcf_dim*((main_y+half_gcf-gcf_y)/gcf_dim)+gcf_y;
if (main_x+half_gcf < threadIdx.x || this_x >= img_dim ||
main_y+half_gcf < gcf_y || this_y >= img_dim) {
//TODO pad instead?
} else {
int this_idx = this_x + img_dim * this_y;
prof_trigger(0);
if (last_idx != this_idx) {
prof_trigger(1);
if (last_idx != -INT_MAX) {
//#pragma unroll
for (int pz=0;pz<POLARIZATIONS;pz++) {
atomicAddWrap(&out[last_idx+pz*img_dim*img_dim].x, sum[pz].x);
atomicAddWrap(&out[last_idx+pz*img_dim*img_dim].y, sum[pz].y);
}
}
for (int pz=0;pz<POLARIZATIONS;pz++) sum[pz].x = sum[pz].y = 0.0;
last_idx = this_idx;
}
#ifdef __COMPUTE_GCF
//double phase = 2*3.1415926*w*(1-T*sqrt((main_x-inn.x)*(main_x-inn.x)+(main_y-inn.y)*(main_y-inn.y)));
//double r2 = sin(phase);
//double i2 = cos(phase);
int sub_x = inn.x%GCF_GRID;
int sub_y = inn.y%GCF_GRID;
float xsquare = (main_x-inn.x+sub_x*1.0/GCF_GRID);
float ysquare = (main_y-inn.y+sub_y*1.0/GCF_GRID);
xsquare *= xsquare;
ysquare *= ysquare;
float phase = p1 - p2*sqrt(xsquare + ysquare);
float r2,i2;
sincos(phase, &r2, &i2);
#else
int sub_x = inn.x%GCF_GRID;
int sub_y = inn.y%GCF_GRID;
int b = this_y - main_y;
int a = this_x - main_x;
auto r2 = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].x);
auto i2 = __ldg(&gcf[gcf_dim*gcf_dim*(GCF_GRID*sub_y+sub_x) +
gcf_dim*b+a].y);
#endif
//#pragma unroll
for (int pz=0;pz<POLARIZATIONS;pz++) {
r1 = invalbuff[pz][q];
//r1 = in_vals[POLARIZATIONS*(n+q)+pz];
#ifdef DEBUG1
sum[pz].x += 1.0;
sum[pz].y += n+q + blockIdx.x*((npts+gridDim.x-1)/gridDim.x);
#else
sum[pz].x += r1.x*r2 - r1.y*i2;
sum[pz].y += r1.x*i2 + r2*r1.y;
#endif
}
}
//reduce in two directions
//WARNING: Adjustments must be made if blockDim.y and blockDim.x are no
// powers of 2
//Reduce using shuffle first
} //q
} //n
if (last_idx != -INT_MAX) {
//#pragma unroll
for(int pz=0;pz<POLARIZATIONS;pz++) {
atomicAddWrap(&out[last_idx+pz*img_dim*img_dim].x, sum[pz].x);
atomicAddWrap(&out[last_idx+pz*img_dim*img_dim].y, sum[pz].y);
}
}
}
template <class CmplxType>
void gridGPU(CmplxType* out, CmplxType* in, CmplxType* in_vals, size_t npts, size_t img_dim,
CmplxType *gcf, size_t gcf_dim) {
//grid on the GPU
// out (out) - the output image
// in (in) - the input locations
// in_vals (in) - input values
// npts (in) - number of locations
// img_dim (in) - dimension of the image
// gcf (in) - the gridding convolution function
// gcf_dim (in) - dimension of the GCF
CmplxType *d_out, *d_in, *d_in_vals, *d_gcf;
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
CUDA_CHECK_ERR(__LINE__,__FILE__);
#ifdef __MANAGED
d_gcf = gcf;
std::cout << "d_out = out" << std::endl;
d_out = out;
d_in = in;
d_in_vals = in_vals;
sizeof(CmplxType) << std::endl;
#else
//img is padded to avoid overruns. Subtract to find the real head
//Pin CPU memory
cudaHostRegister(out, sizeof(CmplxType)*(img_dim*img_dim+2*img_dim*gcf_dim+2*gcf_dim)*POLARIZATIONS, cudaHostRegisterMapped);
cudaHostRegister(gcf, sizeof(CmplxType)*GCF_GRID*GCF_GRID*gcf_dim*gcf_dim, cudaHostRegisterMapped);
cudaHostRegister(in, sizeof(CmplxType)*npts, cudaHostRegisterMapped);
cudaHostRegister(in_vals, sizeof(CmplxType)*npts*POLARIZATIONS, cudaHostRegisterMapped);
//Allocate GPU memory
cudaMalloc(&d_out, sizeof(CmplxType)*(img_dim*img_dim+2*img_dim*gcf_dim+2*gcf_dim)*POLARIZATIONS);
cudaMalloc(&d_gcf, sizeof(CmplxType)*GCF_GRID*GCF_GRID*gcf_dim*gcf_dim);
cudaMalloc(&d_in, sizeof(CmplxType)*npts);
cudaMalloc(&d_in_vals, sizeof(CmplxType)*npts*POLARIZATIONS);
CUDA_CHECK_ERR(__LINE__,__FILE__);
//Copy in img, gcf and out
cudaEventRecord(start);
cudaMemcpy(d_gcf, gcf, sizeof(CmplxType)*64*gcf_dim*gcf_dim,
cudaMemcpyHostToDevice);
cudaMemcpy(d_in, in, sizeof(CmplxType)*npts,
cudaMemcpyHostToDevice);
cudaMemcpy(d_in_vals, in_vals, sizeof(CmplxType)*npts*POLARIZATIONS,
cudaMemcpyHostToDevice);
CUDA_CHECK_ERR(__LINE__,__FILE__);
std::cout << "memcpy time: " << getElapsed(start, stop) << " ms." << std::endl;
//move d_img and d_gcf to remove padding
#endif
//offset gcf to point to the middle of the first GCF for cleaner code later
d_gcf += gcf_dim*(gcf_dim-1)/2-1;
CmplxType* d_out_unpad = d_out + img_dim*gcf_dim+gcf_dim;
#ifdef __GATHER
int2* in_ints;
int* bookmarks;
cudaMalloc(&in_ints, sizeof(int2)*npts);
cudaMalloc(&bookmarks, sizeof(int)*((img_dim/gcf_dim)*(img_dim/gcf_dim)*4+1));
vis2ints<<<4,256>>>(d_in, in_ints, npts);
CUDA_CHECK_ERR(__LINE__,__FILE__);
set_bookmarks<<<4,256>>>(in_ints, npts, gcf_dim/2, (img_dim+gcf_dim/2-1)/(gcf_dim/2),
bookmarks);
int2* h_ints = (int2*)malloc(sizeof(int2)*npts);
cudaMemcpy(h_ints, in_ints, sizeof(int2)*npts, cudaMemcpyDeviceToHost);
CUDA_CHECK_ERR(__LINE__,__FILE__);
cudaMemset(d_out, 0, sizeof(CmplxType)*(img_dim*img_dim+2*img_dim*gcf_dim+2*gcf_dim)*POLARIZATIONS);
cudaEventRecord(start);
for (int stripe=0;stripe<GCF_STRIPES;stripe++)
grid_kernel_gather<GCF_DIM>
<<<dim3((img_dim+gcf_dim/4-1)/(gcf_dim/4), (img_dim+gcf_dim/4-1)/(gcf_dim/4)),
dim3(gcf_dim/4, gcf_dim/4/PTS/GCF_STRIPES)>>> // <-- Must not truncate here
// <<<dim3((img_dim+gcf_dim-1)/(gcf_dim), (img_dim+gcf_dim-1)/(gcf_dim)),
// dim3(gcf_dim, gcf_dim)>>>
(d_out_unpad,in_ints,d_in_vals,npts,img_dim,d_gcf,bookmarks,stripe*gcf_dim/4/GCF_STRIPES);
//std::cout<< "grid_kernel_gather<<<(" << (img_dim+gcf_dim-1)/gcf_dim << ", " << (img_dim+gcf_dim/4-1)/(gcf_dim/4) << "), (" << gcf_dim << ", " << gcf_dim/4 << ")>>>()" << std::endl;
CUDA_CHECK_ERR(__LINE__,__FILE__);
#else
#ifdef __MOVING_WINDOW
int2* in_ints;
cudaMalloc(&in_ints, sizeof(int2)*npts);
vis2ints<<<4,256>>>(d_in, in_ints, npts);
CUDA_CHECK_ERR(__LINE__,__FILE__);
cudaMemset(d_out, 0, sizeof(CmplxType)*(img_dim*img_dim+2*img_dim*gcf_dim+2*gcf_dim)*POLARIZATIONS);
cudaEventRecord(start);
grid_kernel_window<GCF_DIM>
<<<dim3((npts+31)/32,GCF_DIM/BLOCK_Y),dim3(GCF_DIM,BLOCK_Y)>>>(d_out_unpad,in_ints,d_in_vals,npts,img_dim,d_gcf);
//vis2ints<<<dim3(npts/64,8),dim3(GCF_DIM,GCF_DIM/8)>>>(d_in, in_ints, npts);
#else
cudaEventRecord(start);
cudaMemset(d_out, 0, sizeof(CmplxType)*(img_dim*img_dim+2*img_dim*gcf_dim+2*gcf_dim)*POLARIZATIONS);
if (GCF_DIM < 32) {
grid_kernel_small_gcf<GCF_DIM>
<<<npts/32,dim3(32,32)>>>(d_out_unpad,d_in,d_in_vals,npts,img_dim,d_gcf);
} else {
grid_kernel<GCF_DIM>
<<<npts/32,dim3(32,8)>>>(d_out_unpad,d_in,d_in_vals,npts,img_dim,d_gcf);
}
#endif
#endif
float kernel_time = getElapsed(start,stop);
std::cout << "Processed " << npts << " complex points in " << kernel_time << " ms." << std::endl;
std::cout << npts / 1000000.0 / kernel_time * gcf_dim * gcf_dim * 8 * POLARIZATIONS << " Gflops" << std::endl;
CUDA_CHECK_ERR(__LINE__,__FILE__);
#ifdef __MANAGED
cudaDeviceSynchronize();
#else
CUDA_CHECK_ERR(__LINE__,__FILE__);
cudaMemcpy(out, d_out,
sizeof(CmplxType)*(img_dim*img_dim+2*img_dim*gcf_dim+2*gcf_dim)*POLARIZATIONS,
cudaMemcpyDeviceToHost);
CUDA_CHECK_ERR(__LINE__,__FILE__);
//Unpin CPU memory
cudaHostUnregister(gcf);
cudaHostUnregister(out);
cudaHostUnregister(in);
cudaHostUnregister(in_vals);
//Restore d_img and d_gcf for deallocation
d_gcf -= gcf_dim*(gcf_dim-1)/2-1;
cudaFree(d_out);
#ifdef __GATHER
cudaFree(in_ints);
cudaFree(bookmarks);
#endif
#endif
cudaEventDestroy(start); cudaEventDestroy(stop);
CUDA_CHECK_ERR(__LINE__,__FILE__);
}
template void gridGPU<double2>(double2* out, double2* in, double2* in_vals, size_t npts,
size_t img_dim, double2 *gcf, size_t gcf_dim);
template void gridGPU<float2>(float2* out, float2* in, float2* in_vals, size_t npts,
size_t img_dim, float2 *gcf, size_t gcf_dim);
|
c8f5213a323f6caeade6b797695bf8bd4e884056.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
# include <time.h>
# include <math.h>
# include <stdio.h>
__global__ void add( int *a , int *b , int *c)
{
c[blockIdx.x] = a[blockIdx.x] +b[blockIdx.x];
}
//# define N 125
void random_ints(int* a, int h)
{
}
int main(void){
int N;
printf("\"Hello Vector !\"\n enter size of vector\n");
scanf("%d",&N);
int a[N],b[N],c[N]; // host copies of a, b,c
int *d_a,*d_b,*d_c; // device copies of a, b, c
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
//setup input values for a, b, c
for ( int i=0;i<=N;i++)
{
a[i]=i+2;
b[i]=i+3;
c[i]=0;
}
// Copy inputs to device
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
hipMemcpy(d_c, c, size, hipMemcpyHostToDevice);
// start clocking
clock_t start_time = clock();
//Launch add() kernel on GPU
hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, d_a, d_b, d_c);
hipDeviceSynchronize();
//end clocking and measuring time for execution
clock_t stop_time = clock();
int time = stop_time - start_time;
printf("time=%d\n", time);
//Copy result back to host
hipMemcpy(c,d_c,size,hipMemcpyDeviceToHost);
printf("c=");
for(int i=0;i<N;i++){
printf("%d+",c[i]);}
printf("\n");
// Cleanup
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
}
| c8f5213a323f6caeade6b797695bf8bd4e884056.cu | # include <time.h>
# include <math.h>
# include <stdio.h>
__global__ void add( int *a , int *b , int *c)
{
c[blockIdx.x] = a[blockIdx.x] +b[blockIdx.x];
}
//# define N 125
void random_ints(int* a, int h)
{
}
int main(void){
int N;
printf("\"Hello Vector !\"\n enter size of vector\n");
scanf("%d",&N);
int a[N],b[N],c[N]; // host copies of a, b,c
int *d_a,*d_b,*d_c; // device copies of a, b, c
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
//setup input values for a, b, c
for ( int i=0;i<=N;i++)
{
a[i]=i+2;
b[i]=i+3;
c[i]=0;
}
// Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, size, cudaMemcpyHostToDevice);
// start clocking
clock_t start_time = clock();
//Launch add() kernel on GPU
add<<<N,1>>>(d_a, d_b, d_c);
cudaThreadSynchronize();
//end clocking and measuring time for execution
clock_t stop_time = clock();
int time = stop_time - start_time;
printf("time=%d\n", time);
//Copy result back to host
cudaMemcpy(c,d_c,size,cudaMemcpyDeviceToHost);
printf("c=");
for(int i=0;i<N;i++){
printf("%d+",c[i]);}
printf("\n");
// Cleanup
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
}
|
d6e03db1d5001a4c1d53363b6e4798d91c9afaf0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
/////Defines
#define BITMAP_ID 0x4D42 // the universal bitmap ID
//includes
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "freeglut.h"
#include <windows.h>
BITMAPINFOHEADER bitmapInfoHeader; // bitmap info header (needs windows.h to define type
unsigned char* bitmapData; // the texture data
static GLuint texnum[2];// array of 2 texture object names
float newmin = 0;
float newmax = 255;
float normalize;
/////////#include <gl/gl.h> // standard OpenGL include
/////////#include <gl/glu.h> // OpenGL utilties - we use glut - it already includes these
////// Global Variables
float globangle = 0;
//functions
unsigned char *LoadBitmapFile(char *filename, BITMAPINFOHEADER *bitmapInfoHeader);
int WriteBitmapFile(char *filename, int width, int height, unsigned char *imageData);
void init(void);
void display(void);
void drawBox(float w, float h, float l);
void reshape(int w, int h);
int glutmain(int argc, char** argv);
////// Lighting variables
float ambientLight[] = { 1.0f, 1.0f, 1.0f, 1.0f }; // ambient light
float diffuseLight[] = { 1.0f, 1.0f, 1.0f, 1.0f }; // diffuse light
float lightPosition[] = { 10.0f, 100.0f, 10.0f, 0.0f }; // the light position
////// Material variables
float matAmbient[] = { 1.0f, 1.0f, 1.0f, 1.0f };
float matDiff[] = { 1.0f, 1.0f, 1.0f, 1.0f };
float matAmbient1[] = { 1.0f, 1.0f, 1.0f, 1.0f };
float matDiff1[] = { 1.0f, 1.0f, 1.0f, 1.0f };
float matAmbient2[] = { 1.0f, 1.0f, 1.0f, 1.0f };
float matDiff2[] = { 1.0f, 1.0f, 1.0f, 1.0f };
// Intialized globals
const int width = bitmapInfoHeader.biWidth;
const int height = bitmapInfoHeader.biHeight;
// CUDA array sizes
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
void init()
{
glClearColor(1.0f, 1.0f, 1.0f, 0.0f); // clears colour buffer to to white
glEnable(GL_DEPTH_TEST); // hidden surface removal
glEnable(GL_CULL_FACE); // do not calculate inside of poly's
glFrontFace(GL_CCW); // counter clock-wise polygons are out
//***** lighting
glEnable(GL_SMOOTH);
glEnable(GL_LIGHTING); // enable lighting
// Setup the materials for LIGHT0
glMaterialfv(GL_FRONT, GL_AMBIENT, matAmbient);
glMaterialfv(GL_FRONT, GL_DIFFUSE, matDiff);
// Now setup LIGHT0
glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight); // setup the ambient element
glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight); // the diffuse element
glLightfv(GL_LIGHT0, GL_POSITION, lightPosition); // place the light in the world
// Enable the light
glEnable(GL_LIGHT0);
glEnable(GL_TEXTURE_2D); // enable 2D texturing
// load our bitmap file
bitmapData = LoadBitmapFile("cat.bmp", &bitmapInfoHeader);
// Add vectors in parallel.
hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return;
}
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return;
}
glGenTextures(2, texnum); // generate 2 texture objects
glBindTexture(GL_TEXTURE_2D, texnum[0]); // enable our first texture object
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
// generate the texture image
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, bitmapInfoHeader.biWidth,
bitmapInfoHeader.biHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, bitmapData);
glBindTexture(GL_TEXTURE_2D, texnum[1]); // enable our second texture object
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
// generate the texture image
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, bitmapInfoHeader.biWidth,
bitmapInfoHeader.biHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, bitmapData);
}
void drawBox(float w, float h, float l)
//drawBox
//***************************************************************************
//This function first translates a further x,y,z units then draws a box of
//width w, height h, length l
{
glPushMatrix(); //save modelview
w = w / 2.0; h = h / 2.0; l = l / 2.0; //adjust values so centre is in middle of box
glBindTexture(GL_TEXTURE_2D, texnum[1]); // enable our second texture object
glBegin(GL_POLYGON);
glNormal3f(0.0f, 0.0f, 1.0f); // front face
glTexCoord2f(1.0f, 1.0f);glVertex3f(w, l, h);
glTexCoord2f(0.0f, 1.0f);glVertex3f(-w, l, h);
glTexCoord2f(0.0f, 0.0f);glVertex3f(-w, -l, h);
glTexCoord2f(1.0f, 0.0f);glVertex3f(w, -l, h);
glEnd();
glPopMatrix(); //restore previous modelview matrix so leaving things as you found them
}
void display()
{
glPushMatrix();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // clear screen and depth buffer
// reset modelview matrix
glColor3f(0.8, 0.8, 1.0); //set colour not used. Automatically disabled when lighting Enabled
glTranslatef(0, 0, 0); //move by x,y and z units
glMaterialfv(GL_FRONT, GL_AMBIENT, matAmbient);//set backdrop material properties
glMaterialfv(GL_FRONT, GL_DIFFUSE, matDiff);
glColor3f(1.0, 0.8, 0.8); //set colour not used. Automatically disabled when lighting Enabled
glPushMatrix(); //save current position/orientation, etc.
glTranslatef(0, 1.500, -5); //move by x,y and z units
glMaterialfv(GL_FRONT, GL_AMBIENT, matAmbient1);//set backdrop material properties
glMaterialfv(GL_FRONT, GL_DIFFUSE, matDiff1);
drawBox(13, 1.0, 13); //draw back drop
glPopMatrix(); //restore previous "current" position/orientation etc.
glColor3f(0.8, 1.0, 0.8); //set colour not used. Automatically disabled when lighting Enabled
glPushMatrix(); //save current position/orientation, etc.
glTranslatef(1, 1, 1); //move by x,y and z units
glMaterialfv(GL_FRONT, GL_AMBIENT, matAmbient1);//set teapot light material properties
glMaterialfv(GL_FRONT, GL_DIFFUSE, matDiff1);
glPopMatrix(); //restore previous "current" position/orientation etc.
glColor3f(0.4, 1.0, 1.0); //set colour not used. Automatically disabled when lighting Enabled
glPushMatrix(); //save current position/orientation, etc.
glTranslatef(0, 1, 0); //move by x,y and z units
glMaterialfv(GL_FRONT, GL_AMBIENT, matAmbient2);//set box light material properties
glMaterialfv(GL_FRONT, GL_DIFFUSE, matDiff2);
glPopMatrix(); //restore previous "current" position/orientation etc.
// glTranslatef(0,0,0); //move by x,y and z units
//drawBox(5.0, 10.0, 0.20); //draw floor
glPopMatrix();
glFlush(); //force drawing
glutSwapBuffers(); // bring backbuffer to foreground
//any errors then display error codes
GLenum errCode;
const GLubyte *errString;
if ((errCode = glGetError()) != GL_NO_ERROR)
{
errString = gluErrorString(errCode);
fprintf(stderr, "OpenGL error : %s\n", errString);
}
}
void reshape(int w, int h)
//reshape
//************************************************************************
//OpenGL reshape callback is called when the window is resized
{
glViewport(0, 0, (GLsizei)w, (GLsizei)h); //respecify size of view port to match new w and h
glMatrixMode(GL_PROJECTION); //go into projection mode
glLoadIdentity(); //reset projection matrix
gluPerspective(60.0, (GLfloat)w / (GLfloat)h, 1.0, 200.0); //redefine projection matrix to match changes in w and h
glMatrixMode(GL_MODELVIEW); //switch back to modelview mode
glLoadIdentity(); //reset modelview matrix
gluLookAt(0.0, 2.0, 5.0, 0.0, 2.0, -1.0, 0.0, 1.0, 0.0);// note these match the initial values
//of the keybard fn vx,vy,vz, angle
}
int glutmain(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glutInitWindowSize(500, 500);
glutInitWindowPosition(100, 100);
glutCreateWindow(argv[0]);
init();
glutDisplayFunc(display);
glutReshapeFunc(reshape);
glutMainLoop();
return 0;
}
unsigned char *LoadBitmapFile(char *filename, BITMAPINFOHEADER *bitmapInfoHeader)
{
FILE *filePtr; // the file pointer
BITMAPFILEHEADER bitmapFileHeader; // bitmap file header
unsigned char *bitmapImage; // bitmap image data
int imageIdx = 0; // image index counter
unsigned char tempRGB; // swap variable
// open filename in "read binary" mode
filePtr = fopen(filename, "rb");
if (filePtr == NULL)
return NULL;
// read the bitmap file header
fread(&bitmapFileHeader, sizeof(BITMAPFILEHEADER), 1, filePtr);
// verify that this is a bitmap by checking for the universal bitmap id
if (bitmapFileHeader.bfType != BITMAP_ID)
{
fclose(filePtr);
return NULL;
}
// read the bitmap information header
fread(bitmapInfoHeader, sizeof(BITMAPINFOHEADER), 1, filePtr);
// move file pointer to beginning of bitmap data
fseek(filePtr, bitmapFileHeader.bfOffBits, SEEK_SET);
// allocate enough memory for the bitmap image data
bitmapImage = (unsigned char*)malloc(bitmapInfoHeader->biSizeImage);
// verify memory allocation
if (!bitmapImage)
{
free(bitmapImage);
fclose(filePtr);
return NULL;
}
// read in the bitmap image data
fread(bitmapImage, 1, bitmapInfoHeader->biSizeImage, filePtr);
// make sure bitmap image data was read
if (bitmapImage == NULL)
{
fclose(filePtr);
return NULL;
}
// swap the R and B values to get RGB since the bitmap color format is in BGR
for (imageIdx = 0; imageIdx < bitmapInfoHeader->biSizeImage; imageIdx += 3)
{
tempRGB = bitmapImage[imageIdx];
bitmapImage[imageIdx] = bitmapImage[imageIdx + 2];
bitmapImage[imageIdx + 2] = tempRGB;
}
// close the file and return the bitmap image data
fclose(filePtr);
return bitmapImage;
}
int WriteBitmapFile(char *filename, int width, int height, unsigned char *imageData)
{
FILE *filePtr; // file pointer
BITMAPFILEHEADER bitmapFileHeader; // bitmap file header
BITMAPINFOHEADER bitmapInfoHeader; // bitmap info header
int imageIdx; // used for swapping RGB->BGR
unsigned char tempRGB; // used for swapping
// open file for writing binary mode
filePtr = fopen(filename, "wb");
if (!filePtr)
return 0;
// define the bitmap file header
bitmapFileHeader.bfSize = sizeof(BITMAPFILEHEADER);
bitmapFileHeader.bfType = 0x4D42;
bitmapFileHeader.bfReserved1 = 0;
bitmapFileHeader.bfReserved2 = 0;
bitmapFileHeader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
// define the bitmap information header
bitmapInfoHeader.biSize = sizeof(BITMAPINFOHEADER);
bitmapInfoHeader.biPlanes = 1;
bitmapInfoHeader.biBitCount = 24; // 24-bit
bitmapInfoHeader.biCompression = BI_RGB; // no compression
bitmapInfoHeader.biSizeImage = width * abs(height) * 3; // width * height * (RGB bytes)
bitmapInfoHeader.biXPelsPerMeter = 0;
bitmapInfoHeader.biYPelsPerMeter = 0;
bitmapInfoHeader.biClrUsed = 0;
bitmapInfoHeader.biClrImportant = 0;
bitmapInfoHeader.biWidth = width; // bitmap width
bitmapInfoHeader.biHeight = height; // bitmap height
// switch the image data from RGB to BGR
for (imageIdx = 0; imageIdx < bitmapInfoHeader.biSizeImage; imageIdx += 3)
{
tempRGB = imageData[imageIdx];
imageData[imageIdx] = imageData[imageIdx + 2];
imageData[imageIdx + 2] = tempRGB;
}
// write the bitmap file header
fwrite(&bitmapFileHeader, 1, sizeof(BITMAPFILEHEADER), filePtr);
// write the bitmap info header
fwrite(&bitmapInfoHeader, 1, sizeof(BITMAPINFOHEADER), filePtr);
// write the image data
fwrite(imageData, 1, bitmapInfoHeader.biSizeImage, filePtr);
// close our file
fclose(filePtr);
return 1;
}
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void loadBitmapFileKernel(int width, int height, unsigned char *bitmapData, float newmax, float newmin, float normalize)
{
int i = threadIdx.x;
int min = width;
int max = height;
normalize = (min)*(newmax - newmin) / (max - min) + newmin;
bitmapData[i] = bitmapData[i] * 0.8;
bitmapData[i + 1] = bitmapData[i + 1] * 0.8;
bitmapData[i + 2] = bitmapData[i + 2] * 0.8;
}
int main()
{
int glutmain(int argc, char** argv);
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
// sets up a device for the bitmapData
unsigned char *dev_bitmapData = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for bitmap data .
cudaStatus = hipMalloc((void**)&dev_bitmapData, sizeof(bitmapData));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy bitmap data to the GPU buffer
cudaStatus = hipMemcpy(dev_bitmapData, bitmapData, sizeof(bitmapData), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( loadBitmapFileKernel) , dim3(1), dim3(size), 0, 0, width, height, bitmapData, newmax, newmin, normalize);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
// free up memory
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_bitmapData);
return cudaStatus;
}
| d6e03db1d5001a4c1d53363b6e4798d91c9afaf0.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
/////Defines
#define BITMAP_ID 0x4D42 // the universal bitmap ID
//includes
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "freeglut.h"
#include <windows.h>
BITMAPINFOHEADER bitmapInfoHeader; // bitmap info header (needs windows.h to define type
unsigned char* bitmapData; // the texture data
static GLuint texnum[2];// array of 2 texture object names
float newmin = 0;
float newmax = 255;
float normalize;
/////////#include <gl/gl.h> // standard OpenGL include
/////////#include <gl/glu.h> // OpenGL utilties - we use glut - it already includes these
////// Global Variables
float globangle = 0;
//functions
unsigned char *LoadBitmapFile(char *filename, BITMAPINFOHEADER *bitmapInfoHeader);
int WriteBitmapFile(char *filename, int width, int height, unsigned char *imageData);
void init(void);
void display(void);
void drawBox(float w, float h, float l);
void reshape(int w, int h);
int glutmain(int argc, char** argv);
////// Lighting variables
float ambientLight[] = { 1.0f, 1.0f, 1.0f, 1.0f }; // ambient light
float diffuseLight[] = { 1.0f, 1.0f, 1.0f, 1.0f }; // diffuse light
float lightPosition[] = { 10.0f, 100.0f, 10.0f, 0.0f }; // the light position
////// Material variables
float matAmbient[] = { 1.0f, 1.0f, 1.0f, 1.0f };
float matDiff[] = { 1.0f, 1.0f, 1.0f, 1.0f };
float matAmbient1[] = { 1.0f, 1.0f, 1.0f, 1.0f };
float matDiff1[] = { 1.0f, 1.0f, 1.0f, 1.0f };
float matAmbient2[] = { 1.0f, 1.0f, 1.0f, 1.0f };
float matDiff2[] = { 1.0f, 1.0f, 1.0f, 1.0f };
// Intialized globals
const int width = bitmapInfoHeader.biWidth;
const int height = bitmapInfoHeader.biHeight;
// CUDA array sizes
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
void init()
{
glClearColor(1.0f, 1.0f, 1.0f, 0.0f); // clears colour buffer to to white
glEnable(GL_DEPTH_TEST); // hidden surface removal
glEnable(GL_CULL_FACE); // do not calculate inside of poly's
glFrontFace(GL_CCW); // counter clock-wise polygons are out
//***** lighting
glEnable(GL_SMOOTH);
glEnable(GL_LIGHTING); // enable lighting
// Setup the materials for LIGHT0
glMaterialfv(GL_FRONT, GL_AMBIENT, matAmbient);
glMaterialfv(GL_FRONT, GL_DIFFUSE, matDiff);
// Now setup LIGHT0
glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight); // setup the ambient element
glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight); // the diffuse element
glLightfv(GL_LIGHT0, GL_POSITION, lightPosition); // place the light in the world
// Enable the light
glEnable(GL_LIGHT0);
glEnable(GL_TEXTURE_2D); // enable 2D texturing
// load our bitmap file
bitmapData = LoadBitmapFile("cat.bmp", &bitmapInfoHeader);
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return;
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return;
}
glGenTextures(2, texnum); // generate 2 texture objects
glBindTexture(GL_TEXTURE_2D, texnum[0]); // enable our first texture object
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
// generate the texture image
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, bitmapInfoHeader.biWidth,
bitmapInfoHeader.biHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, bitmapData);
glBindTexture(GL_TEXTURE_2D, texnum[1]); // enable our second texture object
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
// generate the texture image
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, bitmapInfoHeader.biWidth,
bitmapInfoHeader.biHeight, 0, GL_RGB, GL_UNSIGNED_BYTE, bitmapData);
}
void drawBox(float w, float h, float l)
//drawBox
//***************************************************************************
//This function first translates a further x,y,z units then draws a box of
//width w, height h, length l
{
glPushMatrix(); //save modelview
w = w / 2.0; h = h / 2.0; l = l / 2.0; //adjust values so centre is in middle of box
glBindTexture(GL_TEXTURE_2D, texnum[1]); // enable our second texture object
glBegin(GL_POLYGON);
glNormal3f(0.0f, 0.0f, 1.0f); // front face
glTexCoord2f(1.0f, 1.0f);glVertex3f(w, l, h);
glTexCoord2f(0.0f, 1.0f);glVertex3f(-w, l, h);
glTexCoord2f(0.0f, 0.0f);glVertex3f(-w, -l, h);
glTexCoord2f(1.0f, 0.0f);glVertex3f(w, -l, h);
glEnd();
glPopMatrix(); //restore previous modelview matrix so leaving things as you found them
}
void display()
{
glPushMatrix();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // clear screen and depth buffer
// reset modelview matrix
glColor3f(0.8, 0.8, 1.0); //set colour not used. Automatically disabled when lighting Enabled
glTranslatef(0, 0, 0); //move by x,y and z units
glMaterialfv(GL_FRONT, GL_AMBIENT, matAmbient);//set backdrop material properties
glMaterialfv(GL_FRONT, GL_DIFFUSE, matDiff);
glColor3f(1.0, 0.8, 0.8); //set colour not used. Automatically disabled when lighting Enabled
glPushMatrix(); //save current position/orientation, etc.
glTranslatef(0, 1.500, -5); //move by x,y and z units
glMaterialfv(GL_FRONT, GL_AMBIENT, matAmbient1);//set backdrop material properties
glMaterialfv(GL_FRONT, GL_DIFFUSE, matDiff1);
drawBox(13, 1.0, 13); //draw back drop
glPopMatrix(); //restore previous "current" position/orientation etc.
glColor3f(0.8, 1.0, 0.8); //set colour not used. Automatically disabled when lighting Enabled
glPushMatrix(); //save current position/orientation, etc.
glTranslatef(1, 1, 1); //move by x,y and z units
glMaterialfv(GL_FRONT, GL_AMBIENT, matAmbient1);//set teapot light material properties
glMaterialfv(GL_FRONT, GL_DIFFUSE, matDiff1);
glPopMatrix(); //restore previous "current" position/orientation etc.
glColor3f(0.4, 1.0, 1.0); //set colour not used. Automatically disabled when lighting Enabled
glPushMatrix(); //save current position/orientation, etc.
glTranslatef(0, 1, 0); //move by x,y and z units
glMaterialfv(GL_FRONT, GL_AMBIENT, matAmbient2);//set box light material properties
glMaterialfv(GL_FRONT, GL_DIFFUSE, matDiff2);
glPopMatrix(); //restore previous "current" position/orientation etc.
// glTranslatef(0,0,0); //move by x,y and z units
//drawBox(5.0, 10.0, 0.20); //draw floor
glPopMatrix();
glFlush(); //force drawing
glutSwapBuffers(); // bring backbuffer to foreground
//any errors then display error codes
GLenum errCode;
const GLubyte *errString;
if ((errCode = glGetError()) != GL_NO_ERROR)
{
errString = gluErrorString(errCode);
fprintf(stderr, "OpenGL error : %s\n", errString);
}
}
void reshape(int w, int h)
//reshape
//************************************************************************
//OpenGL reshape callback is called when the window is resized
{
glViewport(0, 0, (GLsizei)w, (GLsizei)h); //respecify size of view port to match new w and h
glMatrixMode(GL_PROJECTION); //go into projection mode
glLoadIdentity(); //reset projection matrix
gluPerspective(60.0, (GLfloat)w / (GLfloat)h, 1.0, 200.0); //redefine projection matrix to match changes in w and h
glMatrixMode(GL_MODELVIEW); //switch back to modelview mode
glLoadIdentity(); //reset modelview matrix
gluLookAt(0.0, 2.0, 5.0, 0.0, 2.0, -1.0, 0.0, 1.0, 0.0);// note these match the initial values
//of the keybard fn vx,vy,vz, angle
}
int glutmain(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH);
glutInitWindowSize(500, 500);
glutInitWindowPosition(100, 100);
glutCreateWindow(argv[0]);
init();
glutDisplayFunc(display);
glutReshapeFunc(reshape);
glutMainLoop();
return 0;
}
unsigned char *LoadBitmapFile(char *filename, BITMAPINFOHEADER *bitmapInfoHeader)
{
FILE *filePtr; // the file pointer
BITMAPFILEHEADER bitmapFileHeader; // bitmap file header
unsigned char *bitmapImage; // bitmap image data
int imageIdx = 0; // image index counter
unsigned char tempRGB; // swap variable
// open filename in "read binary" mode
filePtr = fopen(filename, "rb");
if (filePtr == NULL)
return NULL;
// read the bitmap file header
fread(&bitmapFileHeader, sizeof(BITMAPFILEHEADER), 1, filePtr);
// verify that this is a bitmap by checking for the universal bitmap id
if (bitmapFileHeader.bfType != BITMAP_ID)
{
fclose(filePtr);
return NULL;
}
// read the bitmap information header
fread(bitmapInfoHeader, sizeof(BITMAPINFOHEADER), 1, filePtr);
// move file pointer to beginning of bitmap data
fseek(filePtr, bitmapFileHeader.bfOffBits, SEEK_SET);
// allocate enough memory for the bitmap image data
bitmapImage = (unsigned char*)malloc(bitmapInfoHeader->biSizeImage);
// verify memory allocation
if (!bitmapImage)
{
free(bitmapImage);
fclose(filePtr);
return NULL;
}
// read in the bitmap image data
fread(bitmapImage, 1, bitmapInfoHeader->biSizeImage, filePtr);
// make sure bitmap image data was read
if (bitmapImage == NULL)
{
fclose(filePtr);
return NULL;
}
// swap the R and B values to get RGB since the bitmap color format is in BGR
for (imageIdx = 0; imageIdx < bitmapInfoHeader->biSizeImage; imageIdx += 3)
{
tempRGB = bitmapImage[imageIdx];
bitmapImage[imageIdx] = bitmapImage[imageIdx + 2];
bitmapImage[imageIdx + 2] = tempRGB;
}
// close the file and return the bitmap image data
fclose(filePtr);
return bitmapImage;
}
int WriteBitmapFile(char *filename, int width, int height, unsigned char *imageData)
{
FILE *filePtr; // file pointer
BITMAPFILEHEADER bitmapFileHeader; // bitmap file header
BITMAPINFOHEADER bitmapInfoHeader; // bitmap info header
int imageIdx; // used for swapping RGB->BGR
unsigned char tempRGB; // used for swapping
// open file for writing binary mode
filePtr = fopen(filename, "wb");
if (!filePtr)
return 0;
// define the bitmap file header
bitmapFileHeader.bfSize = sizeof(BITMAPFILEHEADER);
bitmapFileHeader.bfType = 0x4D42;
bitmapFileHeader.bfReserved1 = 0;
bitmapFileHeader.bfReserved2 = 0;
bitmapFileHeader.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
// define the bitmap information header
bitmapInfoHeader.biSize = sizeof(BITMAPINFOHEADER);
bitmapInfoHeader.biPlanes = 1;
bitmapInfoHeader.biBitCount = 24; // 24-bit
bitmapInfoHeader.biCompression = BI_RGB; // no compression
bitmapInfoHeader.biSizeImage = width * abs(height) * 3; // width * height * (RGB bytes)
bitmapInfoHeader.biXPelsPerMeter = 0;
bitmapInfoHeader.biYPelsPerMeter = 0;
bitmapInfoHeader.biClrUsed = 0;
bitmapInfoHeader.biClrImportant = 0;
bitmapInfoHeader.biWidth = width; // bitmap width
bitmapInfoHeader.biHeight = height; // bitmap height
// switch the image data from RGB to BGR
for (imageIdx = 0; imageIdx < bitmapInfoHeader.biSizeImage; imageIdx += 3)
{
tempRGB = imageData[imageIdx];
imageData[imageIdx] = imageData[imageIdx + 2];
imageData[imageIdx + 2] = tempRGB;
}
// write the bitmap file header
fwrite(&bitmapFileHeader, 1, sizeof(BITMAPFILEHEADER), filePtr);
// write the bitmap info header
fwrite(&bitmapInfoHeader, 1, sizeof(BITMAPINFOHEADER), filePtr);
// write the image data
fwrite(imageData, 1, bitmapInfoHeader.biSizeImage, filePtr);
// close our file
fclose(filePtr);
return 1;
}
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void loadBitmapFileKernel(int width, int height, unsigned char *bitmapData, float newmax, float newmin, float normalize)
{
int i = threadIdx.x;
int min = width;
int max = height;
normalize = (min)*(newmax - newmin) / (max - min) + newmin;
bitmapData[i] = bitmapData[i] * 0.8;
bitmapData[i + 1] = bitmapData[i + 1] * 0.8;
bitmapData[i + 2] = bitmapData[i + 2] * 0.8;
}
int main()
{
int glutmain(int argc, char** argv);
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
// sets up a device for the bitmapData
unsigned char *dev_bitmapData = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for bitmap data .
cudaStatus = cudaMalloc((void**)&dev_bitmapData, sizeof(bitmapData));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy bitmap data to the GPU buffer
cudaStatus = cudaMemcpy(dev_bitmapData, bitmapData, sizeof(bitmapData), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
loadBitmapFileKernel <<<1, size>>>(width, height, bitmapData, newmax, newmin, normalize);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
// free up memory
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_bitmapData);
return cudaStatus;
}
|
ea6925080f2fd8174816a72ed3c8628b1889aa0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <system/Environment.h>
#include <loops/transform_float.h>
#include <types/types.h>
#include <system/op_boilerplate.h>
#include <loops/legacy_ops.h>
#include <helpers/DebugHelper.h>
using namespace simdOps;
template <typename X, typename Z, typename OpType>
__global__ void transformFloatSimple(const void *x, const Nd4jLong *xShapeInfo, int xRank,
void *params,
void *z, const Nd4jLong *zShapeInfo, int zRank,
int *allocationPointer,
void *reductionPointer,
const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {
functions::transform::TransformFloat<X,Z>::template transformCuda<OpType>(
x, xShapeInfo,
params,
z, zShapeInfo,
allocationPointer, reductionPointer,
tadShapeInfo, tadOffsets);
}
namespace functions {
namespace transform {
template<typename X, typename Y>
_CUDA_H void TransformFloat<X,Y>::executeTransformShaped(dim3 launchDims, hipStream_t *stream, int opNum, const void *x, const Nd4jLong *xShape, int xRank, void *extraParams, void *z, const Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS);
DEBUG_KERNEL(stream, opNum);
}
template<typename X, typename Z>
template <typename OpType>
__device__ void TransformFloat<X,Z>::transformCuda(const void *vx, const Nd4jLong *xShapeInfo,
void *vparams,
void *vz, const Nd4jLong *zShapeInfo,
int *allocationPointer, void *vreductionPointer,
const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {
auto x = reinterpret_cast<const X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
auto params = reinterpret_cast<Z*>(vparams);
auto reductionPointer = reinterpret_cast<Z*>(vreductionPointer);
if(OpType::requiresSpecial) {
OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
return;
}
else {
__shared__ Nd4jLong xEws;
__shared__ Nd4jLong zEws;
__shared__ char xOrder;
__shared__ char zOrder;
__shared__ Nd4jLong length;
if (threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
zEws = shape::elementWiseStride(zShapeInfo);
xOrder = shape::order(xShapeInfo);
zOrder = shape::order(zShapeInfo);
length = shape::length(xShapeInfo);
}
__syncthreads();
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
if(xEws > 0 && zEws > 0 && xOrder == zOrder && xOrder == 'c') {
for (Nd4jLong i = tid; i < length; i += totalThreads)
z[i * zEws] = OpType::op(x[i * xEws], params);
}
else {
if(vx == vz) {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
z[xOffset] = OpType::op(x[xOffset], params);
}
}
else {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
auto zOffset = shape::getIndexOffset(i, zShapeInfo);
z[zOffset] = OpType::op(x[xOffset], params);
}
}
}
}
};
template<typename X, typename Y>
__device__ void TransformFloat<X,Y>::transformCudaLegacy(
const int opNum,
const void *x, const Nd4jLong *xShapeInfo,
void *params,
void *z, const Nd4jLong *zShapeInfo,
int *allocationPointer, void *reductionPointer,
const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM_TT(transformCuda, PARAMS(x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS);
}
template<typename X, typename Z>
template <typename OpType>
_CUDA_H void TransformFloat<X,Z>::intermediateShaped(
dim3 launchDims, hipStream_t *stream,
const void *x, const Nd4jLong *xShape, int xRank,
void *extraParams,
void *z, const Nd4jLong *zShape, int zRank,
int *allocationPointer, void *reductionPointer,
const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {
hipLaunchKernelGGL(( transformFloatSimple<X, Z, OpType>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
sd::DebugHelper::checkErrorCode(stream, "transformFloat(...) failed");
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT TransformFloat, , LIBND4J_TYPES, FLOAT_TYPES);
}
}
| ea6925080f2fd8174816a72ed3c8628b1889aa0a.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#include <system/Environment.h>
#include <loops/transform_float.h>
#include <types/types.h>
#include <system/op_boilerplate.h>
#include <loops/legacy_ops.h>
#include <helpers/DebugHelper.h>
using namespace simdOps;
template <typename X, typename Z, typename OpType>
__global__ void transformFloatSimple(const void *x, const Nd4jLong *xShapeInfo, int xRank,
void *params,
void *z, const Nd4jLong *zShapeInfo, int zRank,
int *allocationPointer,
void *reductionPointer,
const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {
functions::transform::TransformFloat<X,Z>::template transformCuda<OpType>(
x, xShapeInfo,
params,
z, zShapeInfo,
allocationPointer, reductionPointer,
tadShapeInfo, tadOffsets);
}
namespace functions {
namespace transform {
template<typename X, typename Y>
_CUDA_H void TransformFloat<X,Y>::executeTransformShaped(dim3 launchDims, cudaStream_t *stream, int opNum, const void *x, const Nd4jLong *xShape, int xRank, void *extraParams, void *z, const Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM_TT(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS);
DEBUG_KERNEL(stream, opNum);
}
template<typename X, typename Z>
template <typename OpType>
__device__ void TransformFloat<X,Z>::transformCuda(const void *vx, const Nd4jLong *xShapeInfo,
void *vparams,
void *vz, const Nd4jLong *zShapeInfo,
int *allocationPointer, void *vreductionPointer,
const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {
auto x = reinterpret_cast<const X*>(vx);
auto z = reinterpret_cast<Z*>(vz);
auto params = reinterpret_cast<Z*>(vparams);
auto reductionPointer = reinterpret_cast<Z*>(vreductionPointer);
if(OpType::requiresSpecial) {
OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
return;
}
else {
__shared__ Nd4jLong xEws;
__shared__ Nd4jLong zEws;
__shared__ char xOrder;
__shared__ char zOrder;
__shared__ Nd4jLong length;
if (threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
zEws = shape::elementWiseStride(zShapeInfo);
xOrder = shape::order(xShapeInfo);
zOrder = shape::order(zShapeInfo);
length = shape::length(xShapeInfo);
}
__syncthreads();
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
if(xEws > 0 && zEws > 0 && xOrder == zOrder && xOrder == 'c') {
for (Nd4jLong i = tid; i < length; i += totalThreads)
z[i * zEws] = OpType::op(x[i * xEws], params);
}
else {
if(vx == vz) {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
z[xOffset] = OpType::op(x[xOffset], params);
}
}
else {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
auto zOffset = shape::getIndexOffset(i, zShapeInfo);
z[zOffset] = OpType::op(x[xOffset], params);
}
}
}
}
};
template<typename X, typename Y>
__device__ void TransformFloat<X,Y>::transformCudaLegacy(
const int opNum,
const void *x, const Nd4jLong *xShapeInfo,
void *params,
void *z, const Nd4jLong *zShapeInfo,
int *allocationPointer, void *reductionPointer,
const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM_TT(transformCuda, PARAMS(x, xShapeInfo, params, z, zShapeInfo, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_FLOAT_OPS);
}
template<typename X, typename Z>
template <typename OpType>
_CUDA_H void TransformFloat<X,Z>::intermediateShaped(
dim3 launchDims, cudaStream_t *stream,
const void *x, const Nd4jLong *xShape, int xRank,
void *extraParams,
void *z, const Nd4jLong *zShape, int zRank,
int *allocationPointer, void *reductionPointer,
const Nd4jLong *tadShapeInfo, const Nd4jLong *tadOffsets) {
transformFloatSimple<X, Z, OpType><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
sd::DebugHelper::checkErrorCode(stream, "transformFloat(...) failed");
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT TransformFloat, , LIBND4J_TYPES, FLOAT_TYPES);
}
}
|
76c6581f8c5ad0243b9467c23b0f96106db1e429.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// snmg spmv
// Author: Alex Fender afender@nvidia.com
#include "rmm_utils.h"
#include "utilities/cusparse_helper.h"
#include "spmv.cuh"
namespace cugraph
{
template <typename IndexType, typename ValueType>
SNMGcsrmv<IndexType,ValueType>::SNMGcsrmv(SNMGinfo & env_, size_t* part_off_,
IndexType * off_, IndexType * ind_, ValueType * val_, ValueType ** x):
env(env_), part_off(part_off_), off(off_), ind(ind_), val(val_) {
sync_all();
stream = nullptr;
i = env.get_thread_num();
p = env.get_num_threads();
v_glob = part_off[p];
v_loc = part_off[i+1]-part_off[i];
IndexType tmp;
hipMemcpy(&tmp, &off[v_loc], sizeof(IndexType),hipMemcpyDeviceToHost);
cudaCheckError();
e_loc = tmp;
// Allocate the local result
ALLOC_TRY ((void**)&y_loc, v_loc*sizeof(ValueType), stream);
ValueType h_one = 1.0;
ValueType h_zero = 0.0;
spmv.setup(v_loc, v_glob, e_loc, &h_one, val, off, ind, x[i], &h_zero, y_loc);
}
template <typename IndexType, typename ValueType>
SNMGcsrmv<IndexType,ValueType>::~SNMGcsrmv() {
ALLOC_FREE_TRY(y_loc, stream);
}
template <typename IndexType, typename ValueType>
void SNMGcsrmv<IndexType,ValueType>::run (ValueType ** x) {
sync_all();
ValueType h_one = 1.0;
ValueType h_zero = 0.0;
spmv.run(v_loc, v_glob, e_loc, &h_one, val, off, ind, x[i], &h_zero, y_loc);
#ifdef SNMG_DEBUG
print_mem_usage();
#pragma omp master
{std::cout << omp_get_wtime() - t << " ";}
Wait for all local spmv
t = omp_get_wtime();
sync_all();
#pragma omp master
{std::cout << omp_get_wtime() - t << " ";}
Update the output vector
#endif
allgather (env, part_off, y_loc, x);
}
template class SNMGcsrmv<int, double>;
template class SNMGcsrmv<int, float>;
template <typename idx_t,typename val_t>
gdf_error gdf_snmg_csrmv_impl (size_t * part_offsets, gdf_column * off, gdf_column * ind, gdf_column * val, gdf_column ** x_cols){
GDF_REQUIRE( part_offsets != nullptr, GDF_INVALID_API_CALL );
GDF_REQUIRE( off != nullptr, GDF_INVALID_API_CALL );
GDF_REQUIRE( ind != nullptr, GDF_INVALID_API_CALL );
GDF_REQUIRE( val != nullptr, GDF_INVALID_API_CALL );
GDF_REQUIRE( x_cols != nullptr, GDF_INVALID_API_CALL );
GDF_REQUIRE( off->size > 0, GDF_INVALID_API_CALL );
GDF_REQUIRE( ind->size > 0, GDF_INVALID_API_CALL );
GDF_REQUIRE( val->size > 0, GDF_INVALID_API_CALL );
GDF_REQUIRE( ind->size == val->size, GDF_COLUMN_SIZE_MISMATCH );
GDF_REQUIRE( off->dtype == ind->dtype, GDF_UNSUPPORTED_DTYPE );
GDF_REQUIRE( off->null_count + ind->null_count + val->null_count == 0 , GDF_VALIDITY_UNSUPPORTED );
auto p = omp_get_num_threads();
val_t* x[p];
for (auto i = 0; i < p; ++i)
{
GDF_REQUIRE( x_cols[i] != nullptr, GDF_INVALID_API_CALL );
GDF_REQUIRE( x_cols[i]->size > 0, GDF_INVALID_API_CALL );
x[i]= static_cast<val_t*>(x_cols[i]->data);
}
#pragma omp master
{
Cusparse::get_handle();
}
SNMGinfo snmg_env;
SNMGcsrmv<idx_t,val_t> spmv_solver(snmg_env, part_offsets,
static_cast<idx_t*>(off->data),
static_cast<idx_t*>(ind->data),
static_cast<val_t*>(val->data),
x);
spmv_solver.run(x);
#pragma omp master
{
Cusparse::destroy_handle();
}
return GDF_SUCCESS;
}
} //namespace
gdf_error gdf_snmg_csrmv (size_t * part_offsets, gdf_column * off, gdf_column * ind, gdf_column * val, gdf_column ** x_cols){
switch (val->dtype) {
case GDF_FLOAT32: return cugraph::gdf_snmg_csrmv_impl<int32_t,float>(part_offsets, off, ind, val, x_cols);
case GDF_FLOAT64: return cugraph::gdf_snmg_csrmv_impl<int32_t,double>(part_offsets, off, ind, val, x_cols);
default: return GDF_UNSUPPORTED_DTYPE;
}
}
| 76c6581f8c5ad0243b9467c23b0f96106db1e429.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// snmg spmv
// Author: Alex Fender afender@nvidia.com
#include "rmm_utils.h"
#include "utilities/cusparse_helper.h"
#include "spmv.cuh"
namespace cugraph
{
template <typename IndexType, typename ValueType>
SNMGcsrmv<IndexType,ValueType>::SNMGcsrmv(SNMGinfo & env_, size_t* part_off_,
IndexType * off_, IndexType * ind_, ValueType * val_, ValueType ** x):
env(env_), part_off(part_off_), off(off_), ind(ind_), val(val_) {
sync_all();
stream = nullptr;
i = env.get_thread_num();
p = env.get_num_threads();
v_glob = part_off[p];
v_loc = part_off[i+1]-part_off[i];
IndexType tmp;
cudaMemcpy(&tmp, &off[v_loc], sizeof(IndexType),cudaMemcpyDeviceToHost);
cudaCheckError();
e_loc = tmp;
// Allocate the local result
ALLOC_TRY ((void**)&y_loc, v_loc*sizeof(ValueType), stream);
ValueType h_one = 1.0;
ValueType h_zero = 0.0;
spmv.setup(v_loc, v_glob, e_loc, &h_one, val, off, ind, x[i], &h_zero, y_loc);
}
template <typename IndexType, typename ValueType>
SNMGcsrmv<IndexType,ValueType>::~SNMGcsrmv() {
ALLOC_FREE_TRY(y_loc, stream);
}
template <typename IndexType, typename ValueType>
void SNMGcsrmv<IndexType,ValueType>::run (ValueType ** x) {
sync_all();
ValueType h_one = 1.0;
ValueType h_zero = 0.0;
spmv.run(v_loc, v_glob, e_loc, &h_one, val, off, ind, x[i], &h_zero, y_loc);
#ifdef SNMG_DEBUG
print_mem_usage();
#pragma omp master
{std::cout << omp_get_wtime() - t << " ";}
Wait for all local spmv
t = omp_get_wtime();
sync_all();
#pragma omp master
{std::cout << omp_get_wtime() - t << " ";}
Update the output vector
#endif
allgather (env, part_off, y_loc, x);
}
template class SNMGcsrmv<int, double>;
template class SNMGcsrmv<int, float>;
template <typename idx_t,typename val_t>
gdf_error gdf_snmg_csrmv_impl (size_t * part_offsets, gdf_column * off, gdf_column * ind, gdf_column * val, gdf_column ** x_cols){
GDF_REQUIRE( part_offsets != nullptr, GDF_INVALID_API_CALL );
GDF_REQUIRE( off != nullptr, GDF_INVALID_API_CALL );
GDF_REQUIRE( ind != nullptr, GDF_INVALID_API_CALL );
GDF_REQUIRE( val != nullptr, GDF_INVALID_API_CALL );
GDF_REQUIRE( x_cols != nullptr, GDF_INVALID_API_CALL );
GDF_REQUIRE( off->size > 0, GDF_INVALID_API_CALL );
GDF_REQUIRE( ind->size > 0, GDF_INVALID_API_CALL );
GDF_REQUIRE( val->size > 0, GDF_INVALID_API_CALL );
GDF_REQUIRE( ind->size == val->size, GDF_COLUMN_SIZE_MISMATCH );
GDF_REQUIRE( off->dtype == ind->dtype, GDF_UNSUPPORTED_DTYPE );
GDF_REQUIRE( off->null_count + ind->null_count + val->null_count == 0 , GDF_VALIDITY_UNSUPPORTED );
auto p = omp_get_num_threads();
val_t* x[p];
for (auto i = 0; i < p; ++i)
{
GDF_REQUIRE( x_cols[i] != nullptr, GDF_INVALID_API_CALL );
GDF_REQUIRE( x_cols[i]->size > 0, GDF_INVALID_API_CALL );
x[i]= static_cast<val_t*>(x_cols[i]->data);
}
#pragma omp master
{
Cusparse::get_handle();
}
SNMGinfo snmg_env;
SNMGcsrmv<idx_t,val_t> spmv_solver(snmg_env, part_offsets,
static_cast<idx_t*>(off->data),
static_cast<idx_t*>(ind->data),
static_cast<val_t*>(val->data),
x);
spmv_solver.run(x);
#pragma omp master
{
Cusparse::destroy_handle();
}
return GDF_SUCCESS;
}
} //namespace
gdf_error gdf_snmg_csrmv (size_t * part_offsets, gdf_column * off, gdf_column * ind, gdf_column * val, gdf_column ** x_cols){
switch (val->dtype) {
case GDF_FLOAT32: return cugraph::gdf_snmg_csrmv_impl<int32_t,float>(part_offsets, off, ind, val, x_cols);
case GDF_FLOAT64: return cugraph::gdf_snmg_csrmv_impl<int32_t,double>(part_offsets, off, ind, val, x_cols);
default: return GDF_UNSUPPORTED_DTYPE;
}
}
|
2613668167250fe52d6a93716d8eb213d2bc4f66.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
using namespace std;
__global__ void upsweep(int twod, int offset, int* output)
{
int index = threadIdx.x;
if (index < twod) {
int ai = offset * (2 * index + 1) - 1;
int bi = offset * (2 * index + 2) - 1;
output[bi] += output[ai];
}
}
__global__ void downsweep(int twod, int offset, int* output)
{
int index = threadIdx.x;
if (index < twod) {
int ai = offset * (2 * index + 1) - 1;
int bi = offset * (2 * index + 2) - 1;
int t = output[ai];
output[ai] = output[bi];
output[bi] += t;
}
}
__global__ void pairs_repeat(int n, int* x, int* x_shift, int* repeat)
{
int index = threadIdx.x;
if (index > 0)
repeat[index-1] = x[index] == x_shift[index];
}
extern void use_upsweep(int twod, int offset, int* output)
{
int threadsPerBlock = 1024;
//int numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
upsweep << <1, threadsPerBlock >> > (twod, offset, output);
hipDeviceSynchronize();
}
extern void use_downsweep(int twod, int offset, int* output)
{
int threadsPerBlock = 1024;
//int numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
downsweep << <1, threadsPerBlock >> > (twod, offset, output);
hipDeviceSynchronize();
}
extern void use_pairs_repeat(int n, int* x, int* x_shift, int* repeat)
{
int threadsPerBlock = 1024;
//int numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
pairs_repeat << <1, threadsPerBlock >> > (n, x, x_shift, repeat);
hipDeviceSynchronize();
}
| 2613668167250fe52d6a93716d8eb213d2bc4f66.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <iostream>
using namespace std;
__global__ void upsweep(int twod, int offset, int* output)
{
int index = threadIdx.x;
if (index < twod) {
int ai = offset * (2 * index + 1) - 1;
int bi = offset * (2 * index + 2) - 1;
output[bi] += output[ai];
}
}
__global__ void downsweep(int twod, int offset, int* output)
{
int index = threadIdx.x;
if (index < twod) {
int ai = offset * (2 * index + 1) - 1;
int bi = offset * (2 * index + 2) - 1;
int t = output[ai];
output[ai] = output[bi];
output[bi] += t;
}
}
__global__ void pairs_repeat(int n, int* x, int* x_shift, int* repeat)
{
int index = threadIdx.x;
if (index > 0)
repeat[index-1] = x[index] == x_shift[index];
}
extern void use_upsweep(int twod, int offset, int* output)
{
int threadsPerBlock = 1024;
//int numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
upsweep << <1, threadsPerBlock >> > (twod, offset, output);
cudaDeviceSynchronize();
}
extern void use_downsweep(int twod, int offset, int* output)
{
int threadsPerBlock = 1024;
//int numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
downsweep << <1, threadsPerBlock >> > (twod, offset, output);
cudaDeviceSynchronize();
}
extern void use_pairs_repeat(int n, int* x, int* x_shift, int* repeat)
{
int threadsPerBlock = 1024;
//int numberOfBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
pairs_repeat << <1, threadsPerBlock >> > (n, x, x_shift, repeat);
cudaDeviceSynchronize();
}
|
245020cab113f5b5a6a828df2c0bd5d31d723a3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/cross_entropy_kernel.h"
#ifdef __NVCC__
#include "hipcub/hipcub.hpp"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/fluid/operators/math/cross_entropy.h"
#include "paddle/fluid/operators/math/softmax.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/axis_utils.h"
#include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/gpudnn/softmax_gpudnn.h"
namespace phi {
#define ALIGN_BYTES 16
enum class SoftmaxMode { kSoftmax, kLogSoftmax, kCrossEntropy };
// Wrapper of log function. Use log(float32) for float16
template <typename T>
static __device__ __forceinline__ T Log(T x) {
using AccT = typename dtype::MPTypeTrait<T>::Type;
AccT logx = ::log(static_cast<AccT>(x));
return paddle::operators::math::TolerableValue<T>()(static_cast<T>(logx));
}
// Wrapper of exp function. Use exp(float32) for float16
template <typename T>
static __device__ __forceinline__ T Exp(T x) {
using AccT = typename dtype::MPTypeTrait<T>::Type;
AccT expx = ::exp(static_cast<AccT>(x));
return paddle::operators::math::TolerableValue<T>()(static_cast<T>(expx));
}
template <typename Tx, typename Ty = Tx>
struct ExpAddFunctor {
HOSTDEVICE inline ExpAddFunctor(Tx max) : max(max) {}
HOSTDEVICE inline Ty operator()(const Tx& sum, const Tx& x) const {
return static_cast<Ty>(sum + ::exp(x - max));
}
private:
Tx max;
};
/*
Cross entropy soft label with dynamic size on axis (log2_elements is
varibale).
- if the input is softmaxcompute loss with softmax
- if the input is log_softmax, compute loss with log_softmax and update
softmax
*/
template <typename T, typename VecT, bool InLogMode = false>
__global__ void CrossEntropySoftLabel(T* loss,
T* softmaxwrt,
const T* softmax,
const T* labels,
const int n,
const int dim,
const int d,
int log2_elements) {
const int kDimCeil = 1 << log2_elements;
const int kVSize = sizeof(VecT) / sizeof(T);
#ifdef __HIPCC__
const int kThreadPerBlock = 256;
#else
const int kThreadPerBlock = 512;
#endif
const int kBatchPerBlock = 1;
const int kWarpSize = 32; // (dim < 32) ? dim : 32;
const int kBatchSize = 1;
const int kThreadPerBatch = kThreadPerBlock / kBatchPerBlock;
const int kWarpPerBatch = kThreadPerBatch / kWarpSize;
const int kIterations = (dim + kThreadPerBatch - 1) / kThreadPerBatch;
const int kIterationsV = (kIterations >= kVSize) ? (kIterations / kVSize) : 1;
const int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
T sum[kBatchSize]{static_cast<T>(0.0)};
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
int ids = first_batch + i;
if (ids >= n * d) break;
int idx_n = ids / d;
int idx_d = ids % d;
#pragma unroll
for (int it = 0; it < kIterations; ++it) {
int idx_dim = it * kThreadPerBatch + threadIdx.x;
int idx = idx_n * dim * d + idx_dim * d + idx_d;
if (idx_n < n && idx_dim < dim) {
VecT softmaxdata;
if (InLogMode) {
softmaxdata = reinterpret_cast<VecT*>(&softmaxwrt[idx])[0];
} else {
softmaxdata = reinterpret_cast<const VecT*>(&softmax[idx])[0];
}
VecT labelsdata = reinterpret_cast<const VecT*>(&labels[idx])[0];
T* softmaxptr = reinterpret_cast<T*>(&softmaxdata);
T* labelsptr = reinterpret_cast<T*>(&labelsdata);
#pragma unroll
for (int s = 0; s < kVSize; s++) {
if (InLogMode) {
sum[i] -= softmaxptr[s] * labelsptr[s];
softmaxptr[s] = Exp(softmaxptr[s]);
} else {
sum[i] -= Log(softmaxptr[s]) * labelsptr[s];
}
}
if (InLogMode) {
reinterpret_cast<VecT*>(&softmaxwrt[idx])[0] = softmaxdata;
}
}
}
}
phi::WarpReduceSum<T, kBatchSize, kWarpSize>(sum);
__syncthreads();
__shared__ T sumshare[kWarpPerBatch][kBatchPerBlock][kBatchSize];
if (threadIdx.x % kWarpSize == 0) {
#pragma unroll
for (int i = 0; i < kBatchSize; i++) {
sumshare[threadIdx.x / kWarpSize][threadIdx.y][i] = sum[i];
}
}
__syncthreads();
// write
if (threadIdx.x == 0) {
for (int i = 0; i < kBatchSize; i++) {
int ids = first_batch + i;
if (ids < n * d) {
loss[ids] = sumshare[0][threadIdx.y][i];
for (int s = 1; s < kWarpPerBatch; s++) {
loss[ids] += sumshare[s][threadIdx.y][i];
}
}
}
}
}
/*
Hard label cross entropy.
*/
template <typename T, typename LabelT, bool IgnoreIndex>
__global__ void CrossEntropyHardLabel(T* loss,
const T* softmax,
const LabelT* labels,
const int n,
const int dim,
const int d,
const int ignore_idx) {
int64_t ids = blockIdx.x * blockDim.x + threadIdx.x;
int64_t idx_n = ids / d;
int64_t idx_d = ids % d;
// thread ids compute loss[ids] using softmax[idx]
if (ids < n * d) {
auto lbl = static_cast<int64_t>(labels[ids]);
if (lbl < 0) { // label is negative
loss[ids] = static_cast<T>(0.0);
} else { // label is positive of zero
int64_t idx = idx_n * dim * d + lbl * d + idx_d;
if (IgnoreIndex == true) {
// IgnoreIndex is true
if (lbl == ignore_idx) {
loss[ids] = static_cast<T>(0.0);
} else {
loss[ids] = -Log(softmax[idx]);
}
} else {
// IgnoreIndex is false
loss[ids] = -Log(softmax[idx]);
}
}
}
}
/*
Hard label cross entropy with exp.
Input: log softmax
Output: loss and exp(input)
*/
template <typename T, typename LabelT, bool IgnoreIndex>
__global__ void CrossEntropyExpHardLabel(T* loss,
T* softmax,
const LabelT* labels,
const int n,
const int dim,
const int d,
const int ignore_idx) {
int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
int64_t idx_n = idx / (d * dim);
int64_t idx_dim = (idx / d) % dim;
int64_t idx_d = idx % d;
int64_t ids = idx_n * d + idx_d;
if (idx < n * dim * d) {
auto lbl = static_cast<int64_t>(labels[ids]);
if (IgnoreIndex == true) {
// IgnoreIndex is true
if (idx_dim == lbl) {
if (lbl == ignore_idx) {
loss[ids] = static_cast<T>(0.0);
} else {
loss[ids] = -softmax[idx];
}
}
} else {
// IgnoreIndex is false
if (lbl >= 0 && lbl < dim) {
if (lbl == idx_dim) {
loss[ids] = -softmax[idx];
}
} else {
loss[ids] = static_cast<T>(0.0);
}
}
softmax[idx] = Exp(softmax[idx]);
}
}
template <typename T, typename AccT, int VecSize, class ReduceFunctor>
__device__ __forceinline__ AccT ThreadReduce(const T* input,
int size,
const int offset,
AccT init,
ReduceFunctor reducer) {
using VecT = kps::details::VectorType<T, VecSize>;
int tid = threadIdx.x;
AccT val = init;
if (offset > 0) {
input -= offset;
size += offset;
if (tid >= offset) {
val = reducer(val, input[tid]);
}
size -= blockDim.x;
input += blockDim.x;
}
int remain = size % (VecSize * blockDim.x);
T ins[VecSize];
VecT* ins_vec = reinterpret_cast<VecT*>(&ins);
// vector part
for (; VecSize * tid < (size - remain); tid += blockDim.x) {
*ins_vec = reinterpret_cast<const VecT*>(input)[tid];
#pragma unroll
for (int i = 0; i < VecSize; ++i) {
val = reducer(val, ins[i]);
}
}
// scalar part
tid = size - remain + threadIdx.x;
for (; tid < size; tid += blockDim.x) {
val = reducer(val, input[tid]);
}
return val;
}
template <typename T, bool IgnoreIndex>
__device__ __forceinline__ void ComputeLoss(T* loss,
const T loss_value,
const int label_id,
const int64_t label_value,
const int tid,
const int vec_size,
const int offset,
const int ignore_index) {
int loss_id = vec_size * tid + offset;
if (IgnoreIndex) {
if (label_value == loss_id) {
if (label_value == ignore_index) {
loss[label_id] = static_cast<T>(0.0f);
} else {
loss[label_id] = loss_value;
}
}
} else {
if (label_value == loss_id) {
loss[label_id] = loss_value;
}
}
}
template <typename T,
typename AccT,
typename LabelT,
int VecSize,
bool IgnoreIndex>
__device__ __forceinline__ void VectorizedSoftmaxForwardImpl(
T* loss,
T* softmax,
const T* logits,
const LabelT* label,
int size,
const int offset,
const phi::LogSoftmaxForwardFunctor<AccT>& func,
const int ignore_index) {
using VecT = kps::details::VectorType<T, VecSize>;
int tid = threadIdx.x;
int label_id = blockIdx.x;
auto label_value = static_cast<int64_t>(label[label_id]);
const bool label_valid = label_value >= 0 && label_value < size;
int loss_id_offset = 0;
if (offset > 0) {
logits -= offset;
softmax -= offset;
size += offset;
loss_id_offset -= offset;
if (tid >= offset) {
AccT log_softmax = func(static_cast<AccT>(logits[tid]));
softmax[tid] = static_cast<T>(::exp(log_softmax));
// loss
if (label_valid) {
ComputeLoss<T, IgnoreIndex>(loss,
static_cast<T>(-log_softmax),
label_id,
label_value,
tid,
1,
loss_id_offset,
ignore_index);
}
}
size -= blockDim.x;
logits += blockDim.x;
softmax += blockDim.x;
loss_id_offset += blockDim.x;
}
int remain = size % (VecSize * blockDim.x);
T ins[VecSize];
T outs[VecSize];
VecT* ins_vec = reinterpret_cast<VecT*>(&ins);
VecT* outs_vec = reinterpret_cast<VecT*>(&outs);
// vector part
for (; VecSize * tid < (size - remain); tid += blockDim.x) {
// read
*ins_vec = reinterpret_cast<const VecT*>(logits)[tid];
#pragma unroll
// compute
for (int i = 0; i < VecSize; ++i) {
AccT log_softmax = func(static_cast<AccT>(ins[i]));
outs[i] = static_cast<T>(::exp(log_softmax));
// loss
if (label_valid) {
ComputeLoss<T, IgnoreIndex>(loss,
static_cast<T>(-log_softmax),
label_id,
label_value,
tid,
VecSize,
loss_id_offset + i,
ignore_index);
}
}
// write
reinterpret_cast<VecT*>(softmax)[tid] = *outs_vec;
}
// scalar part
tid = size - remain + threadIdx.x;
for (; tid < size; tid += blockDim.x) {
AccT log_softmax = func(static_cast<AccT>(logits[tid]));
softmax[tid] = static_cast<T>(::exp(log_softmax));
// loss
if (label_valid) {
ComputeLoss<T, IgnoreIndex>(loss,
static_cast<T>(-log_softmax),
label_id,
label_value,
tid,
1,
loss_id_offset,
ignore_index);
}
}
// invalid label, write once
if (!label_valid && threadIdx.x == 0) {
loss[label_id] = static_cast<T>(0.0f);
}
}
template <typename T,
typename AccT,
typename LabelT,
int VecSize,
bool IgnoreIndex>
__device__ __forceinline__ void ScalarSoftmaxForwardImpl(
T* loss,
T* softmax,
const T* logits,
const LabelT* label,
const int size,
const phi::LogSoftmaxForwardFunctor<AccT>& func,
const int ignore_index) {
int tid = threadIdx.x;
int remain = size % (VecSize * blockDim.x);
int label_id = blockIdx.x;
auto label_value = static_cast<int64_t>(label[label_id]);
const bool label_valid = label_value >= 0 && label_value < size;
// main part
for (; tid < (size - remain); tid += VecSize * blockDim.x) {
T ins[VecSize];
#pragma unroll
for (int i = 0; i < VecSize; ++i) {
ins[i] = logits[tid + i * blockDim.x];
}
#pragma unroll
for (int i = 0; i < VecSize; ++i) {
AccT log_softmax = func(static_cast<AccT>(ins[i]));
softmax[tid + i * blockDim.x] = static_cast<T>(::exp(log_softmax));
// loss
if (label_valid) {
ComputeLoss<T, IgnoreIndex>(loss,
static_cast<T>(-log_softmax),
label_id,
label_value,
tid,
VecSize,
i,
ignore_index);
}
}
}
// tail part
for (; tid < size; tid += blockDim.x) {
AccT log_softmax = func(static_cast<AccT>(logits[tid]));
softmax[tid] = static_cast<T>(::exp(log_softmax));
// loss
if (label_valid) {
ComputeLoss<T, IgnoreIndex>(loss,
static_cast<T>(-log_softmax),
label_id,
label_value,
tid,
1,
0,
ignore_index);
}
}
// invalid label, write once
if (!label_valid && threadIdx.x == 0) {
loss[label_id] = static_cast<T>(0.0f);
}
}
template <typename T,
typename AccT,
typename LabelT,
int VecSize,
bool IgnoreIndex>
__global__ void VectorizedSoftmaxForward(T* loss,
T* softmax,
const T* logits,
const LabelT* label,
const int high_dim,
const int mid_dim,
const int ignore_index) {
using VecT = kps::details::VectorType<T, VecSize>;
// each block deal with one batch
logits += blockIdx.x * mid_dim;
softmax += blockIdx.x * mid_dim;
const int input_offset = ((uint64_t)logits) % ALIGN_BYTES / sizeof(T);
const int output_offset = ((uint64_t)softmax) % ALIGN_BYTES / sizeof(T);
// 1. reduce max
AccT max = ThreadReduce<T, AccT, VecSize, kps::MaxFunctor<AccT>>(
logits,
mid_dim,
input_offset,
-std::numeric_limits<AccT>::infinity(),
kps::MaxFunctor<AccT>());
max = kps::details::BlockXReduce<AccT, kps::MaxFunctor<AccT>>(
max, kps::MaxFunctor<AccT>());
// 2. reduce sum
AccT sum = ThreadReduce<T, AccT, VecSize, ExpAddFunctor<AccT>>(
logits,
mid_dim,
input_offset,
static_cast<AccT>(0),
ExpAddFunctor<AccT>(max));
sum = kps::details::BlockXReduce<AccT, kps::AddFunctor<AccT>>(
sum, kps::AddFunctor<AccT>());
// 3. softmax
phi::LogSoftmaxForwardFunctor<AccT> func(max, sum);
if (input_offset == output_offset) {
VectorizedSoftmaxForwardImpl<T, AccT, LabelT, VecSize, IgnoreIndex>(
loss,
softmax,
logits,
label,
mid_dim,
input_offset,
func,
ignore_index);
} else {
ScalarSoftmaxForwardImpl<T, AccT, LabelT, VecSize, IgnoreIndex>(
loss, softmax, logits, label, mid_dim, func, ignore_index);
}
}
/*
Core function of softmax with cross entropy forward soft label.
The computation includes
- Compute maximum of batch: maxvalue_{i} = max_j src_{i,j}
- Compute sum of exp batch: s_{i} = sum_{j}{ exp(src_{i,j} - maxvalue_{i} }
- Compute: sum of - sum_{j}{ label_{i,j} * (src_{i,j} - maxvalue_{i} -
log(sum[i]))}
One warp (32 threads) is used to compute 1 or 2 batch (kBatchSize).
For reduction max (sum), firstly compute max (sum) to one warp, then use shuffle
api to compute max (sum) in one warp.
*/
template <typename T, typename VecT, typename AccT, int Log2Elements>
__global__ void WarpSoftmaxForwardSoftLabel(T* loss,
T* softmax,
const T* src,
const T* label,
const int batch_size,
const int stride,
const int element_count) {
const bool LogMode = true;
constexpr int kDimCeil = 1 << Log2Elements;
constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
constexpr int kVSize = sizeof(VecT) / sizeof(T);
constexpr int kIterations = kDimCeil / kWarpSize;
constexpr int kIterationsV =
(kIterations >= kVSize) ? (kIterations / kVSize) : 1;
constexpr int kBatchSize = (kDimCeil <= 128) ? 2 : 1;
int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
int local_batches = batch_size - first_batch;
if (local_batches > kBatchSize) {
local_batches = kBatchSize;
}
// read data from global memory
VecT srcdata[kBatchSize][kIterationsV];
VecT labeldata[kBatchSize][kIterationsV];
for (int i = 0; i < kBatchSize; ++i) {
const VecT* src_v =
reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]);
const VecT* label_v =
reinterpret_cast<const VecT*>(&label[(first_batch + i) * stride]);
// max index to read
int idx_max = (i < local_batches) ? element_count : 0;
int idx_max_v = idx_max / kVSize;
// read data
for (int it = 0; it < kIterationsV; ++it) {
int src_idx = threadIdx.x + it * kWarpSize;
if (src_idx < idx_max_v) {
srcdata[i][it] = src_v[src_idx];
labeldata[i][it] = label_v[src_idx];
} else {
#pragma unroll
for (int s = 0; s < kVSize; s++) {
reinterpret_cast<T*>(&srcdata[i][it])[s] =
-std::numeric_limits<AccT>::max();
reinterpret_cast<T*>(&labeldata[i][it])[s] = 0.0;
}
}
}
}
// compute max value
AccT max_value[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
max_value[i] = -std::numeric_limits<AccT>::infinity();
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
T* srcptr_v = reinterpret_cast<T*>(&srcdata[i][it]);
T valmax = srcptr_v[0];
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
valmax = (valmax > srcptr_v[s]) ? valmax : srcptr_v[s];
}
max_value[i] = (max_value[i] > static_cast<AccT>(valmax))
? max_value[i]
: static_cast<AccT>(valmax);
}
}
phi::WarpReduceMax<AccT, kBatchSize, kWarpSize>(max_value);
// compute sum
AccT sum[kBatchSize]{0.0};
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
T* srcptr_v = reinterpret_cast<T*>(&srcdata[i][it]);
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (LogMode) {
sum[i] += ::exp(static_cast<AccT>(srcptr_v[s]) - max_value[i]);
} else {
srcptr_v[s] = ::exp(static_cast<AccT>(srcptr_v[s]) - max_value[i]);
sum[i] += static_cast<AccT>(srcptr_v[s]);
}
}
}
}
phi::WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum);
// log_softmax and loss
AccT sumloss[kBatchSize]{0.0};
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
if (i >= local_batches) break;
VecT* softmax_v =
reinterpret_cast<VecT*>(&softmax[(first_batch + i) * stride]);
// max index to write
int idx_max = (i < local_batches) ? element_count : 0;
int idx_max_v = idx_max / kVSize;
if (LogMode) {
sum[i] = ::log(sum[i]);
}
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
T* srcvp = reinterpret_cast<T*>(&srcdata[i][it]);
T* labelvp = reinterpret_cast<T*>(&labeldata[i][it]);
VecT tmpv;
T* tmpvp = reinterpret_cast<T*>(&tmpv);
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (LogMode) {
AccT logsoftmax = static_cast<AccT>(srcvp[s]) - max_value[i] - sum[i];
sumloss[i] -= logsoftmax * static_cast<AccT>(labelvp[s]);
tmpvp[s] = ::exp(logsoftmax);
} else {
tmpvp[s] = static_cast<AccT>(srcvp[s]) / sum[i];
}
}
int idx = threadIdx.x + it * kWarpSize;
if (idx < idx_max_v) {
softmax_v[idx] = tmpv;
}
}
}
// loss
phi::WarpReduceSum<AccT, kBatchSize, kWarpSize>(sumloss);
for (int i = 0; i < kBatchSize; i++) {
if (i >= local_batches) break;
loss[first_batch + i] = sumloss[i];
}
}
#define SOFTMAX_WARP_FORWARD_SOFT_CASE(Log2Elements, VecT, AccT) \
case Log2Elements: \
hipLaunchKernelGGL(( WarpSoftmaxForwardSoftLabel<T, VecT, AccT, Log2Elements>) \
, dim3(blocks), dim3(threads), 0, stream, \
loss, softmax, src, label, batch_size, stride, element_count); \
break;
/*
Wrapper of softmax with cross entropy forward soft label.
*/
template <typename T>
void SwitchWarpSoftmaxForwardSoftLabel(const int blocks,
const dim3 threads,
gpuStream_t stream,
T* loss,
T* softmax,
const T* src,
const T* label,
const int batch_size,
const int stride,
const int element_count,
const int log2_elements) {
using AccT = typename dtype::MPTypeTrait<T>::Type;
switch (log2_elements) {
SOFTMAX_WARP_FORWARD_SOFT_CASE(0, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(1, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(2, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(3, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(4, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(5, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(6, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(7, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(8, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(9, T, AccT);
default:
break;
}
}
template <typename T>
static void SoftmaxWithCrossEntropySoftLabel(const GPUContext& dev_ctx,
const int rank,
const int axis,
const T* logits_data,
const T* labels_data,
T* softmax_data,
T* loss_data,
int N,
int dim,
int D) {
#ifdef __HIPCC__
constexpr int kMaxBlockDim = 256;
#else
constexpr int kMaxBlockDim = 512;
#endif
int64_t block_dim = dim >= kMaxBlockDim
? kMaxBlockDim
: (1 << static_cast<int>(std::log2(dim)));
int64_t grid_dim = N * D;
constexpr int max_dim = 320;
const int kDimLog2 = static_cast<int>(Log2Ceil(dim));
const int kDimCeil = 1 << kDimLog2;
auto stream = dev_ctx.stream();
if (D == 1 && dim <= max_dim) {
int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
int batches_per_warp = (kDimCeil <= 128) ? 2 : 1;
// use 128 threads per block to maximimize gpu utilization
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / kWarpSize);
int batches_per_block = warps_per_block * batches_per_warp;
int blocks = (N + batches_per_block - 1) / batches_per_block;
dim3 threads(kWarpSize, warps_per_block, 1);
SwitchWarpSoftmaxForwardSoftLabel<T>(blocks,
threads,
stream,
loss_data,
softmax_data,
logits_data,
labels_data,
N,
dim,
dim,
kDimLog2);
} else {
ScopedTensorDescriptor desc;
std::vector<int> tensor_dims = {N, dim, D, 1};
GPUDNNDataLayout layout = GPUDNNDataLayout::kNCHW;
#ifdef PADDLE_WITH_HIP
miopenTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#else
cudnnTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#endif
auto handle = dev_ctx.cudnn_handle();
#ifdef PADDLE_WITH_HIP
auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE
: MIOPEN_SOFTMAX_MODE_CHANNEL;
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::miopenSoftmaxForward_V2(
handle,
paddle::platform::CudnnDataType<T>::kOne(),
descp,
logits_data,
paddle::platform::CudnnDataType<T>::kZero(),
descp,
softmax_data,
MIOPEN_SOFTMAX_LOG,
mode));
#else
auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
: CUDNN_SOFTMAX_MODE_CHANNEL;
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnSoftmaxForward(
handle,
CUDNN_SOFTMAX_LOG,
mode,
paddle::platform::CudnnDataType<T>::kOne(),
descp,
logits_data,
paddle::platform::CudnnDataType<T>::kZero(),
descp,
softmax_data));
#endif
const int kDimLog2 = static_cast<int>(Log2Ceil(dim));
const int kDimCeil = 1 << kDimLog2;
#ifdef __HIPCC__
int kThreadPerBlock = 256;
#else
int kThreadPerBlock = 512;
#endif
int kBatchPerBlock = 1;
int blocks = (N * D + kBatchPerBlock - 1) / kBatchPerBlock;
dim3 threads(kThreadPerBlock / kBatchPerBlock, kBatchPerBlock, 1);
hipLaunchKernelGGL(( CrossEntropySoftLabel<T, T, true>), dim3(blocks), dim3(threads), 0, stream,
loss_data, softmax_data, NULL, labels_data, N, dim, D, kDimLog2);
}
}
/*
Core function of softmax with cross entropy forward
- softmax, SoftmaxMode=kSoftmax
- log softmax, SoftmaxMode=kLogSoftmax
- softmax with cross entropy hard label, SoftmaxMode=kCrossEntropy
The computation includes
- Compute max value: maxvalue_{i} = max_j src_{i,j}
- Compute sum of exp: s_{i} = sum_{j}{e^{src_{i,j} - maxvalue_{i}}}
- Compute: softmax_{i,j} = e^{src_{i,j} - maxvalue_{i}} / s_{i}
- Compute: logsoftmax_{i,j} = src_{i,j} - maxvalue_{i} - log(s_{i})
- Compute: loss_{i} = -logsoftmax[i,label[i]] (Hard label)
This computation results from following formula:
softmax_{i,j} = e^{src_{i,j}} / sum_{j}{e^{src_{i,j}}}
= e^{src_{i,j} - maxvalue_{i}}
/ sum_{j}{e^{src_{i,j} - maxvalue_{i}}}
= e^{src_{i,j} - maxvalue_{i}} / s_{i}
logsoftmax_{i,j} = log(softmax_{i,j})
= src_{i,j} - maxvalue_{i} - log(s_{i})
One warp (32 threads) is used to compute 1 or 2 batch (kBatchSize).
For reduction max (sum), firstly compute max (sum) to one warp, then use
shuffle api to compute max (sum) in one warp.
*/
template <typename T,
typename LabelT,
typename VecT,
typename AccT,
int Log2Elements,
SoftmaxMode mode,
bool IgnoreIndex>
__global__ void WarpSoftmaxForward(T* loss,
T* softmax,
const T* src,
const LabelT* label,
const int batch_size,
const int stride,
const int element_count,
const int ignore_index) {
constexpr int kDimCeil = 1 << Log2Elements;
constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
constexpr int kVSize = sizeof(VecT) / sizeof(T);
constexpr int kIterations = kDimCeil / kWarpSize;
constexpr int kIterationsV =
(kIterations >= kVSize) ? (kIterations / kVSize) : 1;
constexpr int kBatchSize = (kDimCeil <= 128) ? 2 : 1;
int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
// max index to read
int idx_max_v[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; i++) {
int idx_max = ((i + first_batch) < batch_size) ? element_count : 0;
idx_max_v[i] = idx_max / kVSize;
}
// read data from global memory
AccT srcdata[kBatchSize][kIterationsV][kVSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
// read data to srcdata: - KVSize==1, - KVSize>1
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
int src_idx = threadIdx.x + it * kWarpSize;
if (kVSize == 1) {
if (src_idx < idx_max_v[i]) {
srcdata[i][it][0] =
static_cast<AccT>(src[(first_batch + i) * stride + src_idx]);
} else {
srcdata[i][it][0] = -std::numeric_limits<AccT>::infinity();
}
} else {
const VecT* src_v =
reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]);
if (src_idx < idx_max_v[i]) {
VecT srctmp = src_v[src_idx];
const T* srcinptr = reinterpret_cast<const T*>(&srctmp);
#pragma unroll
for (int s = 0; s < kVSize; s++) {
srcdata[i][it][s] = static_cast<AccT>(srcinptr[s]);
}
} else {
#pragma unroll
for (int s = 0; s < kVSize; s++) {
srcdata[i][it][s] = -std::numeric_limits<AccT>::infinity();
}
}
}
}
}
// compute max value: maxvalue_{i} = max_j src_{i,j}
AccT max_value[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
// it = 0
AccT valmax = srcdata[i][0][0];
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
valmax = (valmax > srcdata[i][0][s]) ? valmax : srcdata[i][0][s];
}
max_value[i] = valmax;
// it = 1, 2, ...
#pragma unroll
for (int it = 1; it < kIterationsV; ++it) {
AccT valmax = srcdata[i][it][0];
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
valmax = (valmax > srcdata[i][it][s]) ? valmax : srcdata[i][it][s];
}
max_value[i] = (max_value[i] > valmax) ? max_value[i] : valmax;
}
}
phi::WarpReduceMax<AccT, kBatchSize, kWarpSize>(max_value);
// compute sum: s_{i} = sum_{j}{ exp(src_{i,j} - maxvalue_{i} }
AccT sum[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
// it = 0
if (mode == SoftmaxMode::kLogSoftmax ||
mode == SoftmaxMode::kCrossEntropy) {
sum[i] = ::exp(srcdata[i][0][0] - max_value[i]);
} else {
srcdata[i][0][0] = ::exp(srcdata[i][0][0] - max_value[i]);
sum[i] = srcdata[i][0][0];
}
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
if (mode == SoftmaxMode::kLogSoftmax ||
mode == SoftmaxMode::kCrossEntropy) {
sum[i] += ::exp(srcdata[i][0][s] - max_value[i]);
} else {
srcdata[i][0][s] = ::exp(srcdata[i][0][s] - max_value[i]);
sum[i] += srcdata[i][0][s];
}
}
// it = 1, 2, ...
#pragma unroll
for (int it = 1; it < kIterationsV; ++it) {
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (mode == SoftmaxMode::kLogSoftmax ||
mode == SoftmaxMode::kCrossEntropy) {
sum[i] += ::exp(srcdata[i][it][s] - max_value[i]);
} else {
srcdata[i][it][s] = ::exp(srcdata[i][it][s] - max_value[i]);
sum[i] += srcdata[i][it][s];
}
}
}
}
phi::WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum);
// write data
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
if (mode == SoftmaxMode::kLogSoftmax ||
mode == SoftmaxMode::kCrossEntropy) {
sum[i] = ::log(sum[i]);
}
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
int idx = threadIdx.x + it * kWarpSize;
if (kVSize == 1) { // kVSize==1
if (idx < idx_max_v[i]) {
if (mode == SoftmaxMode::kLogSoftmax) { // log softmax
softmax[(first_batch + i) * stride + idx] =
srcdata[i][it][0] - max_value[i] - sum[i];
// softmax with cross entropy hard label
} else if (mode == SoftmaxMode::kCrossEntropy) {
AccT logsoftmax = srcdata[i][it][0] - max_value[i] - sum[i];
// softmax
softmax[(first_batch + i) * stride + idx] = ::exp(logsoftmax);
// label
int loss_idx = (threadIdx.x + it * kWarpSize) * kVSize;
auto lbl = static_cast<int64_t>(label[first_batch + i]);
if (IgnoreIndex == true) {
// IgnoreIndex is true
if (lbl == loss_idx) {
if (lbl != ignore_index) {
loss[first_batch + i] = -logsoftmax;
} else {
loss[first_batch + i] = static_cast<T>(0.0);
}
}
} else {
// IgnoreIndex is false
if (lbl >= 0 && lbl < element_count) {
if (lbl == loss_idx) {
loss[first_batch + i] = -logsoftmax;
}
} else {
loss[first_batch + i] = static_cast<T>(0.0);
}
}
} else { // softmax
softmax[(first_batch + i) * stride + idx] =
srcdata[i][it][0] / sum[i];
}
} else {
break;
}
} else { // KVSize>1
VecT* softmax_v =
reinterpret_cast<VecT*>(&softmax[(first_batch + i) * stride]);
VecT tmpdata;
T* tmpptr = reinterpret_cast<T*>(&tmpdata);
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (mode == SoftmaxMode::kLogSoftmax) { // log softmax
tmpptr[s] = srcdata[i][it][s] - max_value[i] - sum[i];
// softmax with cross entropy hard label
} else if (mode == SoftmaxMode::kCrossEntropy) {
AccT logsoftmax = srcdata[i][it][s] - max_value[i] - sum[i];
// softmax
tmpptr[s] = ::exp(logsoftmax);
// label
int loss_idx = (threadIdx.x + it * kWarpSize) * kVSize + s;
auto lbl = static_cast<int64_t>(label[first_batch + i]);
if (IgnoreIndex == true) {
// IgnoreIndex is true
if (lbl == loss_idx && lbl != ignore_index) {
loss[first_batch + i] = -logsoftmax;
}
} else {
// IgnoreIndex is false
if (lbl >= 0 && lbl < element_count) {
if (lbl == loss_idx) {
loss[first_batch + i] = -logsoftmax;
}
} else {
loss[first_batch + i] = static_cast<T>(0.0);
}
}
} else { // softmax
tmpptr[s] = srcdata[i][it][s] / sum[i];
}
}
if (idx < idx_max_v[i]) {
softmax_v[idx] = tmpdata;
} else {
break;
}
}
}
}
}
#define SOFTMAX_WARP_FORWARD_CASE(Log2Elements, LabelT, VecT, AccT) \
case Log2Elements: \
hipLaunchKernelGGL(( WarpSoftmaxForward<T, LabelT, VecT, AccT, Log2Elements, mode, IgnoreIndex>) \
, dim3(blocks), dim3(threads), 0, stream, loss, \
softmax, \
src, \
label, \
batch_size, \
stride, \
element_count, \
ignore_index); \
break;
/*
Wrapper of softmax with cross entropy forward hard label.
*/
template <typename T, typename LabelT, SoftmaxMode mode, bool IgnoreIndex>
void SwitchWarpSoftmaxForward(T* loss,
T* softmax,
const T* src,
const LabelT* label,
const int batch_size,
const int stride,
const int element_count,
const int ignore_index,
gpuStream_t stream) {
using AccT = typename dtype::MPTypeTrait<T>::Type;
// use 128 threads per block to maximimize gpu utilization
const int log2_elements = static_cast<int>(Log2Ceil(element_count));
const int kDimCeil = 1 << log2_elements;
int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
int batches_per_warp = (kDimCeil <= 128) ? 2 : 1;
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / kWarpSize);
int batches_per_block = warps_per_block * batches_per_warp;
int blocks = (batch_size + batches_per_block - 1) / batches_per_block;
dim3 threads(kWarpSize, warps_per_block, 1);
switch (log2_elements) {
SOFTMAX_WARP_FORWARD_CASE(0, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(1, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(2, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(3, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(4, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(5, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(6, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(7, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(8, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(9, LabelT, T, AccT);
default:
break;
}
}
template <typename T, typename LabelT, bool IgnoreIndex>
void LaunchVectorizedSoftmaxForward(T* loss,
T* softmax,
const T* logits,
const LabelT* label,
const int high_dim,
const int mid_dim,
const int ignore_index,
gpuStream_t stream) {
using AccT = typename dtype::MPTypeTrait<T>::Type;
constexpr int vec_size = sizeof(float4) / sizeof(T);
const int max_num_threads = 1024;
int max_block_size = ::min(mid_dim / vec_size, max_num_threads);
if (vec_size > 1) {
max_block_size /= 2;
}
int block_size = 1;
while (block_size < max_block_size) {
block_size *= 2;
}
block_size = ::max(block_size, kps::details::kWarpSize);
dim3 grids(high_dim);
dim3 blocks(block_size);
hipLaunchKernelGGL(( VectorizedSoftmaxForward<T, AccT, LabelT, vec_size, IgnoreIndex>)
, dim3(grids), dim3(blocks), 0, stream,
loss, softmax, logits, label, high_dim, mid_dim, ignore_index);
}
/*
Wrapper of softmax with cross entropy hard label.
- SwitchWarpSoftmaxForward for small size when axis == -1
- LaunchVectorizedSoftmaxForward for large size when axis == -1
- cudnn function for axis != -1
*/
template <typename T, typename LabelT, bool IgnoreIndex>
static void SoftmaxWithCrossEntropyHardLabel(const GPUContext& dev_ctx,
int rank,
int axis,
const T* logits_data,
const LabelT* labels_data,
T* loss_data,
T* softmax_data,
int N,
int dim,
int D,
const int ignore_index) {
auto stream = dev_ctx.stream();
constexpr int max_dim = 320;
if (D == 1) {
if (dim <= max_dim) { // small size
const SoftmaxMode mode = SoftmaxMode::kCrossEntropy;
SwitchWarpSoftmaxForward<T, LabelT, mode, IgnoreIndex>(loss_data,
softmax_data,
logits_data,
labels_data,
N,
dim,
dim,
ignore_index,
stream);
} else { // large size
LaunchVectorizedSoftmaxForward<T, LabelT, IgnoreIndex>(loss_data,
softmax_data,
logits_data,
labels_data,
N,
dim,
ignore_index,
stream);
}
} else {
ScopedTensorDescriptor desc;
std::vector<int> tensor_dims = {N, dim, D, 1};
GPUDNNDataLayout layout = GPUDNNDataLayout::kNCHW;
#ifdef PADDLE_WITH_HIP
miopenTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#else
cudnnTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#endif
auto handle = dev_ctx.cudnn_handle();
#ifdef PADDLE_WITH_HIP
auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE
: MIOPEN_SOFTMAX_MODE_CHANNEL;
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::miopenSoftmaxForward_V2(
handle,
paddle::platform::CudnnDataType<T>::kOne(),
descp,
logits_data,
paddle::platform::CudnnDataType<T>::kZero(),
descp,
softmax_data,
MIOPEN_SOFTMAX_LOG,
mode));
#else
auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
: CUDNN_SOFTMAX_MODE_CHANNEL;
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnSoftmaxForward(
handle,
CUDNN_SOFTMAX_LOG,
mode,
paddle::platform::CudnnDataType<T>::kOne(),
descp,
logits_data,
paddle::platform::CudnnDataType<T>::kZero(),
descp,
softmax_data));
#endif
int threads = 128;
int blocks = (N * dim * D + threads - 1) / threads;
// compute cross entropy, input is log softmax
hipLaunchKernelGGL(( CrossEntropyExpHardLabel<T, LabelT, IgnoreIndex>)
, dim3(blocks), dim3(threads), 0, stream,
loss_data, softmax_data, labels_data, N, dim, D, ignore_index);
}
}
template <typename T, typename LabelT>
void CrossEntropyWithSoftmaxCUDAKernel(const GPUContext& dev_ctx,
const DenseTensor& logits,
const DenseTensor& label,
bool soft_label,
bool use_softmax,
bool numeric_stable_mode,
int ignore_index,
int axis,
DenseTensor* softmax,
DenseTensor* loss) {
PADDLE_ENFORCE_EQ(
dev_ctx.GetPlace().GetType(),
AllocationType::GPU,
phi::errors::Unavailable("softmax_with_cross_entropy operator's "
"CUDA kernel only runs on GPU device."));
// do not with softmax op, and input is softmax
if (!use_softmax) {
DenseTensor* softmax_out = softmax;
const DenseTensor* softmax = &logits;
const DenseTensor& labels = label;
const int rank = softmax->dims().size();
const int axis_v = phi::funcs::CanonicalAxis(axis, rank);
const int axis_dim = softmax->dims()[axis_v];
const int n = phi::funcs::SizeToAxis(axis_v, softmax->dims());
const int d = phi::funcs::SizeFromAxis(axis_v, softmax->dims());
auto* softmax_out_data = dev_ctx.template Alloc<T>(softmax_out);
auto* loss_data = dev_ctx.template Alloc<T>(loss);
phi::funcs::SetConstant<GPUContext, T> set_constant;
set_constant(dev_ctx, loss, static_cast<T>(0));
if (axis_dim == 1) {
set_constant(dev_ctx, softmax_out, static_cast<T>(1));
return;
}
DenseTensor softmax_2d(*softmax);
softmax_2d.Resize({n, d});
DenseTensor labels_2d(labels);
labels_2d.Resize({n, labels.numel() / n});
DenseTensor loss_2d(*loss);
loss_2d.Resize({n, 1});
DenseTensor softmax_out_2d(*softmax_out);
softmax_out_2d.Resize({n, d});
// math::CrossEntropyFunctor support axis is the last
if (axis_v == -1) {
paddle::operators::math::CrossEntropyFunctor<GPUContext, T>()(
dev_ctx,
&loss_2d,
&softmax_2d,
&labels_2d,
soft_label,
ignore_index,
axis_dim);
return;
}
// if axis is not the last, we need a new impliment
if (soft_label) {
auto* logits_data = softmax->data<T>();
auto* labels_data = labels.data<T>();
const int kDimLog2 = static_cast<int>(Log2Ceil(axis_dim));
const int kDimCeil = 1 << kDimLog2;
#ifdef __HIPCC__
int kThreadPerBlock = 256;
#else
int kThreadPerBlock = 512;
#endif
int kBatchPerBlock = 1;
int blocks = (n * d + kBatchPerBlock - 1) / kBatchPerBlock;
dim3 threads(kThreadPerBlock / kBatchPerBlock, kBatchPerBlock, 1);
hipLaunchKernelGGL(( CrossEntropySoftLabel<T, T, false>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), loss_data,
NULL,
logits_data,
labels_data,
n,
axis_dim,
d / axis_dim,
kDimLog2);
} else { // HardLabel
auto* logits_data = softmax->data<T>();
auto* labels_data = labels.data<LabelT>();
int threads = 128;
int blocks = (n * d / axis_dim + threads - 1) / threads;
if (ignore_index >= 0 && ignore_index < axis_dim) {
hipLaunchKernelGGL(( CrossEntropyHardLabel<T, LabelT, true>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), loss_data,
logits_data,
labels_data,
n,
axis_dim,
d / axis_dim,
ignore_index);
} else {
hipLaunchKernelGGL(( CrossEntropyHardLabel<T, LabelT, false>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), loss_data,
logits_data,
labels_data,
n,
axis_dim,
d / axis_dim,
ignore_index);
}
}
// cause of input is softmax
// copy to output softmax, directly
phi::Copy<GPUContext>(
dev_ctx, *softmax, dev_ctx.GetPlace(), false, softmax_out);
return;
}
const int rank = logits.dims().size();
const int axis_v = phi::funcs::CanonicalAxis(axis, rank);
int axis_dim = logits.dims()[axis_v];
const int64_t n = phi::funcs::SizeToAxis(axis_v, logits.dims());
const int64_t d = phi::funcs::SizeFromAxis(axis_v, logits.dims());
auto* softmax_data = dev_ctx.template Alloc<T>(softmax);
auto* loss_data = dev_ctx.template Alloc<T>(loss);
if (axis_dim == 1) {
phi::funcs::SetConstant<GPUContext, T> set_constant;
set_constant(dev_ctx, softmax, static_cast<T>(1));
set_constant(dev_ctx, loss, static_cast<T>(0));
return;
}
if (soft_label) {
auto* logits_data = logits.data<T>();
auto* labels_data = label.data<T>();
SoftmaxWithCrossEntropySoftLabel<T>(dev_ctx,
rank,
axis_v,
logits_data,
labels_data,
softmax_data,
loss_data,
n,
axis_dim,
d / axis_dim);
} else {
if (!numeric_stable_mode) {
// CUDNN kernel only suppoer 2-D tensor and perfome softmax on last dim
DenseTensor logits_2d(logits);
logits_2d.Resize({n, d});
DenseTensor softmax_2d(*softmax);
softmax_2d.Resize({n, d});
DenseTensor labels_2d(label);
labels_2d.Resize({n, label.numel() / n});
DenseTensor loss_2d(*loss);
loss_2d.Resize({n, 1});
paddle::operators::math::SoftmaxCUDNNFunctor<T, GPUContext>()(
dev_ctx, &logits_2d, &softmax_2d);
paddle::operators::math::CrossEntropyFunctor<GPUContext, T>()(
dev_ctx,
&loss_2d,
&softmax_2d,
&labels_2d,
false,
ignore_index,
axis_dim);
} else {
auto* logits_data = logits.data<T>();
auto* labels_data = label.data<LabelT>();
if (ignore_index >= 0 && ignore_index < axis_dim) {
SoftmaxWithCrossEntropyHardLabel<T, LabelT, true>(dev_ctx,
rank,
axis_v,
logits_data,
labels_data,
loss_data,
softmax_data,
n,
axis_dim,
d / axis_dim,
ignore_index);
} else {
SoftmaxWithCrossEntropyHardLabel<T, LabelT, false>(dev_ctx,
rank,
axis_v,
logits_data,
labels_data,
loss_data,
softmax_data,
n,
axis_dim,
d / axis_dim,
ignore_index);
}
}
}
}
template <typename T, typename Context>
void CrossEntropyWithSoftmaxKernel(const Context& dev_ctx,
const DenseTensor& logits,
const DenseTensor& label,
bool soft_label,
bool use_softmax,
bool numeric_stable_mode,
int ignore_index,
int axis,
DenseTensor* softmax,
DenseTensor* loss) {
auto dtype = label.dtype();
if (soft_label) {
PADDLE_ENFORCE_EQ(
dtype,
paddle::experimental::CppTypeToDataType<T>::Type(),
phi::errors::InvalidArgument("The Input(Label) should be with the "
"same data type as Input(Logits)."));
CrossEntropyWithSoftmaxCUDAKernel<T, T>(dev_ctx,
logits,
label,
soft_label,
use_softmax,
numeric_stable_mode,
ignore_index,
axis,
softmax,
loss);
} else {
PD_VISIT_INTEGRAL_TYPES(dtype, "CrossEntropyWithSoftmaxCUDAKernel", ([&] {
CrossEntropyWithSoftmaxCUDAKernel<T, data_t>(
dev_ctx,
logits,
label,
soft_label,
use_softmax,
numeric_stable_mode,
ignore_index,
axis,
softmax,
loss);
}));
}
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(cross_entropy_with_softmax,
GPU,
ALL_LAYOUT,
phi::CrossEntropyWithSoftmaxKernel,
float,
phi::dtype::float16) {}
#else
PD_REGISTER_KERNEL(cross_entropy_with_softmax,
GPU,
ALL_LAYOUT,
phi::CrossEntropyWithSoftmaxKernel,
float,
double,
phi::dtype::float16) {}
#endif
| 245020cab113f5b5a6a828df2c0bd5d31d723a3c.cu | /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/kernels/cross_entropy_kernel.h"
#ifdef __NVCC__
#include "cub/cub.cuh"
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/fluid/operators/math/cross_entropy.h"
#include "paddle/fluid/operators/math/softmax.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/axis_utils.h"
#include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/gpudnn/softmax_gpudnn.h"
namespace phi {
#define ALIGN_BYTES 16
enum class SoftmaxMode { kSoftmax, kLogSoftmax, kCrossEntropy };
// Wrapper of log function. Use log(float32) for float16
template <typename T>
static __device__ __forceinline__ T Log(T x) {
using AccT = typename dtype::MPTypeTrait<T>::Type;
AccT logx = std::log(static_cast<AccT>(x));
return paddle::operators::math::TolerableValue<T>()(static_cast<T>(logx));
}
// Wrapper of exp function. Use exp(float32) for float16
template <typename T>
static __device__ __forceinline__ T Exp(T x) {
using AccT = typename dtype::MPTypeTrait<T>::Type;
AccT expx = std::exp(static_cast<AccT>(x));
return paddle::operators::math::TolerableValue<T>()(static_cast<T>(expx));
}
template <typename Tx, typename Ty = Tx>
struct ExpAddFunctor {
HOSTDEVICE inline ExpAddFunctor(Tx max) : max(max) {}
HOSTDEVICE inline Ty operator()(const Tx& sum, const Tx& x) const {
return static_cast<Ty>(sum + std::exp(x - max));
}
private:
Tx max;
};
/*
Cross entropy soft label with dynamic size on axis (log2_elements is
varibale).
- if the input is softmax,compute loss with softmax
- if the input is log_softmax, compute loss with log_softmax and update
softmax
*/
template <typename T, typename VecT, bool InLogMode = false>
__global__ void CrossEntropySoftLabel(T* loss,
T* softmaxwrt,
const T* softmax,
const T* labels,
const int n,
const int dim,
const int d,
int log2_elements) {
const int kDimCeil = 1 << log2_elements;
const int kVSize = sizeof(VecT) / sizeof(T);
#ifdef __HIPCC__
const int kThreadPerBlock = 256;
#else
const int kThreadPerBlock = 512;
#endif
const int kBatchPerBlock = 1;
const int kWarpSize = 32; // (dim < 32) ? dim : 32;
const int kBatchSize = 1;
const int kThreadPerBatch = kThreadPerBlock / kBatchPerBlock;
const int kWarpPerBatch = kThreadPerBatch / kWarpSize;
const int kIterations = (dim + kThreadPerBatch - 1) / kThreadPerBatch;
const int kIterationsV = (kIterations >= kVSize) ? (kIterations / kVSize) : 1;
const int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
T sum[kBatchSize]{static_cast<T>(0.0)};
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
int ids = first_batch + i;
if (ids >= n * d) break;
int idx_n = ids / d;
int idx_d = ids % d;
#pragma unroll
for (int it = 0; it < kIterations; ++it) {
int idx_dim = it * kThreadPerBatch + threadIdx.x;
int idx = idx_n * dim * d + idx_dim * d + idx_d;
if (idx_n < n && idx_dim < dim) {
VecT softmaxdata;
if (InLogMode) {
softmaxdata = reinterpret_cast<VecT*>(&softmaxwrt[idx])[0];
} else {
softmaxdata = reinterpret_cast<const VecT*>(&softmax[idx])[0];
}
VecT labelsdata = reinterpret_cast<const VecT*>(&labels[idx])[0];
T* softmaxptr = reinterpret_cast<T*>(&softmaxdata);
T* labelsptr = reinterpret_cast<T*>(&labelsdata);
#pragma unroll
for (int s = 0; s < kVSize; s++) {
if (InLogMode) {
sum[i] -= softmaxptr[s] * labelsptr[s];
softmaxptr[s] = Exp(softmaxptr[s]);
} else {
sum[i] -= Log(softmaxptr[s]) * labelsptr[s];
}
}
if (InLogMode) {
reinterpret_cast<VecT*>(&softmaxwrt[idx])[0] = softmaxdata;
}
}
}
}
phi::WarpReduceSum<T, kBatchSize, kWarpSize>(sum);
__syncthreads();
__shared__ T sumshare[kWarpPerBatch][kBatchPerBlock][kBatchSize];
if (threadIdx.x % kWarpSize == 0) {
#pragma unroll
for (int i = 0; i < kBatchSize; i++) {
sumshare[threadIdx.x / kWarpSize][threadIdx.y][i] = sum[i];
}
}
__syncthreads();
// write
if (threadIdx.x == 0) {
for (int i = 0; i < kBatchSize; i++) {
int ids = first_batch + i;
if (ids < n * d) {
loss[ids] = sumshare[0][threadIdx.y][i];
for (int s = 1; s < kWarpPerBatch; s++) {
loss[ids] += sumshare[s][threadIdx.y][i];
}
}
}
}
}
/*
Hard label cross entropy.
*/
template <typename T, typename LabelT, bool IgnoreIndex>
__global__ void CrossEntropyHardLabel(T* loss,
const T* softmax,
const LabelT* labels,
const int n,
const int dim,
const int d,
const int ignore_idx) {
int64_t ids = blockIdx.x * blockDim.x + threadIdx.x;
int64_t idx_n = ids / d;
int64_t idx_d = ids % d;
// thread ids compute loss[ids] using softmax[idx]
if (ids < n * d) {
auto lbl = static_cast<int64_t>(labels[ids]);
if (lbl < 0) { // label is negative
loss[ids] = static_cast<T>(0.0);
} else { // label is positive of zero
int64_t idx = idx_n * dim * d + lbl * d + idx_d;
if (IgnoreIndex == true) {
// IgnoreIndex is true
if (lbl == ignore_idx) {
loss[ids] = static_cast<T>(0.0);
} else {
loss[ids] = -Log(softmax[idx]);
}
} else {
// IgnoreIndex is false
loss[ids] = -Log(softmax[idx]);
}
}
}
}
/*
Hard label cross entropy with exp.
Input: log softmax
Output: loss and exp(input)
*/
template <typename T, typename LabelT, bool IgnoreIndex>
__global__ void CrossEntropyExpHardLabel(T* loss,
T* softmax,
const LabelT* labels,
const int n,
const int dim,
const int d,
const int ignore_idx) {
int64_t idx = blockIdx.x * blockDim.x + threadIdx.x;
int64_t idx_n = idx / (d * dim);
int64_t idx_dim = (idx / d) % dim;
int64_t idx_d = idx % d;
int64_t ids = idx_n * d + idx_d;
if (idx < n * dim * d) {
auto lbl = static_cast<int64_t>(labels[ids]);
if (IgnoreIndex == true) {
// IgnoreIndex is true
if (idx_dim == lbl) {
if (lbl == ignore_idx) {
loss[ids] = static_cast<T>(0.0);
} else {
loss[ids] = -softmax[idx];
}
}
} else {
// IgnoreIndex is false
if (lbl >= 0 && lbl < dim) {
if (lbl == idx_dim) {
loss[ids] = -softmax[idx];
}
} else {
loss[ids] = static_cast<T>(0.0);
}
}
softmax[idx] = Exp(softmax[idx]);
}
}
template <typename T, typename AccT, int VecSize, class ReduceFunctor>
__device__ __forceinline__ AccT ThreadReduce(const T* input,
int size,
const int offset,
AccT init,
ReduceFunctor reducer) {
using VecT = kps::details::VectorType<T, VecSize>;
int tid = threadIdx.x;
AccT val = init;
if (offset > 0) {
input -= offset;
size += offset;
if (tid >= offset) {
val = reducer(val, input[tid]);
}
size -= blockDim.x;
input += blockDim.x;
}
int remain = size % (VecSize * blockDim.x);
T ins[VecSize];
VecT* ins_vec = reinterpret_cast<VecT*>(&ins);
// vector part
for (; VecSize * tid < (size - remain); tid += blockDim.x) {
*ins_vec = reinterpret_cast<const VecT*>(input)[tid];
#pragma unroll
for (int i = 0; i < VecSize; ++i) {
val = reducer(val, ins[i]);
}
}
// scalar part
tid = size - remain + threadIdx.x;
for (; tid < size; tid += blockDim.x) {
val = reducer(val, input[tid]);
}
return val;
}
template <typename T, bool IgnoreIndex>
__device__ __forceinline__ void ComputeLoss(T* loss,
const T loss_value,
const int label_id,
const int64_t label_value,
const int tid,
const int vec_size,
const int offset,
const int ignore_index) {
int loss_id = vec_size * tid + offset;
if (IgnoreIndex) {
if (label_value == loss_id) {
if (label_value == ignore_index) {
loss[label_id] = static_cast<T>(0.0f);
} else {
loss[label_id] = loss_value;
}
}
} else {
if (label_value == loss_id) {
loss[label_id] = loss_value;
}
}
}
template <typename T,
typename AccT,
typename LabelT,
int VecSize,
bool IgnoreIndex>
__device__ __forceinline__ void VectorizedSoftmaxForwardImpl(
T* loss,
T* softmax,
const T* logits,
const LabelT* label,
int size,
const int offset,
const phi::LogSoftmaxForwardFunctor<AccT>& func,
const int ignore_index) {
using VecT = kps::details::VectorType<T, VecSize>;
int tid = threadIdx.x;
int label_id = blockIdx.x;
auto label_value = static_cast<int64_t>(label[label_id]);
const bool label_valid = label_value >= 0 && label_value < size;
int loss_id_offset = 0;
if (offset > 0) {
logits -= offset;
softmax -= offset;
size += offset;
loss_id_offset -= offset;
if (tid >= offset) {
AccT log_softmax = func(static_cast<AccT>(logits[tid]));
softmax[tid] = static_cast<T>(std::exp(log_softmax));
// loss
if (label_valid) {
ComputeLoss<T, IgnoreIndex>(loss,
static_cast<T>(-log_softmax),
label_id,
label_value,
tid,
1,
loss_id_offset,
ignore_index);
}
}
size -= blockDim.x;
logits += blockDim.x;
softmax += blockDim.x;
loss_id_offset += blockDim.x;
}
int remain = size % (VecSize * blockDim.x);
T ins[VecSize];
T outs[VecSize];
VecT* ins_vec = reinterpret_cast<VecT*>(&ins);
VecT* outs_vec = reinterpret_cast<VecT*>(&outs);
// vector part
for (; VecSize * tid < (size - remain); tid += blockDim.x) {
// read
*ins_vec = reinterpret_cast<const VecT*>(logits)[tid];
#pragma unroll
// compute
for (int i = 0; i < VecSize; ++i) {
AccT log_softmax = func(static_cast<AccT>(ins[i]));
outs[i] = static_cast<T>(std::exp(log_softmax));
// loss
if (label_valid) {
ComputeLoss<T, IgnoreIndex>(loss,
static_cast<T>(-log_softmax),
label_id,
label_value,
tid,
VecSize,
loss_id_offset + i,
ignore_index);
}
}
// write
reinterpret_cast<VecT*>(softmax)[tid] = *outs_vec;
}
// scalar part
tid = size - remain + threadIdx.x;
for (; tid < size; tid += blockDim.x) {
AccT log_softmax = func(static_cast<AccT>(logits[tid]));
softmax[tid] = static_cast<T>(std::exp(log_softmax));
// loss
if (label_valid) {
ComputeLoss<T, IgnoreIndex>(loss,
static_cast<T>(-log_softmax),
label_id,
label_value,
tid,
1,
loss_id_offset,
ignore_index);
}
}
// invalid label, write once
if (!label_valid && threadIdx.x == 0) {
loss[label_id] = static_cast<T>(0.0f);
}
}
template <typename T,
typename AccT,
typename LabelT,
int VecSize,
bool IgnoreIndex>
__device__ __forceinline__ void ScalarSoftmaxForwardImpl(
T* loss,
T* softmax,
const T* logits,
const LabelT* label,
const int size,
const phi::LogSoftmaxForwardFunctor<AccT>& func,
const int ignore_index) {
int tid = threadIdx.x;
int remain = size % (VecSize * blockDim.x);
int label_id = blockIdx.x;
auto label_value = static_cast<int64_t>(label[label_id]);
const bool label_valid = label_value >= 0 && label_value < size;
// main part
for (; tid < (size - remain); tid += VecSize * blockDim.x) {
T ins[VecSize];
#pragma unroll
for (int i = 0; i < VecSize; ++i) {
ins[i] = logits[tid + i * blockDim.x];
}
#pragma unroll
for (int i = 0; i < VecSize; ++i) {
AccT log_softmax = func(static_cast<AccT>(ins[i]));
softmax[tid + i * blockDim.x] = static_cast<T>(std::exp(log_softmax));
// loss
if (label_valid) {
ComputeLoss<T, IgnoreIndex>(loss,
static_cast<T>(-log_softmax),
label_id,
label_value,
tid,
VecSize,
i,
ignore_index);
}
}
}
// tail part
for (; tid < size; tid += blockDim.x) {
AccT log_softmax = func(static_cast<AccT>(logits[tid]));
softmax[tid] = static_cast<T>(std::exp(log_softmax));
// loss
if (label_valid) {
ComputeLoss<T, IgnoreIndex>(loss,
static_cast<T>(-log_softmax),
label_id,
label_value,
tid,
1,
0,
ignore_index);
}
}
// invalid label, write once
if (!label_valid && threadIdx.x == 0) {
loss[label_id] = static_cast<T>(0.0f);
}
}
template <typename T,
typename AccT,
typename LabelT,
int VecSize,
bool IgnoreIndex>
__global__ void VectorizedSoftmaxForward(T* loss,
T* softmax,
const T* logits,
const LabelT* label,
const int high_dim,
const int mid_dim,
const int ignore_index) {
using VecT = kps::details::VectorType<T, VecSize>;
// each block deal with one batch
logits += blockIdx.x * mid_dim;
softmax += blockIdx.x * mid_dim;
const int input_offset = ((uint64_t)logits) % ALIGN_BYTES / sizeof(T);
const int output_offset = ((uint64_t)softmax) % ALIGN_BYTES / sizeof(T);
// 1. reduce max
AccT max = ThreadReduce<T, AccT, VecSize, kps::MaxFunctor<AccT>>(
logits,
mid_dim,
input_offset,
-std::numeric_limits<AccT>::infinity(),
kps::MaxFunctor<AccT>());
max = kps::details::BlockXReduce<AccT, kps::MaxFunctor<AccT>>(
max, kps::MaxFunctor<AccT>());
// 2. reduce sum
AccT sum = ThreadReduce<T, AccT, VecSize, ExpAddFunctor<AccT>>(
logits,
mid_dim,
input_offset,
static_cast<AccT>(0),
ExpAddFunctor<AccT>(max));
sum = kps::details::BlockXReduce<AccT, kps::AddFunctor<AccT>>(
sum, kps::AddFunctor<AccT>());
// 3. softmax
phi::LogSoftmaxForwardFunctor<AccT> func(max, sum);
if (input_offset == output_offset) {
VectorizedSoftmaxForwardImpl<T, AccT, LabelT, VecSize, IgnoreIndex>(
loss,
softmax,
logits,
label,
mid_dim,
input_offset,
func,
ignore_index);
} else {
ScalarSoftmaxForwardImpl<T, AccT, LabelT, VecSize, IgnoreIndex>(
loss, softmax, logits, label, mid_dim, func, ignore_index);
}
}
/*
Core function of softmax with cross entropy forward soft label.
The computation includes
- Compute maximum of batch: maxvalue_{i} = max_j src_{i,j}
- Compute sum of exp batch: s_{i} = sum_{j}{ exp(src_{i,j} - maxvalue_{i} }
- Compute: sum of - sum_{j}{ label_{i,j} * (src_{i,j} - maxvalue_{i} -
log(sum[i]))}
One warp (32 threads) is used to compute 1 or 2 batch (kBatchSize).
For reduction max (sum), firstly compute max (sum) to one warp, then use shuffle
api to compute max (sum) in one warp.
*/
template <typename T, typename VecT, typename AccT, int Log2Elements>
__global__ void WarpSoftmaxForwardSoftLabel(T* loss,
T* softmax,
const T* src,
const T* label,
const int batch_size,
const int stride,
const int element_count) {
const bool LogMode = true;
constexpr int kDimCeil = 1 << Log2Elements;
constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
constexpr int kVSize = sizeof(VecT) / sizeof(T);
constexpr int kIterations = kDimCeil / kWarpSize;
constexpr int kIterationsV =
(kIterations >= kVSize) ? (kIterations / kVSize) : 1;
constexpr int kBatchSize = (kDimCeil <= 128) ? 2 : 1;
int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
int local_batches = batch_size - first_batch;
if (local_batches > kBatchSize) {
local_batches = kBatchSize;
}
// read data from global memory
VecT srcdata[kBatchSize][kIterationsV];
VecT labeldata[kBatchSize][kIterationsV];
for (int i = 0; i < kBatchSize; ++i) {
const VecT* src_v =
reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]);
const VecT* label_v =
reinterpret_cast<const VecT*>(&label[(first_batch + i) * stride]);
// max index to read
int idx_max = (i < local_batches) ? element_count : 0;
int idx_max_v = idx_max / kVSize;
// read data
for (int it = 0; it < kIterationsV; ++it) {
int src_idx = threadIdx.x + it * kWarpSize;
if (src_idx < idx_max_v) {
srcdata[i][it] = src_v[src_idx];
labeldata[i][it] = label_v[src_idx];
} else {
#pragma unroll
for (int s = 0; s < kVSize; s++) {
reinterpret_cast<T*>(&srcdata[i][it])[s] =
-std::numeric_limits<AccT>::max();
reinterpret_cast<T*>(&labeldata[i][it])[s] = 0.0;
}
}
}
}
// compute max value
AccT max_value[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
max_value[i] = -std::numeric_limits<AccT>::infinity();
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
T* srcptr_v = reinterpret_cast<T*>(&srcdata[i][it]);
T valmax = srcptr_v[0];
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
valmax = (valmax > srcptr_v[s]) ? valmax : srcptr_v[s];
}
max_value[i] = (max_value[i] > static_cast<AccT>(valmax))
? max_value[i]
: static_cast<AccT>(valmax);
}
}
phi::WarpReduceMax<AccT, kBatchSize, kWarpSize>(max_value);
// compute sum
AccT sum[kBatchSize]{0.0};
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
T* srcptr_v = reinterpret_cast<T*>(&srcdata[i][it]);
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (LogMode) {
sum[i] += std::exp(static_cast<AccT>(srcptr_v[s]) - max_value[i]);
} else {
srcptr_v[s] = std::exp(static_cast<AccT>(srcptr_v[s]) - max_value[i]);
sum[i] += static_cast<AccT>(srcptr_v[s]);
}
}
}
}
phi::WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum);
// log_softmax and loss
AccT sumloss[kBatchSize]{0.0};
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
if (i >= local_batches) break;
VecT* softmax_v =
reinterpret_cast<VecT*>(&softmax[(first_batch + i) * stride]);
// max index to write
int idx_max = (i < local_batches) ? element_count : 0;
int idx_max_v = idx_max / kVSize;
if (LogMode) {
sum[i] = std::log(sum[i]);
}
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
T* srcvp = reinterpret_cast<T*>(&srcdata[i][it]);
T* labelvp = reinterpret_cast<T*>(&labeldata[i][it]);
VecT tmpv;
T* tmpvp = reinterpret_cast<T*>(&tmpv);
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (LogMode) {
AccT logsoftmax = static_cast<AccT>(srcvp[s]) - max_value[i] - sum[i];
sumloss[i] -= logsoftmax * static_cast<AccT>(labelvp[s]);
tmpvp[s] = std::exp(logsoftmax);
} else {
tmpvp[s] = static_cast<AccT>(srcvp[s]) / sum[i];
}
}
int idx = threadIdx.x + it * kWarpSize;
if (idx < idx_max_v) {
softmax_v[idx] = tmpv;
}
}
}
// loss
phi::WarpReduceSum<AccT, kBatchSize, kWarpSize>(sumloss);
for (int i = 0; i < kBatchSize; i++) {
if (i >= local_batches) break;
loss[first_batch + i] = sumloss[i];
}
}
#define SOFTMAX_WARP_FORWARD_SOFT_CASE(Log2Elements, VecT, AccT) \
case Log2Elements: \
WarpSoftmaxForwardSoftLabel<T, VecT, AccT, Log2Elements> \
<<<blocks, threads, 0, stream>>>( \
loss, softmax, src, label, batch_size, stride, element_count); \
break;
/*
Wrapper of softmax with cross entropy forward soft label.
*/
template <typename T>
void SwitchWarpSoftmaxForwardSoftLabel(const int blocks,
const dim3 threads,
gpuStream_t stream,
T* loss,
T* softmax,
const T* src,
const T* label,
const int batch_size,
const int stride,
const int element_count,
const int log2_elements) {
using AccT = typename dtype::MPTypeTrait<T>::Type;
switch (log2_elements) {
SOFTMAX_WARP_FORWARD_SOFT_CASE(0, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(1, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(2, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(3, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(4, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(5, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(6, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(7, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(8, T, AccT);
SOFTMAX_WARP_FORWARD_SOFT_CASE(9, T, AccT);
default:
break;
}
}
template <typename T>
static void SoftmaxWithCrossEntropySoftLabel(const GPUContext& dev_ctx,
const int rank,
const int axis,
const T* logits_data,
const T* labels_data,
T* softmax_data,
T* loss_data,
int N,
int dim,
int D) {
#ifdef __HIPCC__
constexpr int kMaxBlockDim = 256;
#else
constexpr int kMaxBlockDim = 512;
#endif
int64_t block_dim = dim >= kMaxBlockDim
? kMaxBlockDim
: (1 << static_cast<int>(std::log2(dim)));
int64_t grid_dim = N * D;
constexpr int max_dim = 320;
const int kDimLog2 = static_cast<int>(Log2Ceil(dim));
const int kDimCeil = 1 << kDimLog2;
auto stream = dev_ctx.stream();
if (D == 1 && dim <= max_dim) {
int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
int batches_per_warp = (kDimCeil <= 128) ? 2 : 1;
// use 128 threads per block to maximimize gpu utilization
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / kWarpSize);
int batches_per_block = warps_per_block * batches_per_warp;
int blocks = (N + batches_per_block - 1) / batches_per_block;
dim3 threads(kWarpSize, warps_per_block, 1);
SwitchWarpSoftmaxForwardSoftLabel<T>(blocks,
threads,
stream,
loss_data,
softmax_data,
logits_data,
labels_data,
N,
dim,
dim,
kDimLog2);
} else {
ScopedTensorDescriptor desc;
std::vector<int> tensor_dims = {N, dim, D, 1};
GPUDNNDataLayout layout = GPUDNNDataLayout::kNCHW;
#ifdef PADDLE_WITH_HIP
miopenTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#else
cudnnTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#endif
auto handle = dev_ctx.cudnn_handle();
#ifdef PADDLE_WITH_HIP
auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE
: MIOPEN_SOFTMAX_MODE_CHANNEL;
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::miopenSoftmaxForward_V2(
handle,
paddle::platform::CudnnDataType<T>::kOne(),
descp,
logits_data,
paddle::platform::CudnnDataType<T>::kZero(),
descp,
softmax_data,
MIOPEN_SOFTMAX_LOG,
mode));
#else
auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
: CUDNN_SOFTMAX_MODE_CHANNEL;
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnSoftmaxForward(
handle,
CUDNN_SOFTMAX_LOG,
mode,
paddle::platform::CudnnDataType<T>::kOne(),
descp,
logits_data,
paddle::platform::CudnnDataType<T>::kZero(),
descp,
softmax_data));
#endif
const int kDimLog2 = static_cast<int>(Log2Ceil(dim));
const int kDimCeil = 1 << kDimLog2;
#ifdef __HIPCC__
int kThreadPerBlock = 256;
#else
int kThreadPerBlock = 512;
#endif
int kBatchPerBlock = 1;
int blocks = (N * D + kBatchPerBlock - 1) / kBatchPerBlock;
dim3 threads(kThreadPerBlock / kBatchPerBlock, kBatchPerBlock, 1);
CrossEntropySoftLabel<T, T, true><<<blocks, threads, 0, stream>>>(
loss_data, softmax_data, NULL, labels_data, N, dim, D, kDimLog2);
}
}
/*
Core function of softmax with cross entropy forward
- softmax, SoftmaxMode=kSoftmax
- log softmax, SoftmaxMode=kLogSoftmax
- softmax with cross entropy hard label, SoftmaxMode=kCrossEntropy
The computation includes
- Compute max value: maxvalue_{i} = max_j src_{i,j}
- Compute sum of exp: s_{i} = sum_{j}{e^{src_{i,j} - maxvalue_{i}}}
- Compute: softmax_{i,j} = e^{src_{i,j} - maxvalue_{i}} / s_{i}
- Compute: logsoftmax_{i,j} = src_{i,j} - maxvalue_{i} - log(s_{i})
- Compute: loss_{i} = -logsoftmax[i,label[i]] (Hard label)
This computation results from following formula:
softmax_{i,j} = e^{src_{i,j}} / sum_{j}{e^{src_{i,j}}}
= e^{src_{i,j} - maxvalue_{i}}
/ sum_{j}{e^{src_{i,j} - maxvalue_{i}}}
= e^{src_{i,j} - maxvalue_{i}} / s_{i}
logsoftmax_{i,j} = log(softmax_{i,j})
= src_{i,j} - maxvalue_{i} - log(s_{i})
One warp (32 threads) is used to compute 1 or 2 batch (kBatchSize).
For reduction max (sum), firstly compute max (sum) to one warp, then use
shuffle api to compute max (sum) in one warp.
*/
template <typename T,
typename LabelT,
typename VecT,
typename AccT,
int Log2Elements,
SoftmaxMode mode,
bool IgnoreIndex>
__global__ void WarpSoftmaxForward(T* loss,
T* softmax,
const T* src,
const LabelT* label,
const int batch_size,
const int stride,
const int element_count,
const int ignore_index) {
constexpr int kDimCeil = 1 << Log2Elements;
constexpr int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
constexpr int kVSize = sizeof(VecT) / sizeof(T);
constexpr int kIterations = kDimCeil / kWarpSize;
constexpr int kIterationsV =
(kIterations >= kVSize) ? (kIterations / kVSize) : 1;
constexpr int kBatchSize = (kDimCeil <= 128) ? 2 : 1;
int first_batch = (blockDim.y * blockIdx.x + threadIdx.y) * kBatchSize;
// max index to read
int idx_max_v[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; i++) {
int idx_max = ((i + first_batch) < batch_size) ? element_count : 0;
idx_max_v[i] = idx_max / kVSize;
}
// read data from global memory
AccT srcdata[kBatchSize][kIterationsV][kVSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
// read data to srcdata: - KVSize==1, - KVSize>1
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
int src_idx = threadIdx.x + it * kWarpSize;
if (kVSize == 1) {
if (src_idx < idx_max_v[i]) {
srcdata[i][it][0] =
static_cast<AccT>(src[(first_batch + i) * stride + src_idx]);
} else {
srcdata[i][it][0] = -std::numeric_limits<AccT>::infinity();
}
} else {
const VecT* src_v =
reinterpret_cast<const VecT*>(&src[(first_batch + i) * stride]);
if (src_idx < idx_max_v[i]) {
VecT srctmp = src_v[src_idx];
const T* srcinptr = reinterpret_cast<const T*>(&srctmp);
#pragma unroll
for (int s = 0; s < kVSize; s++) {
srcdata[i][it][s] = static_cast<AccT>(srcinptr[s]);
}
} else {
#pragma unroll
for (int s = 0; s < kVSize; s++) {
srcdata[i][it][s] = -std::numeric_limits<AccT>::infinity();
}
}
}
}
}
// compute max value: maxvalue_{i} = max_j src_{i,j}
AccT max_value[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
// it = 0
AccT valmax = srcdata[i][0][0];
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
valmax = (valmax > srcdata[i][0][s]) ? valmax : srcdata[i][0][s];
}
max_value[i] = valmax;
// it = 1, 2, ...
#pragma unroll
for (int it = 1; it < kIterationsV; ++it) {
AccT valmax = srcdata[i][it][0];
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
valmax = (valmax > srcdata[i][it][s]) ? valmax : srcdata[i][it][s];
}
max_value[i] = (max_value[i] > valmax) ? max_value[i] : valmax;
}
}
phi::WarpReduceMax<AccT, kBatchSize, kWarpSize>(max_value);
// compute sum: s_{i} = sum_{j}{ exp(src_{i,j} - maxvalue_{i} }
AccT sum[kBatchSize];
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
// it = 0
if (mode == SoftmaxMode::kLogSoftmax ||
mode == SoftmaxMode::kCrossEntropy) {
sum[i] = std::exp(srcdata[i][0][0] - max_value[i]);
} else {
srcdata[i][0][0] = std::exp(srcdata[i][0][0] - max_value[i]);
sum[i] = srcdata[i][0][0];
}
#pragma unroll
for (int s = 1; s < kVSize; ++s) {
if (mode == SoftmaxMode::kLogSoftmax ||
mode == SoftmaxMode::kCrossEntropy) {
sum[i] += std::exp(srcdata[i][0][s] - max_value[i]);
} else {
srcdata[i][0][s] = std::exp(srcdata[i][0][s] - max_value[i]);
sum[i] += srcdata[i][0][s];
}
}
// it = 1, 2, ...
#pragma unroll
for (int it = 1; it < kIterationsV; ++it) {
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (mode == SoftmaxMode::kLogSoftmax ||
mode == SoftmaxMode::kCrossEntropy) {
sum[i] += std::exp(srcdata[i][it][s] - max_value[i]);
} else {
srcdata[i][it][s] = std::exp(srcdata[i][it][s] - max_value[i]);
sum[i] += srcdata[i][it][s];
}
}
}
}
phi::WarpReduceSum<AccT, kBatchSize, kWarpSize>(sum);
// write data
#pragma unroll
for (int i = 0; i < kBatchSize; ++i) {
if (mode == SoftmaxMode::kLogSoftmax ||
mode == SoftmaxMode::kCrossEntropy) {
sum[i] = std::log(sum[i]);
}
#pragma unroll
for (int it = 0; it < kIterationsV; ++it) {
int idx = threadIdx.x + it * kWarpSize;
if (kVSize == 1) { // kVSize==1
if (idx < idx_max_v[i]) {
if (mode == SoftmaxMode::kLogSoftmax) { // log softmax
softmax[(first_batch + i) * stride + idx] =
srcdata[i][it][0] - max_value[i] - sum[i];
// softmax with cross entropy hard label
} else if (mode == SoftmaxMode::kCrossEntropy) {
AccT logsoftmax = srcdata[i][it][0] - max_value[i] - sum[i];
// softmax
softmax[(first_batch + i) * stride + idx] = std::exp(logsoftmax);
// label
int loss_idx = (threadIdx.x + it * kWarpSize) * kVSize;
auto lbl = static_cast<int64_t>(label[first_batch + i]);
if (IgnoreIndex == true) {
// IgnoreIndex is true
if (lbl == loss_idx) {
if (lbl != ignore_index) {
loss[first_batch + i] = -logsoftmax;
} else {
loss[first_batch + i] = static_cast<T>(0.0);
}
}
} else {
// IgnoreIndex is false
if (lbl >= 0 && lbl < element_count) {
if (lbl == loss_idx) {
loss[first_batch + i] = -logsoftmax;
}
} else {
loss[first_batch + i] = static_cast<T>(0.0);
}
}
} else { // softmax
softmax[(first_batch + i) * stride + idx] =
srcdata[i][it][0] / sum[i];
}
} else {
break;
}
} else { // KVSize>1
VecT* softmax_v =
reinterpret_cast<VecT*>(&softmax[(first_batch + i) * stride]);
VecT tmpdata;
T* tmpptr = reinterpret_cast<T*>(&tmpdata);
#pragma unroll
for (int s = 0; s < kVSize; ++s) {
if (mode == SoftmaxMode::kLogSoftmax) { // log softmax
tmpptr[s] = srcdata[i][it][s] - max_value[i] - sum[i];
// softmax with cross entropy hard label
} else if (mode == SoftmaxMode::kCrossEntropy) {
AccT logsoftmax = srcdata[i][it][s] - max_value[i] - sum[i];
// softmax
tmpptr[s] = std::exp(logsoftmax);
// label
int loss_idx = (threadIdx.x + it * kWarpSize) * kVSize + s;
auto lbl = static_cast<int64_t>(label[first_batch + i]);
if (IgnoreIndex == true) {
// IgnoreIndex is true
if (lbl == loss_idx && lbl != ignore_index) {
loss[first_batch + i] = -logsoftmax;
}
} else {
// IgnoreIndex is false
if (lbl >= 0 && lbl < element_count) {
if (lbl == loss_idx) {
loss[first_batch + i] = -logsoftmax;
}
} else {
loss[first_batch + i] = static_cast<T>(0.0);
}
}
} else { // softmax
tmpptr[s] = srcdata[i][it][s] / sum[i];
}
}
if (idx < idx_max_v[i]) {
softmax_v[idx] = tmpdata;
} else {
break;
}
}
}
}
}
#define SOFTMAX_WARP_FORWARD_CASE(Log2Elements, LabelT, VecT, AccT) \
case Log2Elements: \
WarpSoftmaxForward<T, LabelT, VecT, AccT, Log2Elements, mode, IgnoreIndex> \
<<<blocks, threads, 0, stream>>>(loss, \
softmax, \
src, \
label, \
batch_size, \
stride, \
element_count, \
ignore_index); \
break;
/*
Wrapper of softmax with cross entropy forward hard label.
*/
template <typename T, typename LabelT, SoftmaxMode mode, bool IgnoreIndex>
void SwitchWarpSoftmaxForward(T* loss,
T* softmax,
const T* src,
const LabelT* label,
const int batch_size,
const int stride,
const int element_count,
const int ignore_index,
gpuStream_t stream) {
using AccT = typename dtype::MPTypeTrait<T>::Type;
// use 128 threads per block to maximimize gpu utilization
const int log2_elements = static_cast<int>(Log2Ceil(element_count));
const int kDimCeil = 1 << log2_elements;
int kWarpSize = (kDimCeil < 32) ? kDimCeil : 32;
int batches_per_warp = (kDimCeil <= 128) ? 2 : 1;
constexpr int threads_per_block = 128;
int warps_per_block = (threads_per_block / kWarpSize);
int batches_per_block = warps_per_block * batches_per_warp;
int blocks = (batch_size + batches_per_block - 1) / batches_per_block;
dim3 threads(kWarpSize, warps_per_block, 1);
switch (log2_elements) {
SOFTMAX_WARP_FORWARD_CASE(0, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(1, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(2, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(3, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(4, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(5, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(6, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(7, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(8, LabelT, T, AccT);
SOFTMAX_WARP_FORWARD_CASE(9, LabelT, T, AccT);
default:
break;
}
}
template <typename T, typename LabelT, bool IgnoreIndex>
void LaunchVectorizedSoftmaxForward(T* loss,
T* softmax,
const T* logits,
const LabelT* label,
const int high_dim,
const int mid_dim,
const int ignore_index,
gpuStream_t stream) {
using AccT = typename dtype::MPTypeTrait<T>::Type;
constexpr int vec_size = sizeof(float4) / sizeof(T);
const int max_num_threads = 1024;
int max_block_size = std::min(mid_dim / vec_size, max_num_threads);
if (vec_size > 1) {
max_block_size /= 2;
}
int block_size = 1;
while (block_size < max_block_size) {
block_size *= 2;
}
block_size = std::max(block_size, kps::details::kWarpSize);
dim3 grids(high_dim);
dim3 blocks(block_size);
VectorizedSoftmaxForward<T, AccT, LabelT, vec_size, IgnoreIndex>
<<<grids, blocks, 0, stream>>>(
loss, softmax, logits, label, high_dim, mid_dim, ignore_index);
}
/*
Wrapper of softmax with cross entropy hard label.
- SwitchWarpSoftmaxForward for small size when axis == -1
- LaunchVectorizedSoftmaxForward for large size when axis == -1
- cudnn function for axis != -1
*/
template <typename T, typename LabelT, bool IgnoreIndex>
static void SoftmaxWithCrossEntropyHardLabel(const GPUContext& dev_ctx,
int rank,
int axis,
const T* logits_data,
const LabelT* labels_data,
T* loss_data,
T* softmax_data,
int N,
int dim,
int D,
const int ignore_index) {
auto stream = dev_ctx.stream();
constexpr int max_dim = 320;
if (D == 1) {
if (dim <= max_dim) { // small size
const SoftmaxMode mode = SoftmaxMode::kCrossEntropy;
SwitchWarpSoftmaxForward<T, LabelT, mode, IgnoreIndex>(loss_data,
softmax_data,
logits_data,
labels_data,
N,
dim,
dim,
ignore_index,
stream);
} else { // large size
LaunchVectorizedSoftmaxForward<T, LabelT, IgnoreIndex>(loss_data,
softmax_data,
logits_data,
labels_data,
N,
dim,
ignore_index,
stream);
}
} else {
ScopedTensorDescriptor desc;
std::vector<int> tensor_dims = {N, dim, D, 1};
GPUDNNDataLayout layout = GPUDNNDataLayout::kNCHW;
#ifdef PADDLE_WITH_HIP
miopenTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#else
cudnnTensorDescriptor_t descp = desc.descriptor<T>(layout, tensor_dims);
#endif
auto handle = dev_ctx.cudnn_handle();
#ifdef PADDLE_WITH_HIP
auto mode = axis == rank - 1 ? MIOPEN_SOFTMAX_MODE_INSTANCE
: MIOPEN_SOFTMAX_MODE_CHANNEL;
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::miopenSoftmaxForward_V2(
handle,
paddle::platform::CudnnDataType<T>::kOne(),
descp,
logits_data,
paddle::platform::CudnnDataType<T>::kZero(),
descp,
softmax_data,
MIOPEN_SOFTMAX_LOG,
mode));
#else
auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
: CUDNN_SOFTMAX_MODE_CHANNEL;
PADDLE_ENFORCE_GPU_SUCCESS(phi::dynload::cudnnSoftmaxForward(
handle,
CUDNN_SOFTMAX_LOG,
mode,
paddle::platform::CudnnDataType<T>::kOne(),
descp,
logits_data,
paddle::platform::CudnnDataType<T>::kZero(),
descp,
softmax_data));
#endif
int threads = 128;
int blocks = (N * dim * D + threads - 1) / threads;
// compute cross entropy, input is log softmax
CrossEntropyExpHardLabel<T, LabelT, IgnoreIndex>
<<<blocks, threads, 0, stream>>>(
loss_data, softmax_data, labels_data, N, dim, D, ignore_index);
}
}
template <typename T, typename LabelT>
void CrossEntropyWithSoftmaxCUDAKernel(const GPUContext& dev_ctx,
const DenseTensor& logits,
const DenseTensor& label,
bool soft_label,
bool use_softmax,
bool numeric_stable_mode,
int ignore_index,
int axis,
DenseTensor* softmax,
DenseTensor* loss) {
PADDLE_ENFORCE_EQ(
dev_ctx.GetPlace().GetType(),
AllocationType::GPU,
phi::errors::Unavailable("softmax_with_cross_entropy operator's "
"CUDA kernel only runs on GPU device."));
// do not with softmax op, and input is softmax
if (!use_softmax) {
DenseTensor* softmax_out = softmax;
const DenseTensor* softmax = &logits;
const DenseTensor& labels = label;
const int rank = softmax->dims().size();
const int axis_v = phi::funcs::CanonicalAxis(axis, rank);
const int axis_dim = softmax->dims()[axis_v];
const int n = phi::funcs::SizeToAxis(axis_v, softmax->dims());
const int d = phi::funcs::SizeFromAxis(axis_v, softmax->dims());
auto* softmax_out_data = dev_ctx.template Alloc<T>(softmax_out);
auto* loss_data = dev_ctx.template Alloc<T>(loss);
phi::funcs::SetConstant<GPUContext, T> set_constant;
set_constant(dev_ctx, loss, static_cast<T>(0));
if (axis_dim == 1) {
set_constant(dev_ctx, softmax_out, static_cast<T>(1));
return;
}
DenseTensor softmax_2d(*softmax);
softmax_2d.Resize({n, d});
DenseTensor labels_2d(labels);
labels_2d.Resize({n, labels.numel() / n});
DenseTensor loss_2d(*loss);
loss_2d.Resize({n, 1});
DenseTensor softmax_out_2d(*softmax_out);
softmax_out_2d.Resize({n, d});
// math::CrossEntropyFunctor support axis is the last
if (axis_v == -1) {
paddle::operators::math::CrossEntropyFunctor<GPUContext, T>()(
dev_ctx,
&loss_2d,
&softmax_2d,
&labels_2d,
soft_label,
ignore_index,
axis_dim);
return;
}
// if axis is not the last, we need a new impliment
if (soft_label) {
auto* logits_data = softmax->data<T>();
auto* labels_data = labels.data<T>();
const int kDimLog2 = static_cast<int>(Log2Ceil(axis_dim));
const int kDimCeil = 1 << kDimLog2;
#ifdef __HIPCC__
int kThreadPerBlock = 256;
#else
int kThreadPerBlock = 512;
#endif
int kBatchPerBlock = 1;
int blocks = (n * d + kBatchPerBlock - 1) / kBatchPerBlock;
dim3 threads(kThreadPerBlock / kBatchPerBlock, kBatchPerBlock, 1);
CrossEntropySoftLabel<T, T, false>
<<<blocks, threads, 0, dev_ctx.stream()>>>(loss_data,
NULL,
logits_data,
labels_data,
n,
axis_dim,
d / axis_dim,
kDimLog2);
} else { // HardLabel
auto* logits_data = softmax->data<T>();
auto* labels_data = labels.data<LabelT>();
int threads = 128;
int blocks = (n * d / axis_dim + threads - 1) / threads;
if (ignore_index >= 0 && ignore_index < axis_dim) {
CrossEntropyHardLabel<T, LabelT, true>
<<<blocks, threads, 0, dev_ctx.stream()>>>(loss_data,
logits_data,
labels_data,
n,
axis_dim,
d / axis_dim,
ignore_index);
} else {
CrossEntropyHardLabel<T, LabelT, false>
<<<blocks, threads, 0, dev_ctx.stream()>>>(loss_data,
logits_data,
labels_data,
n,
axis_dim,
d / axis_dim,
ignore_index);
}
}
// cause of input is softmax
// copy to output softmax, directly
phi::Copy<GPUContext>(
dev_ctx, *softmax, dev_ctx.GetPlace(), false, softmax_out);
return;
}
const int rank = logits.dims().size();
const int axis_v = phi::funcs::CanonicalAxis(axis, rank);
int axis_dim = logits.dims()[axis_v];
const int64_t n = phi::funcs::SizeToAxis(axis_v, logits.dims());
const int64_t d = phi::funcs::SizeFromAxis(axis_v, logits.dims());
auto* softmax_data = dev_ctx.template Alloc<T>(softmax);
auto* loss_data = dev_ctx.template Alloc<T>(loss);
if (axis_dim == 1) {
phi::funcs::SetConstant<GPUContext, T> set_constant;
set_constant(dev_ctx, softmax, static_cast<T>(1));
set_constant(dev_ctx, loss, static_cast<T>(0));
return;
}
if (soft_label) {
auto* logits_data = logits.data<T>();
auto* labels_data = label.data<T>();
SoftmaxWithCrossEntropySoftLabel<T>(dev_ctx,
rank,
axis_v,
logits_data,
labels_data,
softmax_data,
loss_data,
n,
axis_dim,
d / axis_dim);
} else {
if (!numeric_stable_mode) {
// CUDNN kernel only suppoer 2-D tensor and perfome softmax on last dim
DenseTensor logits_2d(logits);
logits_2d.Resize({n, d});
DenseTensor softmax_2d(*softmax);
softmax_2d.Resize({n, d});
DenseTensor labels_2d(label);
labels_2d.Resize({n, label.numel() / n});
DenseTensor loss_2d(*loss);
loss_2d.Resize({n, 1});
paddle::operators::math::SoftmaxCUDNNFunctor<T, GPUContext>()(
dev_ctx, &logits_2d, &softmax_2d);
paddle::operators::math::CrossEntropyFunctor<GPUContext, T>()(
dev_ctx,
&loss_2d,
&softmax_2d,
&labels_2d,
false,
ignore_index,
axis_dim);
} else {
auto* logits_data = logits.data<T>();
auto* labels_data = label.data<LabelT>();
if (ignore_index >= 0 && ignore_index < axis_dim) {
SoftmaxWithCrossEntropyHardLabel<T, LabelT, true>(dev_ctx,
rank,
axis_v,
logits_data,
labels_data,
loss_data,
softmax_data,
n,
axis_dim,
d / axis_dim,
ignore_index);
} else {
SoftmaxWithCrossEntropyHardLabel<T, LabelT, false>(dev_ctx,
rank,
axis_v,
logits_data,
labels_data,
loss_data,
softmax_data,
n,
axis_dim,
d / axis_dim,
ignore_index);
}
}
}
}
template <typename T, typename Context>
void CrossEntropyWithSoftmaxKernel(const Context& dev_ctx,
const DenseTensor& logits,
const DenseTensor& label,
bool soft_label,
bool use_softmax,
bool numeric_stable_mode,
int ignore_index,
int axis,
DenseTensor* softmax,
DenseTensor* loss) {
auto dtype = label.dtype();
if (soft_label) {
PADDLE_ENFORCE_EQ(
dtype,
paddle::experimental::CppTypeToDataType<T>::Type(),
phi::errors::InvalidArgument("The Input(Label) should be with the "
"same data type as Input(Logits)."));
CrossEntropyWithSoftmaxCUDAKernel<T, T>(dev_ctx,
logits,
label,
soft_label,
use_softmax,
numeric_stable_mode,
ignore_index,
axis,
softmax,
loss);
} else {
PD_VISIT_INTEGRAL_TYPES(dtype, "CrossEntropyWithSoftmaxCUDAKernel", ([&] {
CrossEntropyWithSoftmaxCUDAKernel<T, data_t>(
dev_ctx,
logits,
label,
soft_label,
use_softmax,
numeric_stable_mode,
ignore_index,
axis,
softmax,
loss);
}));
}
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(cross_entropy_with_softmax,
GPU,
ALL_LAYOUT,
phi::CrossEntropyWithSoftmaxKernel,
float,
phi::dtype::float16) {}
#else
PD_REGISTER_KERNEL(cross_entropy_with_softmax,
GPU,
ALL_LAYOUT,
phi::CrossEntropyWithSoftmaxKernel,
float,
double,
phi::dtype::float16) {}
#endif
|
3f904828fda838d3e16e172e904453371e7215c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer ********************
*
* Copyright (c) 2018 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file modulated_deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, dilation, and offset.
* These functions are mainly used in deformable convolution operators.
* \ref: https://arxiv.org/abs/1703.06211
* \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng
*/
// modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
#include <ATen/ATen.h>
#include <THH/THHAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <float.h>
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N)
{
return ::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
template <typename scalar_t>
__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
template <typename scalar_t>
__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const scalar_t map_h = i * dilation_h + offset_h;
//const scalar_t map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
void deformable_im2col(
const at::Tensor data_im, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
hipLaunchKernelGGL(( deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, parallel_imgs, channels, deformable_group,
height_col, width_col, data_col_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in deformable_im2col: %s\n", hipGetErrorString(err));
}
}
template <typename scalar_t>
__global__ void deformable_col2im_gpu_kernel(
const int n, const scalar_t *data_col, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
void deformable_col2im(
const at::Tensor data_col, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
at::Tensor grad_im)
{
// todo: make sure parallel_imgs is passed in correctly
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>();
hipLaunchKernelGGL(( deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col_, data_offset_, channels, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, deformable_group, height_col, width_col, grad_im_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in deformable_col2im: %s\n", hipGetErrorString(err));
}
}
template <typename scalar_t>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col,
const scalar_t *data_im, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col, scalar_t *grad_offset)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group *
batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
const scalar_t weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
void deformable_col2im_coord(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group, at::Tensor grad_offset)
{
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs;
int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>();
hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col_, data_im_, data_offset_, channels, height, width,
ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group,
height_col, width_col, grad_offset_);
}));
}
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_im,
const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_offset, scalar_t *grad_mask)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
else
{
mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const scalar_t weight = dmcn_get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
}
}
void modulated_deformable_im2col_cuda(
const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kenerl_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err));
}
}
void modulated_deformable_col2im_cuda(
const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor grad_im)
{
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_h, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_col2im_cuda: %s\n", hipGetErrorString(err));
}
}
void modulated_deformable_col2im_coord_cuda(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group,
at::Tensor grad_offset, at::Tensor grad_mask)
{
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>();
scalar_t *grad_mask_ = grad_mask.data_ptr<scalar_t>();
hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
grad_offset_, grad_mask_);
}));
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
printf("error in modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err));
}
}
| 3f904828fda838d3e16e172e904453371e7215c8.cu | /*!
******************* BEGIN Caffe Copyright Notice and Disclaimer ****************
*
* COPYRIGHT
*
* All contributions by the University of California:
* Copyright (c) 2014-2017 The Regents of the University of California (Regents)
* All rights reserved.
*
* All other contributions:
* Copyright (c) 2014-2017, the respective contributors
* All rights reserved.
*
* Caffe uses a shared copyright model: each contributor holds copyright over
* their contributions to Caffe. The project versioning records all such
* contribution and copyright details. If a contributor wants to further mark
* their specific copyright on a particular contribution, they should indicate
* their copyright solely in the commit message of the change when it is
* committed.
*
* LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* CONTRIBUTION AGREEMENT
*
* By contributing to the BVLC/caffe repository through pull-request, comment,
* or otherwise, the contributor releases their content to the
* license and copyright terms herein.
*
***************** END Caffe Copyright Notice and Disclaimer ********************
*
* Copyright (c) 2018 Microsoft
* Licensed under The MIT License [see LICENSE for details]
* \file modulated_deformable_im2col.cuh
* \brief Function definitions of converting an image to
* column matrix based on kernel, padding, dilation, and offset.
* These functions are mainly used in deformable convolution operators.
* \ref: https://arxiv.org/abs/1703.06211
* \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng
*/
// modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu
#include <ATen/ATen.h>
#include <THC/THCAtomics.cuh>
#include <stdio.h>
#include <math.h>
#include <float.h>
using namespace at;
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
const int kMaxGridNum = 65535;
inline int GET_BLOCKS(const int N)
{
return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS);
}
template <typename scalar_t>
__device__ scalar_t deformable_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
template <typename scalar_t>
__global__ void deformable_im2col_gpu_kernel(const int n, const scalar_t *data_im, const scalar_t *data_offset,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const scalar_t map_h = i * dilation_h + offset_h;
//const scalar_t map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = deformable_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val;
data_col_ptr += batch_size * height_col * width_col;
}
}
}
}
void deformable_im2col(
const at::Tensor data_im, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h, const int ksize_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w, const int parallel_imgs,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
// todo: check parallel_imgs is correctly passed in
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
channel_per_deformable_group, parallel_imgs, channels, deformable_group,
height_col, width_col, data_col_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in deformable_im2col: %s\n", cudaGetErrorString(err));
}
}
template <typename scalar_t>
__global__ void deformable_col2im_gpu_kernel(
const int n, const scalar_t *data_col, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) *
2 * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index];
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
void deformable_col2im(
const at::Tensor data_col, const at::Tensor data_offset, const int channels,
const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group,
at::Tensor grad_im)
{
// todo: make sure parallel_imgs is passed in correctly
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs;
int channel_per_deformable_group = channels / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>();
deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_offset_, channels, height, width, ksize_h,
ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, deformable_group, height_col, width_col, grad_im_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in deformable_col2im: %s\n", cudaGetErrorString(err));
}
}
template <typename scalar_t>
__global__ void deformable_col2im_coord_gpu_kernel(const int n, const scalar_t *data_col,
const scalar_t *data_im, const scalar_t *data_offset,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col, scalar_t *grad_offset)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group *
batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) *
channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 *
kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
const scalar_t weight = get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos];
cnt += 1;
}
grad_offset[index] = val;
}
}
void deformable_col2im_coord(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset,
const int channels, const int height, const int width, const int ksize_h,
const int ksize_w, const int pad_h, const int pad_w, const int stride_h,
const int stride_w, const int dilation_h, const int dilation_w,
const int parallel_imgs, const int deformable_group, at::Tensor grad_offset)
{
int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1;
int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1;
int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs;
int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>();
deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_im_, data_offset_, channels, height, width,
ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group,
height_col, width_col, grad_offset_);
}));
}
template <typename scalar_t>
__device__ scalar_t dmcn_im2col_bilinear(const scalar_t *bottom_data, const int data_width,
const int height, const int width, scalar_t h, scalar_t w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
scalar_t lh = h - h_low;
scalar_t lw = w - w_low;
scalar_t hh = 1 - lh, hw = 1 - lw;
scalar_t v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
scalar_t v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
scalar_t v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
scalar_t v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_gradient_weight(scalar_t argmax_h, scalar_t argmax_w,
const int h, const int w, const int height, const int width)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (h == argmax_h_low && w == argmax_w_low)
weight = (h + 1 - argmax_h) * (w + 1 - argmax_w);
if (h == argmax_h_low && w == argmax_w_high)
weight = (h + 1 - argmax_h) * (argmax_w + 1 - w);
if (h == argmax_h_high && w == argmax_w_low)
weight = (argmax_h + 1 - h) * (w + 1 - argmax_w);
if (h == argmax_h_high && w == argmax_w_high)
weight = (argmax_h + 1 - h) * (argmax_w + 1 - w);
return weight;
}
template <typename scalar_t>
__device__ scalar_t dmcn_get_coordinate_weight(scalar_t argmax_h, scalar_t argmax_w,
const int height, const int width, const scalar_t *im_data,
const int data_width, const int bp_dir)
{
if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width)
{
//empty
return 0;
}
int argmax_h_low = floor(argmax_h);
int argmax_w_low = floor(argmax_w);
int argmax_h_high = argmax_h_low + 1;
int argmax_w_high = argmax_w_low + 1;
scalar_t weight = 0;
if (bp_dir == 0)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
else if (bp_dir == 1)
{
if (argmax_h_low >= 0 && argmax_w_low >= 0)
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low];
if (argmax_h_low >= 0 && argmax_w_high <= width - 1)
weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high];
if (argmax_h_high <= height - 1 && argmax_w_low >= 0)
weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low];
if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1)
weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high];
}
return weight;
}
template <typename scalar_t>
__global__ void modulated_deformable_im2col_gpu_kernel(const int n,
const scalar_t *data_im, const scalar_t *data_offset, const scalar_t *data_mask,
const int height, const int width, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *data_col)
{
CUDA_KERNEL_LOOP(index, n)
{
// index index of output matrix
const int w_col = index % width_col;
const int h_col = (index / width_col) % height_col;
const int b_col = (index / width_col / height_col) % batch_size;
const int c_im = (index / width_col / height_col) / batch_size;
const int c_col = c_im * kernel_h * kernel_w;
// compute deformable group index
const int deformable_group_index = c_im / channel_per_deformable_group;
const int h_in = h_col * stride_h - pad_h;
const int w_in = w_col * stride_w - pad_w;
scalar_t *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col;
//const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in;
const scalar_t *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width;
const scalar_t *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t val = static_cast<scalar_t>(0);
const scalar_t h_im = h_in + i * dilation_h + offset_h;
const scalar_t w_im = w_in + j * dilation_w + offset_w;
//if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
if (h_im > -1 && w_im > -1 && h_im < height && w_im < width)
{
//const float map_h = i * dilation_h + offset_h;
//const float map_w = j * dilation_w + offset_w;
//const int cur_height = height - h_in;
//const int cur_width = width - w_in;
//val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w);
val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im);
}
*data_col_ptr = val * mask;
data_col_ptr += batch_size * height_col * width_col;
//data_col_ptr += height_col * width_col;
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_im)
{
CUDA_KERNEL_LOOP(index, n)
{
const int j = (index / width_col / height_col / batch_size) % kernel_w;
const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h;
const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h;
// compute the start and end of the output
const int deformable_group_index = c / channel_per_deformable_group;
int w_out = index % width_col;
int h_out = (index / width_col) % height_col;
int b = (index / width_col / height_col) % batch_size;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out;
const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out;
const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out;
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h;
const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w;
const scalar_t cur_top_grad = data_col[index] * mask;
const int cur_h = (int)cur_inv_h_data;
const int cur_w = (int)cur_inv_w_data;
for (int dy = -2; dy <= 2; dy++)
{
for (int dx = -2; dx <= 2; dx++)
{
if (cur_h + dy >= 0 && cur_h + dy < height &&
cur_w + dx >= 0 && cur_w + dx < width &&
abs(cur_inv_h_data - (cur_h + dy)) < 1 &&
abs(cur_inv_w_data - (cur_w + dx)) < 1)
{
int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx;
scalar_t weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width);
atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad);
}
}
}
}
}
template <typename scalar_t>
__global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n,
const scalar_t *data_col, const scalar_t *data_im,
const scalar_t *data_offset, const scalar_t *data_mask,
const int channels, const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int offset_channels, const int deformable_group,
const int height_col, const int width_col,
scalar_t *grad_offset, scalar_t *grad_mask)
{
CUDA_KERNEL_LOOP(index, n)
{
scalar_t val = 0, mval = 0;
int w = index % width_col;
int h = (index / width_col) % height_col;
int c = (index / width_col / height_col) % offset_channels;
int b = (index / width_col / height_col) / offset_channels;
// compute the start and end of the output
const int deformable_group_index = c / (2 * kernel_h * kernel_w);
const int col_step = kernel_h * kernel_w;
int cnt = 0;
const scalar_t *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col;
const scalar_t *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width;
const scalar_t *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col;
const scalar_t *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col;
const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w;
for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step)
{
const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w;
const int bp_dir = offset_c % 2;
int j = (col_pos / width_col / height_col / batch_size) % kernel_w;
int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h;
int w_out = col_pos % width_col;
int h_out = (col_pos / width_col) % height_col;
int w_in = w_out * stride_w - pad_w;
int h_in = h_out * stride_h - pad_h;
const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out);
const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out);
const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out);
const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr];
const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr];
const scalar_t mask = data_mask_ptr[data_mask_hw_ptr];
scalar_t inv_h = h_in + i * dilation_h + offset_h;
scalar_t inv_w = w_in + j * dilation_w + offset_w;
if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width)
{
inv_h = inv_w = -2;
}
else
{
mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w);
}
const scalar_t weight = dmcn_get_coordinate_weight(
inv_h, inv_w,
height, width, data_im_ptr + cnt * height * width, width, bp_dir);
val += weight * data_col_ptr[col_pos] * mask;
cnt += 1;
}
// KERNEL_ASSIGN(grad_offset[index], offset_req, val);
grad_offset[index] = val;
if (offset_c % 2 == 0)
// KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval);
grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval;
}
}
void modulated_deformable_im2col_cuda(
const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kenerl_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor data_col)
{
// num_axes should be smaller than block size
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] {
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w,
pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group,
batch_size, channels, deformable_group, height_col, width_col, data_col_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err));
}
}
void modulated_deformable_col2im_cuda(
const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group, at::Tensor grad_im)
{
const int channel_per_deformable_group = channels / deformable_group;
const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *grad_im_ = grad_im.data_ptr<scalar_t>();
modulated_deformable_col2im_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_h, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, deformable_group, height_col, width_col, grad_im_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err));
}
}
void modulated_deformable_col2im_coord_cuda(
const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask,
const int batch_size, const int channels, const int height_im, const int width_im,
const int height_col, const int width_col, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w, const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int deformable_group,
at::Tensor grad_offset, at::Tensor grad_mask)
{
const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group;
const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] {
const scalar_t *data_col_ = data_col.data_ptr<scalar_t>();
const scalar_t *data_im_ = data_im.data_ptr<scalar_t>();
const scalar_t *data_offset_ = data_offset.data_ptr<scalar_t>();
const scalar_t *data_mask_ = data_mask.data_ptr<scalar_t>();
scalar_t *grad_offset_ = grad_offset.data_ptr<scalar_t>();
scalar_t *grad_mask_ = grad_mask.data_ptr<scalar_t>();
modulated_deformable_col2im_coord_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>(
num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im,
kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w,
dilation_h, dilation_w, channel_per_deformable_group,
batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col,
grad_offset_, grad_mask_);
}));
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
{
printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err));
}
}
|
6cc434c4286c59f43fd5a51bb7ba27b9b74c35f7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <device_launch_parameters.h>
#include<time.h>
#include<stdlib.h>
#define N 128
#define BLOCK_SIZE 8
typedef struct {
int width;
int height;
float *elements;
} Matrix;
clock_t start_cpu, stop_cpu, start_gpu, stop_gpu;
double czas_cpu, czas_gpu;
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
// kady wtek oblicza jeden element macierzy C
// akumulujc wynik w zmiennej Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < A.width; ++e)
Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
C.elements[row * C.width + col] = Cvalue;
}
void MatMul(const Matrix A, const Matrix B, Matrix C) {
// kopiujemy macierze A i B to globalnej pamici urzdzenia
// najpierw A
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipMalloc((void **)&d_A.elements, size);
hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice);
// potem B
Matrix d_B;
d_B.width = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
hipMalloc((void **)&d_B.elements, size);
hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice);
// przydzielamy macierz C w globalnej pamici urzdzenia
Matrix d_C;
d_C.width = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
hipMalloc((void**)&d_C.elements, size);
// preparujemy rodowisko i wywoujemy kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
start_gpu = clock();
MatMulKernel << <dimGrid, dimBlock >> > (d_A, d_B, d_C);
hipDeviceSynchronize();
stop_gpu = clock();
czas_gpu = (double)1000 * (stop_gpu - start_gpu) / CLOCKS_PER_SEC;
// odbieramy obliczon macierz C z pamici globalnej urzdzenia
hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost);
printf(" Czas GPU: %f[ms]\n", czas_gpu);
printf("\n");
// zwalniamy pami
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
int main(int argc, char** argv)
{
printf(" Lab 2. Zad 1. Macierze\n\n");
printf(" N: %d \n", N);
printf(" BLOCK SIZE: %d \n", BLOCK_SIZE);
int devCnt;
Matrix A, B, C;
hipGetDeviceCount(&devCnt);
if (devCnt == 0) {
perror("No CUDA devices available -- exiting.");
return 1;
}
A.width = N;
A.height = N;
A.elements = (float*)malloc(A.width*A.height * sizeof(float));
for (int i = 0; i < A.width*A.height; i++) {
A.elements[i] = ((float)(rand() % 100) / 100) + (rand() % 50);
}
B.width = N;
B.height = N;
B.elements = (float*)malloc(B.width*B.height * sizeof(float));
for (int i = 0; i < B.width*B.height; i++) {
B.elements[i] = ((float)(rand() % 100) / 100) + (rand() % 50);
}
C.width = B.width;
C.height = A.height;
C.elements = (float*)malloc(C.width*C.height * sizeof(float));
float **A2D = new float*[A.width];
for (int i = 0; i < A.height; i++)
A2D[i] = new float[A.width];
float **B2D = new float*[A.width];
for (int i = 0; i < A.height; i++)
B2D[i] = new float[A.width];
float **C2D = new float*[A.width];
for (int i = 0; i < A.height; i++)
C2D[i] = new float[A.width];
for (int i = 0; i < A.width; i++) {
for (int j = 0; j < A.height; j++) {
A2D[i][j] = A.elements[i*A.width + j];
}
}
for (int i = 0; i < B.width; i++) {
for (int j = 0; j < B.height; j++) {
B2D[i][j] = B.elements[i*B.width + j];
}
}
for (int i = 0; i < C.width; i++) {
for (int j = 0; j < C.height; j++) {
C2D[i][j] = 0;
}
}
start_cpu = clock();
for (int i = 0; i < A.height; i++) {
for (int j = 0; j < A.width; j++) {
for (int k = 0; k < B.width; k++) {
C2D[i][k] += A2D[i][j] * B2D[j][k];
}
}
}
stop_cpu = clock();
czas_cpu = (double)1000 * (stop_cpu - start_cpu) / CLOCKS_PER_SEC;
printf(" Czas CPU %.2f[ms]\n", czas_cpu);
MatMul(A, B, C);
for (int i = 0; i < A.width; i++) {
delete[] A2D[i];
delete[] B2D[i];
delete[] C2D[i];
}
delete[] A2D;
delete[] B2D;
delete[] C2D;
free(A.elements);
free(B.elements);
free(C.elements);
} | 6cc434c4286c59f43fd5a51bb7ba27b9b74c35f7.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <math.h>
#include <device_launch_parameters.h>
#include<time.h>
#include<stdlib.h>
#define N 128
#define BLOCK_SIZE 8
typedef struct {
int width;
int height;
float *elements;
} Matrix;
clock_t start_cpu, stop_cpu, start_gpu, stop_gpu;
double czas_cpu, czas_gpu;
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
// każdy wątek oblicza jeden element macierzy C
// akumulując wynik w zmiennej Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int e = 0; e < A.width; ++e)
Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
C.elements[row * C.width + col] = Cvalue;
}
void MatMul(const Matrix A, const Matrix B, Matrix C) {
// kopiujemy macierze A i B to globalnej pamięci urządzenia
// najpierw A
Matrix d_A;
d_A.width = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc((void **)&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
// potem B
Matrix d_B;
d_B.width = B.width;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc((void **)&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
// przydzielamy macierz C w globalnej pamięci urządzenia
Matrix d_C;
d_C.width = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc((void**)&d_C.elements, size);
// preparujemy środowisko i wywołujemy kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
start_gpu = clock();
MatMulKernel << <dimGrid, dimBlock >> > (d_A, d_B, d_C);
cudaThreadSynchronize();
stop_gpu = clock();
czas_gpu = (double)1000 * (stop_gpu - start_gpu) / CLOCKS_PER_SEC;
// odbieramy obliczoną macierz C z pamięci globalnej urządzenia
cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
printf(" Czas GPU: %f[ms]\n", czas_gpu);
printf("\n");
// zwalniamy pamięć
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
int main(int argc, char** argv)
{
printf(" Lab 2. Zad 1. Macierze\n\n");
printf(" N: %d \n", N);
printf(" BLOCK SIZE: %d \n", BLOCK_SIZE);
int devCnt;
Matrix A, B, C;
cudaGetDeviceCount(&devCnt);
if (devCnt == 0) {
perror("No CUDA devices available -- exiting.");
return 1;
}
A.width = N;
A.height = N;
A.elements = (float*)malloc(A.width*A.height * sizeof(float));
for (int i = 0; i < A.width*A.height; i++) {
A.elements[i] = ((float)(rand() % 100) / 100) + (rand() % 50);
}
B.width = N;
B.height = N;
B.elements = (float*)malloc(B.width*B.height * sizeof(float));
for (int i = 0; i < B.width*B.height; i++) {
B.elements[i] = ((float)(rand() % 100) / 100) + (rand() % 50);
}
C.width = B.width;
C.height = A.height;
C.elements = (float*)malloc(C.width*C.height * sizeof(float));
float **A2D = new float*[A.width];
for (int i = 0; i < A.height; i++)
A2D[i] = new float[A.width];
float **B2D = new float*[A.width];
for (int i = 0; i < A.height; i++)
B2D[i] = new float[A.width];
float **C2D = new float*[A.width];
for (int i = 0; i < A.height; i++)
C2D[i] = new float[A.width];
for (int i = 0; i < A.width; i++) {
for (int j = 0; j < A.height; j++) {
A2D[i][j] = A.elements[i*A.width + j];
}
}
for (int i = 0; i < B.width; i++) {
for (int j = 0; j < B.height; j++) {
B2D[i][j] = B.elements[i*B.width + j];
}
}
for (int i = 0; i < C.width; i++) {
for (int j = 0; j < C.height; j++) {
C2D[i][j] = 0;
}
}
start_cpu = clock();
for (int i = 0; i < A.height; i++) {
for (int j = 0; j < A.width; j++) {
for (int k = 0; k < B.width; k++) {
C2D[i][k] += A2D[i][j] * B2D[j][k];
}
}
}
stop_cpu = clock();
czas_cpu = (double)1000 * (stop_cpu - start_cpu) / CLOCKS_PER_SEC;
printf(" Czas CPU %.2f[ms]\n", czas_cpu);
MatMul(A, B, C);
for (int i = 0; i < A.width; i++) {
delete[] A2D[i];
delete[] B2D[i];
delete[] C2D[i];
}
delete[] A2D;
delete[] B2D;
delete[] C2D;
free(A.elements);
free(B.elements);
free(C.elements);
} |
5abf74ad7767af30329a5a14468bcd50f53bb5b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "lsl.h"
#define NBLOCKS_TRUE 512
#define NTHREADS_TRUE 512 * 2
__global__ void kernel1(List list) {
if(threadIdx.x == 0) {
list.insert_node(blockIdx.x,NULL);
if(blockIdx.x % 2 == 0)
list.delete_node(blockIdx.x);
}
}
__global__ void printList(List list) {
//list.printlist();
}
int main() {
int nblocks_host = 0;
List list;
float elapsedTime;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
///kernel
hipLaunchKernelGGL(( kernel1), dim3(NBLOCKS_TRUE),dim3(NTHREADS_TRUE), 0, 0, list);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &elapsedTime, start, stop );
hipEventDestroy( start );
hipEventDestroy( stop );
printf("blockCounter1 <<< %d, %d >>> () counted %d blocks in %f ms.\n",
NBLOCKS_TRUE,
NTHREADS_TRUE,
nblocks_host,
elapsedTime);
hipLaunchKernelGGL(( printList), dim3(1),dim3(1), 0, 0, list);
if(hipDeviceSynchronize()!=hipSuccess)
printf("Error at GPU kernel \n");
return 0;
}
| 5abf74ad7767af30329a5a14468bcd50f53bb5b1.cu | #include <stdio.h>
#include "lsl.h"
#define NBLOCKS_TRUE 512
#define NTHREADS_TRUE 512 * 2
__global__ void kernel1(List list) {
if(threadIdx.x == 0) {
list.insert_node(blockIdx.x,NULL);
if(blockIdx.x % 2 == 0)
list.delete_node(blockIdx.x);
}
}
__global__ void printList(List list) {
//list.printlist();
}
int main() {
int nblocks_host = 0;
List list;
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
///kernel
kernel1<<<NBLOCKS_TRUE,NTHREADS_TRUE>>>(list);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsedTime, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
printf("blockCounter1 <<< %d, %d >>> () counted %d blocks in %f ms.\n",
NBLOCKS_TRUE,
NTHREADS_TRUE,
nblocks_host,
elapsedTime);
printList<<<1,1>>>(list);
if(cudaDeviceSynchronize()!=cudaSuccess)
printf("Error at GPU kernel \n");
return 0;
}
|
a8e2c01a3c87e676501de8b59e3a91810c33beaa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "box2d2r-512-10-512_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
__global__ void kernel0_10(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 472;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
double __reg_9_0;
double __reg_9_1;
double __reg_9_2;
double __reg_9_3;
double __reg_9_4;
double __reg_10_0;
double __reg_10_1;
double __reg_10_2;
double __reg_10_3;
double __reg_10_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __storeValid = __writeValid10;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC9(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC10(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_0);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_0);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(2, __reg_10_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(3, __reg_10_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(4, __reg_10_4);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(5, __reg_10_0);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(6, __reg_10_1);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(7, __reg_10_2);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(8, __reg_10_3);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(9, __reg_10_4);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(10, __reg_10_0);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(11, __reg_10_1);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(12, __reg_10_2);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(13, __reg_10_3);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(14, __reg_10_4);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(15, __reg_10_0);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(16, __reg_10_1);
__LOAD(__reg_0, 37);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(17, __reg_10_2);
__LOAD(__reg_0, 38);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(18, __reg_10_3);
__LOAD(__reg_0, 39);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(19, __reg_10_4);
__LOAD(__reg_0, 40);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(20, __reg_10_0);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__LOAD(__reg_0, 37);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__LOAD(__reg_0, 38);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__LOAD(__reg_0, 39);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__LOAD(__reg_0, 40);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(20, __reg_10_0);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 41; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 20, __reg_10_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 20, __reg_10_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 20, __reg_10_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 20, __reg_10_0);
__h++;
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_8_1 = __reg_7_1;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_8_2 = __reg_7_2;
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_9_1 = __reg_8_1;
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_4, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_9_2 = __reg_8_2;
__CALC10(__reg_10_3, __reg_10_3, __reg_10_3, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__CALC10(__reg_10_4, __reg_10_4, __reg_10_4, __reg_10_4, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_8_2 = __reg_7_2;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_8_3 = __reg_7_3;
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_9_2 = __reg_8_2;
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_0, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_9_3 = __reg_8_3;
__CALC10(__reg_10_4, __reg_10_4, __reg_10_4, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__CALC10(__reg_10_0, __reg_10_0, __reg_10_0, __reg_10_0, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_8_3 = __reg_7_3;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_8_4 = __reg_7_4;
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_9_3 = __reg_8_3;
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_1, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__reg_9_4 = __reg_8_4;
__CALC10(__reg_10_0, __reg_10_0, __reg_10_0, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
__CALC10(__reg_10_1, __reg_10_1, __reg_10_1, __reg_10_1, __reg_10_2, __reg_9_4);
__STORE(__h + 1, __reg_10_2);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_8_4 = __reg_7_4;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_8_0 = __reg_7_0;
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__reg_9_4 = __reg_8_4;
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_2, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
__reg_9_0 = __reg_8_0;
__CALC10(__reg_10_1, __reg_10_1, __reg_10_1, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h + 1, __reg_10_2);
__CALC10(__reg_10_2, __reg_10_2, __reg_10_2, __reg_10_2, __reg_10_3, __reg_9_0);
__STORE(__h + 2, __reg_10_3);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_8_0 = __reg_7_0;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__reg_8_1 = __reg_7_1;
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
__reg_9_0 = __reg_8_0;
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_3, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h + 1, __reg_10_2);
__reg_9_1 = __reg_8_1;
__CALC10(__reg_10_2, __reg_10_2, __reg_10_2, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h + 2, __reg_10_3);
__CALC10(__reg_10_3, __reg_10_3, __reg_10_3, __reg_10_3, __reg_10_4, __reg_9_1);
__STORE(__h + 3, __reg_10_4);
}
}
else
{
for (__h = 41; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 20, __reg_10_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 20, __reg_10_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 20, __reg_10_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 20, __reg_10_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 20, __reg_10_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 20, __reg_10_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 20, __reg_10_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 20, __reg_10_0);
__h++;
}
}
__global__ void kernel0_9(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
double __reg_9_0;
double __reg_9_1;
double __reg_9_2;
double __reg_9_3;
double __reg_9_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC9(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(2, __reg_9_2);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(3, __reg_9_3);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(4, __reg_9_4);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(5, __reg_9_0);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(6, __reg_9_1);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(7, __reg_9_2);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(8, __reg_9_3);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(9, __reg_9_4);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(10, __reg_9_0);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(11, __reg_9_1);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(12, __reg_9_2);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(13, __reg_9_3);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(14, __reg_9_4);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(15, __reg_9_0);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(16, __reg_9_1);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(17, __reg_9_2);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(18, __reg_9_3);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(18, __reg_9_3);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 37; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_8_2 = __reg_7_2;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_8_3 = __reg_7_3;
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_0, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_8_3 = __reg_7_3;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_8_4 = __reg_7_4;
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_1, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_8_4 = __reg_7_4;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_8_0 = __reg_7_0;
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_2, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_8_0 = __reg_7_0;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__reg_8_1 = __reg_7_1;
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_3, __reg_9_4, __reg_8_1);
__STORE(__h + 2, __reg_9_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__reg_8_1 = __reg_7_1;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
__reg_8_2 = __reg_7_2;
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h + 2, __reg_9_4);
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_4, __reg_9_0, __reg_8_2);
__STORE(__h + 3, __reg_9_0);
}
}
else
{
for (__h = 37; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
}
}
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(2, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(3, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(4, __reg_8_4);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(5, __reg_8_0);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(6, __reg_8_1);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(7, __reg_8_2);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(8, __reg_8_3);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(9, __reg_8_4);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(10, __reg_8_0);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(11, __reg_8_1);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(12, __reg_8_2);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(13, __reg_8_3);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(14, __reg_8_4);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(15, __reg_8_0);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(16, __reg_8_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(16, __reg_8_1);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__STORE(__h + 2, __reg_8_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h + 2, __reg_8_0);
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__STORE(__h + 3, __reg_8_1);
}
}
else
{
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
}
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(2, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(3, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(4, __reg_7_4);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(5, __reg_7_0);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(6, __reg_7_1);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(7, __reg_7_2);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(8, __reg_7_3);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(9, __reg_7_4);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(10, __reg_7_0);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(11, __reg_7_1);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(12, __reg_7_2);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(13, __reg_7_3);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(14, __reg_7_4);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(14, __reg_7_4);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__STORE(__h + 2, __reg_7_1);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h + 2, __reg_7_1);
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__STORE(__h + 3, __reg_7_2);
}
}
else
{
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
}
}
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(2, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(3, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(4, __reg_6_4);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(5, __reg_6_0);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(6, __reg_6_1);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(7, __reg_6_2);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(8, __reg_6_3);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(9, __reg_6_4);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(10, __reg_6_0);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(11, __reg_6_1);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(12, __reg_6_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(12, __reg_6_2);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__STORE(__h + 2, __reg_6_2);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h + 2, __reg_6_2);
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__STORE(__h + 3, __reg_6_3);
}
}
else
{
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
}
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(2, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(3, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(4, __reg_5_4);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(5, __reg_5_0);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(6, __reg_5_1);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(7, __reg_5_2);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(8, __reg_5_3);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(9, __reg_5_4);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(10, __reg_5_0);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(10, __reg_5_0);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__STORE(__h + 2, __reg_5_3);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h + 2, __reg_5_3);
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__STORE(__h + 3, __reg_5_4);
}
}
else
{
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
}
}
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(2, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(3, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(4, __reg_4_4);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(5, __reg_4_0);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(6, __reg_4_1);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(7, __reg_4_2);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(8, __reg_4_3);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(8, __reg_4_3);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__STORE(__h + 2, __reg_4_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h + 2, __reg_4_4);
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__STORE(__h + 3, __reg_4_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
}
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(2, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(3, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(4, __reg_3_4);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(5, __reg_3_0);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(6, __reg_3_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(6, __reg_3_1);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__STORE(__h + 2, __reg_3_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h + 2, __reg_3_0);
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__STORE(__h + 3, __reg_3_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
}
}
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(2, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(3, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(4, __reg_2_4);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(4, __reg_2_4);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__STORE(__h + 2, __reg_2_1);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h + 2, __reg_2_1);
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__STORE(__h + 3, __reg_2_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
}
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
}
}
| a8e2c01a3c87e676501de8b59e3a91810c33beaa.cu | #include "box2d2r-512-10-512_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
__global__ void kernel0_10(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 472;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
double __reg_9_0;
double __reg_9_1;
double __reg_9_2;
double __reg_9_3;
double __reg_9_4;
double __reg_10_0;
double __reg_10_1;
double __reg_10_2;
double __reg_10_3;
double __reg_10_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __storeValid = __writeValid10;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC9(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC10(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_0);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_0);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(2, __reg_10_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(3, __reg_10_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(4, __reg_10_4);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(5, __reg_10_0);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(6, __reg_10_1);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(7, __reg_10_2);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(8, __reg_10_3);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(9, __reg_10_4);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(10, __reg_10_0);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(11, __reg_10_1);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(12, __reg_10_2);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(13, __reg_10_3);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(14, __reg_10_4);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(15, __reg_10_0);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(16, __reg_10_1);
__LOAD(__reg_0, 37);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(17, __reg_10_2);
__LOAD(__reg_0, 38);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(18, __reg_10_3);
__LOAD(__reg_0, 39);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(19, __reg_10_4);
__LOAD(__reg_0, 40);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(20, __reg_10_0);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__LOAD(__reg_0, 37);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__LOAD(__reg_0, 38);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__LOAD(__reg_0, 39);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__LOAD(__reg_0, 40);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(20, __reg_10_0);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 41; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 20, __reg_10_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 20, __reg_10_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 20, __reg_10_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 20, __reg_10_0);
__h++;
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_8_1 = __reg_7_1;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_8_2 = __reg_7_2;
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_9_1 = __reg_8_1;
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_4, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_9_2 = __reg_8_2;
__CALC10(__reg_10_3, __reg_10_3, __reg_10_3, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__CALC10(__reg_10_4, __reg_10_4, __reg_10_4, __reg_10_4, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_8_2 = __reg_7_2;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_8_3 = __reg_7_3;
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_9_2 = __reg_8_2;
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_0, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_9_3 = __reg_8_3;
__CALC10(__reg_10_4, __reg_10_4, __reg_10_4, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__CALC10(__reg_10_0, __reg_10_0, __reg_10_0, __reg_10_0, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_8_3 = __reg_7_3;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_8_4 = __reg_7_4;
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_9_3 = __reg_8_3;
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_1, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__reg_9_4 = __reg_8_4;
__CALC10(__reg_10_0, __reg_10_0, __reg_10_0, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
__CALC10(__reg_10_1, __reg_10_1, __reg_10_1, __reg_10_1, __reg_10_2, __reg_9_4);
__STORE(__h + 1, __reg_10_2);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_8_4 = __reg_7_4;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_8_0 = __reg_7_0;
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__reg_9_4 = __reg_8_4;
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_2, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
__reg_9_0 = __reg_8_0;
__CALC10(__reg_10_1, __reg_10_1, __reg_10_1, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h + 1, __reg_10_2);
__CALC10(__reg_10_2, __reg_10_2, __reg_10_2, __reg_10_2, __reg_10_3, __reg_9_0);
__STORE(__h + 2, __reg_10_3);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 19, __reg_10_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 18, __reg_10_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 17, __reg_10_4);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 16, __reg_10_0);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 15, __reg_10_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 14, __reg_10_2);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 13, __reg_10_3);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 12, __reg_10_4);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 11, __reg_10_0);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 10, __reg_10_1);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 9, __reg_10_2);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 8, __reg_10_3);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 7, __reg_10_4);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 6, __reg_10_0);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 5, __reg_10_1);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 4, __reg_10_2);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 3, __reg_10_3);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 2, __reg_10_4);
__reg_8_0 = __reg_7_0;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 1, __reg_10_0);
__reg_8_1 = __reg_7_1;
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h + 0, __reg_10_1);
__reg_9_0 = __reg_8_0;
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_3, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h + 1, __reg_10_2);
__reg_9_1 = __reg_8_1;
__CALC10(__reg_10_2, __reg_10_2, __reg_10_2, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h + 2, __reg_10_3);
__CALC10(__reg_10_3, __reg_10_3, __reg_10_3, __reg_10_3, __reg_10_4, __reg_9_1);
__STORE(__h + 3, __reg_10_4);
}
}
else
{
for (__h = 41; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 20, __reg_10_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 20, __reg_10_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 20, __reg_10_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 20, __reg_10_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__CALC10(__reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_9_3);
__STORE(__h - 20, __reg_10_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__CALC10(__reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_10_2, __reg_9_4);
__STORE(__h - 20, __reg_10_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_10_3, __reg_9_0);
__STORE(__h - 20, __reg_10_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__CALC10(__reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_10_4, __reg_9_1);
__STORE(__h - 20, __reg_10_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__CALC10(__reg_10_4, __reg_10_3, __reg_10_2, __reg_10_1, __reg_10_0, __reg_9_2);
__STORE(__h - 20, __reg_10_0);
__h++;
}
}
__global__ void kernel0_9(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
double __reg_9_0;
double __reg_9_1;
double __reg_9_2;
double __reg_9_3;
double __reg_9_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC9(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(2, __reg_9_2);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(3, __reg_9_3);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(4, __reg_9_4);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(5, __reg_9_0);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(6, __reg_9_1);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(7, __reg_9_2);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(8, __reg_9_3);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(9, __reg_9_4);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(10, __reg_9_0);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(11, __reg_9_1);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(12, __reg_9_2);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(13, __reg_9_3);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(14, __reg_9_4);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(15, __reg_9_0);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(16, __reg_9_1);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(17, __reg_9_2);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(18, __reg_9_3);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__LOAD(__reg_0, 33);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__LOAD(__reg_0, 34);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__LOAD(__reg_0, 35);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__LOAD(__reg_0, 36);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(18, __reg_9_3);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 37; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_8_2 = __reg_7_2;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_8_3 = __reg_7_3;
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_0, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_8_3 = __reg_7_3;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_8_4 = __reg_7_4;
__CALC9(__reg_9_0, __reg_9_0, __reg_9_0, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_1, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_8_4 = __reg_7_4;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_8_0 = __reg_7_0;
__CALC9(__reg_9_1, __reg_9_1, __reg_9_1, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_2, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_8_0 = __reg_7_0;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__reg_8_1 = __reg_7_1;
__CALC9(__reg_9_2, __reg_9_2, __reg_9_2, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_3, __reg_9_4, __reg_8_1);
__STORE(__h + 2, __reg_9_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 17, __reg_9_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 16, __reg_9_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 15, __reg_9_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 14, __reg_9_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 13, __reg_9_4);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 12, __reg_9_0);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 11, __reg_9_1);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 10, __reg_9_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 9, __reg_9_3);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 8, __reg_9_4);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 7, __reg_9_0);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 6, __reg_9_1);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 5, __reg_9_2);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 4, __reg_9_3);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 3, __reg_9_4);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 2, __reg_9_0);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 1, __reg_9_1);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h + 0, __reg_9_2);
__reg_8_1 = __reg_7_1;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h + 1, __reg_9_3);
__reg_8_2 = __reg_7_2;
__CALC9(__reg_9_3, __reg_9_3, __reg_9_3, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h + 2, __reg_9_4);
__CALC9(__reg_9_4, __reg_9_4, __reg_9_4, __reg_9_4, __reg_9_0, __reg_8_2);
__STORE(__h + 3, __reg_9_0);
}
}
else
{
for (__h = 37; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__CALC9(__reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_8_1);
__STORE(__h - 18, __reg_9_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__CALC9(__reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_9_0, __reg_8_2);
__STORE(__h - 18, __reg_9_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__CALC9(__reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_9_1, __reg_8_3);
__STORE(__h - 18, __reg_9_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__CALC9(__reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_9_2, __reg_8_4);
__STORE(__h - 18, __reg_9_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_9_4, __reg_9_3, __reg_8_0);
__STORE(__h - 18, __reg_9_3);
__h++;
}
}
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC8(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(2, __reg_8_2);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(3, __reg_8_3);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(4, __reg_8_4);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(5, __reg_8_0);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(6, __reg_8_1);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(7, __reg_8_2);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(8, __reg_8_3);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(9, __reg_8_4);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(10, __reg_8_0);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(11, __reg_8_1);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(12, __reg_8_2);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(13, __reg_8_3);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(14, __reg_8_4);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(15, __reg_8_0);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(16, __reg_8_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__LOAD(__reg_0, 29);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__LOAD(__reg_0, 30);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__LOAD(__reg_0, 31);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__LOAD(__reg_0, 32);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(16, __reg_8_1);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_7_3 = __reg_6_3;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_7_4 = __reg_6_4;
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_1, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_7_4 = __reg_6_4;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_7_0 = __reg_6_0;
__CALC8(__reg_8_1, __reg_8_1, __reg_8_1, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_2, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_7_0 = __reg_6_0;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_7_1 = __reg_6_1;
__CALC8(__reg_8_2, __reg_8_2, __reg_8_2, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_3, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_7_1 = __reg_6_1;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__reg_7_2 = __reg_6_2;
__CALC8(__reg_8_3, __reg_8_3, __reg_8_3, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_4, __reg_8_0, __reg_7_2);
__STORE(__h + 2, __reg_8_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 15, __reg_8_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 14, __reg_8_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 13, __reg_8_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 12, __reg_8_1);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 11, __reg_8_2);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 10, __reg_8_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 9, __reg_8_4);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 8, __reg_8_0);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 7, __reg_8_1);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 6, __reg_8_2);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 5, __reg_8_3);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 4, __reg_8_4);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 3, __reg_8_0);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 2, __reg_8_1);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 1, __reg_8_2);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h + 0, __reg_8_3);
__reg_7_2 = __reg_6_2;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h + 1, __reg_8_4);
__reg_7_3 = __reg_6_3;
__CALC8(__reg_8_4, __reg_8_4, __reg_8_4, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h + 2, __reg_8_0);
__CALC8(__reg_8_0, __reg_8_0, __reg_8_0, __reg_8_0, __reg_8_1, __reg_7_3);
__STORE(__h + 3, __reg_8_1);
}
}
else
{
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__CALC8(__reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_7_4);
__STORE(__h - 16, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_8_3, __reg_7_0);
__STORE(__h - 16, __reg_8_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__CALC8(__reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_8_4, __reg_7_1);
__STORE(__h - 16, __reg_8_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__CALC8(__reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_8_0, __reg_7_2);
__STORE(__h - 16, __reg_8_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__CALC8(__reg_8_0, __reg_8_4, __reg_8_3, __reg_8_2, __reg_8_1, __reg_7_3);
__STORE(__h - 16, __reg_8_1);
__h++;
}
}
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC7(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(2, __reg_7_2);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(3, __reg_7_3);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(4, __reg_7_4);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(5, __reg_7_0);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(6, __reg_7_1);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(7, __reg_7_2);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(8, __reg_7_3);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(9, __reg_7_4);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(10, __reg_7_0);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(11, __reg_7_1);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(12, __reg_7_2);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(13, __reg_7_3);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(14, __reg_7_4);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__LOAD(__reg_0, 25);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__LOAD(__reg_0, 26);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__LOAD(__reg_0, 27);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__LOAD(__reg_0, 28);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(14, __reg_7_4);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_6_4 = __reg_5_4;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_6_0 = __reg_5_0;
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_2, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_6_0 = __reg_5_0;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_6_1 = __reg_5_1;
__CALC7(__reg_7_2, __reg_7_2, __reg_7_2, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_3, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_6_1 = __reg_5_1;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_6_2 = __reg_5_2;
__CALC7(__reg_7_3, __reg_7_3, __reg_7_3, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_4, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_6_2 = __reg_5_2;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__reg_6_3 = __reg_5_3;
__CALC7(__reg_7_4, __reg_7_4, __reg_7_4, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_0, __reg_7_1, __reg_6_3);
__STORE(__h + 2, __reg_7_1);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 13, __reg_7_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 12, __reg_7_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 11, __reg_7_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 10, __reg_7_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 9, __reg_7_0);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 8, __reg_7_1);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 7, __reg_7_2);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 6, __reg_7_3);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 5, __reg_7_4);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 4, __reg_7_0);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 3, __reg_7_1);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 2, __reg_7_2);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 1, __reg_7_3);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h + 0, __reg_7_4);
__reg_6_3 = __reg_5_3;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h + 1, __reg_7_0);
__reg_6_4 = __reg_5_4;
__CALC7(__reg_7_0, __reg_7_0, __reg_7_0, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h + 2, __reg_7_1);
__CALC7(__reg_7_1, __reg_7_1, __reg_7_1, __reg_7_1, __reg_7_2, __reg_6_4);
__STORE(__h + 3, __reg_7_2);
}
}
else
{
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__CALC7(__reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_6_2);
__STORE(__h - 14, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__CALC7(__reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_7_1, __reg_6_3);
__STORE(__h - 14, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__CALC7(__reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_7_2, __reg_6_4);
__STORE(__h - 14, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_7_3, __reg_6_0);
__STORE(__h - 14, __reg_7_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__CALC7(__reg_7_3, __reg_7_2, __reg_7_1, __reg_7_0, __reg_7_4, __reg_6_1);
__STORE(__h - 14, __reg_7_4);
__h++;
}
}
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC6(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(2, __reg_6_2);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(3, __reg_6_3);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(4, __reg_6_4);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(5, __reg_6_0);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(6, __reg_6_1);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(7, __reg_6_2);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(8, __reg_6_3);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(9, __reg_6_4);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(10, __reg_6_0);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(11, __reg_6_1);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(12, __reg_6_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__LOAD(__reg_0, 21);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__LOAD(__reg_0, 22);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__LOAD(__reg_0, 23);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__LOAD(__reg_0, 24);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(12, __reg_6_2);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_5_0 = __reg_4_0;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_5_1 = __reg_4_1;
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_3, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_5_1 = __reg_4_1;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_5_2 = __reg_4_2;
__CALC6(__reg_6_3, __reg_6_3, __reg_6_3, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_4, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_5_2 = __reg_4_2;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_5_3 = __reg_4_3;
__CALC6(__reg_6_4, __reg_6_4, __reg_6_4, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_0, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_5_3 = __reg_4_3;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__reg_5_4 = __reg_4_4;
__CALC6(__reg_6_0, __reg_6_0, __reg_6_0, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_1, __reg_6_2, __reg_5_4);
__STORE(__h + 2, __reg_6_2);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 11, __reg_6_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 10, __reg_6_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 9, __reg_6_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 8, __reg_6_2);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 7, __reg_6_3);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 6, __reg_6_4);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 5, __reg_6_0);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 4, __reg_6_1);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 3, __reg_6_2);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 2, __reg_6_3);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 1, __reg_6_4);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h + 0, __reg_6_0);
__reg_5_4 = __reg_4_4;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h + 1, __reg_6_1);
__reg_5_0 = __reg_4_0;
__CALC6(__reg_6_1, __reg_6_1, __reg_6_1, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h + 2, __reg_6_2);
__CALC6(__reg_6_2, __reg_6_2, __reg_6_2, __reg_6_2, __reg_6_3, __reg_5_0);
__STORE(__h + 3, __reg_6_3);
}
}
else
{
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_5_0);
__STORE(__h - 12, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__CALC6(__reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_6_4, __reg_5_1);
__STORE(__h - 12, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__CALC6(__reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_6_0, __reg_5_2);
__STORE(__h - 12, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__CALC6(__reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_6_1, __reg_5_3);
__STORE(__h - 12, __reg_6_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__CALC6(__reg_6_1, __reg_6_0, __reg_6_4, __reg_6_3, __reg_6_2, __reg_5_4);
__STORE(__h - 12, __reg_6_2);
__h++;
}
}
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC5(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(2, __reg_5_2);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(3, __reg_5_3);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(4, __reg_5_4);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(5, __reg_5_0);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(6, __reg_5_1);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(7, __reg_5_2);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(8, __reg_5_3);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(9, __reg_5_4);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(10, __reg_5_0);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__LOAD(__reg_0, 17);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__LOAD(__reg_0, 18);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__LOAD(__reg_0, 19);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__LOAD(__reg_0, 20);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(10, __reg_5_0);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_4_1 = __reg_3_1;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_4_2 = __reg_3_2;
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_4, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_4_2 = __reg_3_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_4_3 = __reg_3_3;
__CALC5(__reg_5_4, __reg_5_4, __reg_5_4, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_0, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_4_3 = __reg_3_3;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_4_4 = __reg_3_4;
__CALC5(__reg_5_0, __reg_5_0, __reg_5_0, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_1, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_4_4 = __reg_3_4;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__reg_4_0 = __reg_3_0;
__CALC5(__reg_5_1, __reg_5_1, __reg_5_1, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_2, __reg_5_3, __reg_4_0);
__STORE(__h + 2, __reg_5_3);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 9, __reg_5_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 8, __reg_5_3);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 7, __reg_5_4);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 6, __reg_5_0);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 5, __reg_5_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 4, __reg_5_2);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 3, __reg_5_3);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 2, __reg_5_4);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 1, __reg_5_0);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h + 0, __reg_5_1);
__reg_4_0 = __reg_3_0;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h + 1, __reg_5_2);
__reg_4_1 = __reg_3_1;
__CALC5(__reg_5_2, __reg_5_2, __reg_5_2, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h + 2, __reg_5_3);
__CALC5(__reg_5_3, __reg_5_3, __reg_5_3, __reg_5_3, __reg_5_4, __reg_4_1);
__STORE(__h + 3, __reg_5_4);
}
}
else
{
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__CALC5(__reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_4_3);
__STORE(__h - 10, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__CALC5(__reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_5_2, __reg_4_4);
__STORE(__h - 10, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_5_3, __reg_4_0);
__STORE(__h - 10, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__CALC5(__reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_5_4, __reg_4_1);
__STORE(__h - 10, __reg_5_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__CALC5(__reg_5_4, __reg_5_3, __reg_5_2, __reg_5_1, __reg_5_0, __reg_4_2);
__STORE(__h - 10, __reg_5_0);
__h++;
}
}
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC4(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(2, __reg_4_2);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(3, __reg_4_3);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(4, __reg_4_4);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(5, __reg_4_0);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(6, __reg_4_1);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(7, __reg_4_2);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(8, __reg_4_3);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__LOAD(__reg_0, 13);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__LOAD(__reg_0, 14);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__LOAD(__reg_0, 15);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__LOAD(__reg_0, 16);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(8, __reg_4_3);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_0, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_3_3 = __reg_2_3;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_3_4 = __reg_2_4;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_0, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_1, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_3_4 = __reg_2_4;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_1, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_2, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_2, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_3, __reg_4_4, __reg_3_1);
__STORE(__h + 2, __reg_4_4);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 7, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 6, __reg_4_1);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 5, __reg_4_2);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 4, __reg_4_3);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 3, __reg_4_4);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 2, __reg_4_0);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 1, __reg_4_1);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h + 0, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h + 1, __reg_4_3);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_3, __reg_4_3, __reg_4_3, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h + 2, __reg_4_4);
__CALC4(__reg_4_4, __reg_4_4, __reg_4_4, __reg_4_4, __reg_4_0, __reg_3_2);
__STORE(__h + 3, __reg_4_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__CALC4(__reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_3_1);
__STORE(__h - 8, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__CALC4(__reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_4_0, __reg_3_2);
__STORE(__h - 8, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_4_1, __reg_3_3);
__STORE(__h - 8, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_4_2, __reg_3_4);
__STORE(__h - 8, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_4_4, __reg_4_3, __reg_3_0);
__STORE(__h - 8, __reg_4_3);
__h++;
}
}
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC3(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(2, __reg_3_2);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(3, __reg_3_3);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(4, __reg_3_4);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(5, __reg_3_0);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(6, __reg_3_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__LOAD(__reg_0, 9);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__LOAD(__reg_0, 10);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__LOAD(__reg_0, 11);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__LOAD(__reg_0, 12);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(6, __reg_3_1);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_2_4 = __reg_1_4;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_1, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_2_4 = __reg_1_4;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_1, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_2, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_2, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_3, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_3, __reg_3_3, __reg_3_3, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_4, __reg_3_0, __reg_2_2);
__STORE(__h + 2, __reg_3_0);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 5, __reg_3_3);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 4, __reg_3_4);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 3, __reg_3_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 2, __reg_3_1);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 1, __reg_3_2);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h + 0, __reg_3_3);
__reg_2_2 = __reg_1_2;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h + 1, __reg_3_4);
__reg_2_3 = __reg_1_3;
__CALC3(__reg_3_4, __reg_3_4, __reg_3_4, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h + 2, __reg_3_0);
__CALC3(__reg_3_0, __reg_3_0, __reg_3_0, __reg_3_0, __reg_3_1, __reg_2_3);
__STORE(__h + 3, __reg_3_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_2_4);
__STORE(__h - 6, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_3_3, __reg_2_0);
__STORE(__h - 6, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__CALC3(__reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_3_4, __reg_2_1);
__STORE(__h - 6, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__CALC3(__reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_3_0, __reg_2_2);
__STORE(__h - 6, __reg_3_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_4, __reg_3_3, __reg_3_2, __reg_3_1, __reg_2_3);
__STORE(__h - 6, __reg_3_1);
__h++;
}
}
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __CALC2(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(2, __reg_2_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(3, __reg_2_3);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(4, __reg_2_4);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(4, __reg_2_4);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__reg_1_4 = __reg_0;
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_2, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__reg_1_0 = __reg_0;
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_2, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_3, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__reg_1_1 = __reg_0;
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_3, __reg_2_3, __reg_2_3, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_4, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__reg_1_2 = __reg_0;
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__reg_1_3 = __reg_0;
__CALC2(__reg_2_4, __reg_2_4, __reg_2_4, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_0, __reg_2_1, __reg_1_3);
__STORE(__h + 2, __reg_2_1);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 3, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 2, __reg_2_2);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 1, __reg_2_3);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h + 0, __reg_2_4);
__reg_1_3 = __reg_0;
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h + 1, __reg_2_0);
__reg_1_4 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_0, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h + 2, __reg_2_1);
__CALC2(__reg_2_1, __reg_2_1, __reg_2_1, __reg_2_1, __reg_2_2, __reg_1_4);
__STORE(__h + 3, __reg_2_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__CALC2(__reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_1_2);
__STORE(__h - 4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__CALC2(__reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_2_1, __reg_1_3);
__STORE(__h - 4, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_2_2, __reg_1_4);
__STORE(__h - 4, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_2_3, __reg_1_0);
__STORE(__h - 4, __reg_2_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_3, __reg_2_2, __reg_2_1, __reg_2_0, __reg_2_4, __reg_1_1);
__STORE(__h - 4, __reg_2_4);
__h++;
}
}
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((0.03125f * (__SBREF(__a_sb, -2))) + (0.03126f * (__SBREF(__a_sb, -1)))) + (0.03127f * (__REGREF(__a, 0)))) + (0.03128f * (__SBREF(__a_sb, 1)))) + (0.03129f * (__SBREF(__a_sb, 2)))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((0.03130f * (__SBREF(__a_sb, -2)))) + (0.03131f * (__SBREF(__a_sb, -1)))) + (0.03132f * (__REGREF(__a, 0)))) + (0.03133f * (__SBREF(__a_sb, 1)))) + (0.03134f * (__SBREF(__a_sb, 2))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((0.03135f * (__SBREF(__a_sb, -2)))) + (0.03136f * (__SBREF(__a_sb, -1)))) + (0.24712f * (__REGREF(__a, 0)))) + (0.03138f * (__SBREF(__a_sb, 1)))) + (0.03139f * (__SBREF(__a_sb, 2)))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = (((((((((((0.03140f * (__SBREF(__a_sb, -2)))) + (0.03141f * (__SBREF(__a_sb, -1)))) + (0.03142f * (__REGREF(__a, 0)))) + (0.03143f * (__SBREF(__a_sb, 1)))) + (0.03144f * (__SBREF(__a_sb, 2))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((0.03145f * (__SBREF(__a_sb, -2)))) + (0.03146f * (__SBREF(__a_sb, -1)))) + (0.03147f * (__REGREF(__a, 0)))) + (0.03148f * (__SBREF(__a_sb, 1)))) + (0.03149f * (__SBREF(__a_sb, 2)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, reg); } else out2 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(2, __reg_1_2);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 1, __reg_1_4);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_4, __reg_0);
__STORE(__h - 2, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 2, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 2, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 2, __reg_1_2);
__h++;
}
}
|
33ee3d23921764033b39f0047cfa0446a155fa84.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <iostream>
#include <string>
#include <set>
#include <map>
#include "../../nvmatrix/include/nvmatrix.cuh"
#include "../../nvmatrix/include/nvmatrix_operators.cuh"
#include "../../util/include/matrix.h"
#include "../include/convnet.cuh"
#include "../include/util.cuh"
using namespace std;
/*
* =======================
* ConvNet
* =======================
*/
ConvNet::ConvNet(PyObject* layerParams, intv& deviceIDs,
int minibatchSize, bool conserveMem) : Thread(true) {
_deviceIDs = deviceIDs;
_data = NULL;
_bufferData = NULL;
_bufferMinibatchIdx = -1;
_bufferPassIdx = -1;
_trainingProgress = 0;
_totalPassesDone = 0;
_conserveMem = conserveMem;
_sync = new ThreadSynchronizer(deviceIDs.size() + 1);
PyObjectV* layerList = pyDictGetValues(layerParams);
std::sort(layerList->begin(), layerList->end(), LayerIDComparator());
_dataCopyPD = new PipeDispenserBlocking(DIVUP(_deviceIDs.size(),2)); // hard-coded for now
initDataLayers(layerList);
initGPUThreads(layerList);
connectReplicas(); // Connect replicas to one another
connectChildren(layerParams); // Connect forward/backward links in graph
_numFwdTerminal = 0;
// Execute post-initialization stuff
for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) {
for (int r = 0; r < it->second.size(); r++) {
_numFwdTerminal += it->second[r]->getNext().size() == 0;
if (it->second[r]->getNext().size() == 0) {
printf("Fwd terminal: %s\n", it->second[r]->getName().c_str());
}
it->second[r]->postInit();
}
}
// Find and count the terminal nodes in the backward pass
for (int p = 0; p < getNumPasses(); p++) {
set<Layer*> visited;
_numBwdTerminal[p] = 0;
for (int t = 0; t < _convNetThreads.size(); t++) {
vector<CostLayer*>& cl = _convNetThreads[t]->getCostLayers();
for (int c = 0; c < cl.size(); c++) {
findBwdTerminal(*cl[c], visited, _numBwdTerminal[p], p);
}
}
}
_dp = new DataProvider(minibatchSize);
// Py_DECREF(layerList);
delete layerList;
}
ConvNet::~ConvNet() {
for (vector<ConvNetThread*>::const_iterator it = _convNetThreads.begin(); it != _convNetThreads.end(); ++it) {
(*it)->getMessageQueue().enqueue(new Message(EXIT_CONVNET));
(*it)->join();
delete *it;
}
for (DataLayerVector::const_iterator it = _dataLayers.begin(); it != _dataLayers.end(); ++it) {
delete *it;
}
for (intv::const_iterator it = _deviceIDs.begin(); it != _deviceIDs.end(); ++it) {
DEVICE_MEMORY_MANAGER::destroyInstance(*it);
}
HOST_MEMORY_MANAGER::destroyInstance();
delete _sync;
delete _dataCopyPD;
delete _dp;
}
void ConvNet::stop() {
getWorkerQueue().enqueue(new ExitWorker(*this));
join();
}
PipeDispenser& ConvNet::getDataCopyPD() {
return *_dataCopyPD;
}
void ConvNet::initDataLayers(PyObjectV* layerList) {
for (int i = 0; i < layerList->size(); i++) {
PyObject* paramsDict = layerList->at(i);
std::string layerType = pyDictGetString(paramsDict, "type");
if (layerType == "data") {
int numReplicas = pyDictGetInt(paramsDict, "numReplicas");
for (int r = 0; r < numReplicas; ++r) {
DataLayer* dataLayer = new DataLayer(this, paramsDict, r);
_dataLayers.push_back(dataLayer);
_layerMap[dataLayer->getName()][r] = dataLayer;
}
}
}
}
void ConvNet::initGPUThreads(PyObjectV* layerList) {
// Initialize GPU worker threads
for (int i = 0; i < _deviceIDs.size(); ++i) {
ConvNetThread* cng = new ConvNetThread(layerList, _deviceIDs[i], i, this);
_convNetThreads.push_back(cng);
for (NameLayerMap::iterator it = cng->getLayerMap().begin(); it != cng->getLayerMap().end(); ++it) {
const std::string& name = it->first;
Layer* layer = it->second;
_layerMap[name][layer->getReplicaID()] = layer;
}
}
}
void ConvNet::connectReplicas() {
_numReplicasMax = 0;
_numReplicasMin = 1 << 16;
for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) {
_numReplicasMax = max(_numReplicasMax, int(it->second.size()));
_numReplicasMin = min(_numReplicasMin, int(it->second.size()));
for (map<int,Layer*>::iterator it2 = it->second.begin(); it2 != it->second.end(); ++it2) {
Layer& l1 = *it2->second;
for (map<int,Layer*>::iterator it3 = it->second.begin(); it3 != it->second.end(); ++it3) {
Layer& l2 = *it3->second;
l1.addReplica(l2);
}
}
}
}
void ConvNet::connectChildren(PyObject* layerParams) {
for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) {
PyObject* paramsDict = PyDict_GetItemString(layerParams, it->first.c_str());
PyObject* inputList = PyDict_GetItemString(paramsDict, "inputs");
if (inputList != NULL) {
// Iterate over "replicas" of this layer
int numReplicas = _layerMap[it->first].size();
for (int i = 0; i < PyList_GET_SIZE(inputList); i++) {
std::string inputName = PyString_AsString(PyList_GetItem(inputList, i));
int numReplicasPrev = _layerMap[inputName].size();
// How many replicas from the previous layer must this layer be connected to?
int numInputReplicas = numReplicasPrev / numReplicas;
for (int r = 0; r < numReplicas; r++) {
for (int rp = r, ridx = 0; ridx < numInputReplicas; rp += numReplicas, ridx++) {
it->second[r]->addPrev(*_layerMap[inputName][rp], ridx);
_layerMap[inputName][rp]->addNext(*it->second[r]);
}
}
}
}
}
}
void ConvNet::findBwdTerminal(Layer& l, set<Layer*>& visited, int& terminal, int passIdx) {
if (visited.count(&l) == 0) {
visited.insert(&l);
if (l.isGradConsumer()) {
bool hasPrevConsumer = false;
if (l.getPrev().size() > 0) {
for (int i = 0; i < l.getPrev()[0].size(); i++) {
// Looking only at 0th replica is fine to see if you have
// grad consumers below you.
hasPrevConsumer |= l.getPrev()[0][i]->isGradConsumer();
}
}
if (!hasPrevConsumer || !l.isGradProducer() || (passIdx + 1 < l.getNumReplicasPrev() && l.getNumReplicasPrev() > l.getNumReplicas())) {
terminal++;
l.setBwdTerminal(passIdx);
printf("found bwd terminal %s[%d] in passIdx=%d\n", l.getName().c_str(), l.getReplicaID(), passIdx);
} else if (l.isGradProducer()) {
for (int r = 0; r < l.getPrev().size(); r++) {
for (int i = 0; i < l.getPrev()[r].size(); i++) {
findBwdTerminal(*l.getPrev()[r][i], visited, terminal, passIdx);
}
}
}
}
}
}
void* ConvNet::run() {
for (vector<ConvNetThread*>::const_iterator it = _convNetThreads.begin(); it != _convNetThreads.end(); ++it) {
(*it)->start();
}
// The manager thread defaults to using the GPU of the first worker.
// Put more logic here if this is inappropriate.
NVMatrix::setDeviceID(_convNetThreads[0]->getDeviceID());
copyToGPU();
bool exit = false;
while (!exit) {
Worker* worker = _workerQueue.dequeue();
exit = worker->run();
delete worker;
}
return NULL;
}
Queue<Worker*>& ConvNet::getWorkerQueue() {
return _workerQueue;
}
Queue<WorkResult*>& ConvNet::getResultQueue() {
return _resultQueue;
}
DataProvider& ConvNet::getDataProvider() {
return *_dp;
}
Layer& ConvNet::getLayer(std::string& name, int replicaID) {
return *_layerMap[name][replicaID];
}
void ConvNet::sendMessage(MESSAGES msg, bool sync) {
sendMessage(new Message(msg), sync);
}
void ConvNet::sendMessage(Message* msg, bool sync) {
for (int i = 0; i < _convNetThreads.size(); i++) {
_convNetThreads[i]->getMessageQueue().enqueue(msg->clone());
}
delete msg;
if (sync) {
syncWithChildren();
}
}
void ConvNet::copyToCPU() {
sendMessage(COPY_TO_CPU, true);
}
void ConvNet::copyToGPU() {
sendMessage(COPY_TO_GPU, false);
}
void ConvNet::updateWeights(int passIdx) {
sendMessage(UPDATE_WEIGHTS, true);
sendMessage(CONSTRAIN_WEIGHTS, true);
}
void ConvNet::reset(int passIdx) {
sendMessage((passIdx % getNumPasses()) == 0 ? RESET : RESET_PASS_IDX, false);
}
void ConvNet::reset() {
reset(0);
}
// Fprop given data
void ConvNet::fprop(CPUData& data, int passIdx, PASS_TYPE passType) {
reset(passIdx);
// This is necessary because setData below could delete data. If there's
// an outstanding copy request, this'll cause a segfault.
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->waitForCopyFinish();
}
setData(data, passIdx);
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->fprop(passType, passIdx, false);
}
waitForTerminals(_numFwdTerminal, FPROP_TERMINAL);
}
// Fprop given minibatch idx
void ConvNet::fprop(int miniIdx, int passIdx, PASS_TYPE passType) {
reset(passIdx);
bool fromBuffer = miniIdx == _bufferMinibatchIdx && passIdx == _bufferPassIdx;
if (!fromBuffer) {
// This is necessary because setData below could delete data. If there's
// an outstanding copy request, this'll cause a segfault.
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->waitForCopyFinish();
}
setData(_dp->getMinibatch(miniIdx), passIdx);
} else {
setDataFromBuffer();
}
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->fprop(passType, passIdx, fromBuffer);
}
if (passIdx == getNumPasses() - 1) {
// Do double-buffering from next minibatch from the DataProvider
setBuffer(miniIdx == _dp->getNumMinibatches() - 1 ? NULL : &_dp->getMinibatch(miniIdx + 1), miniIdx + 1, 0);
} else {
// Do double-buffering from next microbatch within current minibatch
setBuffer(_data, miniIdx, passIdx + 1);
}
waitForTerminals(_numFwdTerminal, FPROP_TERMINAL);
}
void ConvNet::setDataFromBuffer() {
if (_bufferData != _data) {
delete _data;
}
_data = _bufferData;
_bufferData = NULL;
_bufferMinibatchIdx = -1;
_bufferPassIdx = -1;
}
void ConvNet::setData(CPUData& data, int passIdx) {
bool same = _data == _bufferData;
if (&data != _data) {
delete _data;
}
if (&data != _bufferData && !same) {
delete _bufferData;
_bufferData = NULL;
_bufferMinibatchIdx = -1;
_bufferPassIdx = -1;
}
_data = &data;
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->copyData(*_data, false, passIdx);
}
}
void ConvNet::setBuffer(CPUData* bufferData, int bufferMinibatchIdx, int bufferPassIdx) {
_bufferData = bufferData;
_bufferMinibatchIdx = bufferMinibatchIdx;
_bufferPassIdx = bufferPassIdx;
if (bufferData != NULL) {
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->copyData(*_bufferData, true, bufferPassIdx);
}
}
}
CPUData& ConvNet::getData() {
assert(_data != NULL);
return *_data;
}
void ConvNet::bprop(int passIdx, PASS_TYPE passType) {
_totalPassesDone++;
sendMessage(new BpropStartMessage(passType, passIdx), false);
waitForTerminals(_numBwdTerminal[passIdx], BPROP_TERMINAL);
reset(passIdx + 1);
}
void ConvNet::waitForTerminals(int numMsgs, MESSAGES msgType) {
for (int rcvd = 0; rcvd < numMsgs; rcvd++) {
Message* m = _msgQueue.dequeue();
assert(m->getType() == msgType);
delete m;
}
}
// Same as getCost() but adds results to given cost and returns it
Cost& ConvNet::getCost(Cost& cost) {
Cost &tmp = getCost();
cost += tmp;
delete &tmp;
return cost;
}
Cost& ConvNet::getCost() {
Cost& cost = *new Cost();
for (int t = 0; t < _convNetThreads.size(); t++) {
Cost& tcost = _convNetThreads[t]->getCost();
cost += tcost;
delete &tcost;
}
return cost;
}
double ConvNet::getCostValue() {
Cost& cost = getCost();
double val = cost.getValue();
delete &cost;
return val;
}
Queue<Message*>& ConvNet::getMessageQueue() {
return _msgQueue;
}
intv& ConvNet::getDeviceIDs() {
return _deviceIDs;
}
ThreadSynchronizer& ConvNet::getSync() {
return *_sync;
}
void ConvNet::syncWithChildren() {
sendMessage(SYNC, false);
_sync->sync();
}
int ConvNet::getTotalPassesDone() {
return _totalPassesDone;
}
int ConvNet::getMinibatchSize() {
return _dp->getMinibatchSize();
}
int ConvNet::getNumReplicasMax() {
return _numReplicasMax;
}
int ConvNet::getNumReplicasMin() {
return _numReplicasMin;
}
int ConvNet::getNumPasses() {
return _numReplicasMax / _numReplicasMin;
}
void ConvNet::setTrainingProgress(double progress) {
_trainingProgress = progress;
}
double ConvNet::getTrainingProgress() const {
return _trainingProgress;
}
bool ConvNet::isConserveMemory() {
return _conserveMem;
}
/*
* Gradient checking stuff
*/
void ConvNet::checkGradients() {
_numFailures = 0;
_numTests = 0;
_baseErr = 0;
for (int p = 0; p < getNumPasses(); ++p) {
fprop(0, p, PASS_GC);
_baseErr += getCostValue();
bprop(p, PASS_GC);
}
// We call grad check only on the first replica,
// but because weights are aware of their fellow replicas,
// we can simultaneously perturb the weights of all
// replicas.
for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) {
map<int, Layer*>& layers = it->second;
if (layers[0]->getDeviceID() >= 0 /*&& (layers[0]->getName() == "fc10")*/) { // If layer on GPU (data layers aren't)
layers[0]->checkGradient();
}
}
cout << "------------------------" << endl;
if (_numFailures > 0) {
cout << _numFailures << "/" << _numTests << " TESTS FAILED" << endl;
} else {
cout << "ALL " << _numTests << " TESTS PASSED" << endl;
}
}
// Copies to all replicas
void ConvNet::checkGradient_copyWeightsToGPU(Matrix& weightsCPU, Weights& weights) {
int d = NVMatrix::getDeviceID();
for (map<int, Weights*>::const_iterator it = weights.getReplicas().begin(); it != weights.getReplicas().end(); ++it) {
NVMatrix::setDeviceID(it->second->getDeviceID());
it->second->getW().copyFromHost(weightsCPU);
}
NVMatrix::setDeviceID(d);
}
/*
* name: weight matrix name
* eps: finite difference step
*/
bool ConvNet::checkGradient(const std::string& name, float eps, Weights& weights) {
Matrix numGrad(weights.getNumRows(), weights.getNumCols());
Matrix diff(numGrad);
numGrad.apply(Matrix::ZERO);
Matrix weightsCPU;
weights.getW().copyToHost(weightsCPU, true);
for(int i = 0; i < weights.getNumRows(); i++) {
for (int j = 0; j < weights.getNumCols(); j++) {
float v = weightsCPU(i,j);
weightsCPU(i,j) += eps;
checkGradient_copyWeightsToGPU(weightsCPU, weights);
weightsCPU(i,j) = v;
double err = 0;
for (int p = 0; p < getNumPasses(); ++p) {
// printf("trying fprop %d\n", p);
fprop(0, p, PASS_GC);
// printf(" success\n");
err += getCostValue();
}
numGrad(i,j) = (err - _baseErr) / (_data->getNumCases() * eps);
if (isnan((double)numGrad(i,j)) || isinf((double)numGrad(i,j))) {
cout << "Numerical computation produced nan or inf when checking '" << name << "': " << numGrad(i,j) << endl;
cout << "Consider reducing the sizes of the weights or finite difference steps." << endl;
cout << "Exiting." << endl;
exit(1);
}
checkGradient_copyWeightsToGPU(weightsCPU, weights);
}
}
Matrix gradCPU;
NVMatrix::setDeviceID(weights.getDeviceID());
map<int,NVMatrix*> mats;
for (map<int, Weights*>::const_iterator it = weights.getReplicas().begin(); it != weights.getReplicas().end(); ++it) {
mats[it->first] = &it->second->getGrad();
}
weights.getReducer().reduce(mats, 1, false);
weights.getGrad().copyToHost(gradCPU, true);
gradCPU.scale(-1.0 / _data->getNumCases());
float analNorm = gradCPU.norm();
float numNorm = numGrad.norm();
numGrad.subtract(gradCPU, diff);
float relErr = diff.norm() / analNorm;
bool fail = relErr >= GC_REL_ERR_THRESH;
if (fail || !GC_SUPPRESS_PASSES) {
cout << "========================" << endl;
printf("(%s) %s GRADIENT CHECK\n", fail ? "****FAIL****" : "PASS", name.c_str());
cout << "========================" << endl;
cout << "Analytic:" << endl;
gradCPU.print(0, 6, 0, 4);
cout << "Numeric:" << endl;
numGrad.print(0, 6, 0, 4);
printf("Analytic norm: %e\n", analNorm);
printf("Numeric norm: %e\n", numNorm);
printf("Relative error: %e\n", relErr);
}
_numTests++;
_numFailures += fail;
return fail;
}
/*
* =======================================================================================================
* ConvNetThread
* =======================================================================================================
*/
ConvNetThread::ConvNetThread(PyObjectV* layerList, int deviceID, int deviceIdx, ConvNet* convNet)
: Thread(true, getDeviceCPUs(deviceID)), _deviceID(deviceID), _convNet(convNet) {
try {
int numLayers = layerList->size();
for (int i = 0; i < numLayers; i++) {
PyObject* paramsDict = layerList->at(i);
std::string layerType = pyDictGetString(paramsDict, "type");
if (layerType != "data") {
intv& gpus = *pyDictGetIntV(paramsDict, "gpu");
int rid = indexOf(gpus, deviceIdx);
if (rid >= 0) {
initLayer(paramsDict, rid);
}
delete &gpus;
}
}
} catch (std::string& s) {
cout << "Error creating ConvNet: " << s << endl;
exit(1);
}
}
ConvNetThread::~ConvNetThread() {
NVMatrix::setDeviceID(_deviceID);
NVMatrix::destroyCublas();
NVMatrix::destroyRandom();
for (NameLayerMap::const_iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) {
delete it->second;
}
_nameLayerMap.clear();
}
void ConvNetThread::startTimer() {
NVMatrix::syncStream();
_timer.start();
}
double ConvNetThread::stopTimer() {
NVMatrix::syncStream();
return _timer.stop();
}
void ConvNetThread::initLayer(PyObject* paramsDict, int replicaID) {
std::string type = pyDictGetString(paramsDict, "type");
std::string name = pyDictGetString(paramsDict, "name");
if (type == "fc") {
_nameLayerMap[name] = new FCLayer(this, paramsDict, replicaID, false);
} else if (type == "sfc") {
_nameLayerMap[name] = new SplitFCLayer(this, paramsDict, replicaID, false);
} else if (type == "conv") {
_nameLayerMap[name] = new ConvLayer(this, paramsDict, replicaID);
} else if (type == "local") {
_nameLayerMap[name] = new LocalUnsharedLayer(this, paramsDict, replicaID);
} else if (type == "pool") {
_nameLayerMap[name] = &PoolLayer::make(this, paramsDict, replicaID);
} else if (type == "cmpool") {
_nameLayerMap[name] = &CrossMapPoolLayer::make(this, paramsDict, replicaID);
} else if (type == "rnorm") {
_nameLayerMap[name] = new ResponseNormLayer(this, paramsDict, replicaID);
} else if (type == "cmrnorm") {
_nameLayerMap[name] = new CrossMapResponseNormLayer(this, paramsDict, replicaID);
} else if (type == "cnorm") {
_nameLayerMap[name] = new ContrastNormLayer(this, paramsDict, replicaID);
} else if (type == "softmax") {
_nameLayerMap[name] = new SoftmaxLayer(this, paramsDict, replicaID);
} else if (type == "eltsum") {
_nameLayerMap[name] = new EltwiseSumLayer(this, paramsDict, replicaID);
} else if (type == "eltmax") {
_nameLayerMap[name] = new EltwiseMaxLayer(this, paramsDict, replicaID);
} else if (type == "neuron") {
_nameLayerMap[name] = new NeuronLayer(this, paramsDict, replicaID);
} else if (type == "nailbed") {
_nameLayerMap[name] = new NailbedLayer(this, paramsDict, replicaID);
} else if (type == "blur") {
_nameLayerMap[name] = new GaussianBlurLayer(this, paramsDict, replicaID);
} else if (type == "href") {
_nameLayerMap[name] = new HorizontalReflectionLayer(this, paramsDict, replicaID);
} else if (type == "resize") {
_nameLayerMap[name] = new ResizeLayer(this, paramsDict, replicaID);
} else if (type == "rgb2yuv") {
_nameLayerMap[name] = new RGBToYUVLayer(this, paramsDict, replicaID);
} else if (type == "rgb2lab") {
_nameLayerMap[name] = new RGBToLABLayer(this, paramsDict, replicaID);
} else if (type == "rscale") {
_nameLayerMap[name] = new RandomScaleLayer(this, paramsDict, replicaID);
} else if (type == "crop") {
_nameLayerMap[name] = new CropLayer(this, paramsDict, replicaID);
} else if (type == "concat") {
_nameLayerMap[name] = new ConcatenationLayer(this, paramsDict, replicaID);
} else if (type == "pass") {
_nameLayerMap[name] = new PassThroughLayer(this, paramsDict, replicaID);
} else if (type == "dropout") {
_nameLayerMap[name] = new DropoutLayer(this, paramsDict, replicaID);
} else if (type == "dropout2") {
_nameLayerMap[name] = new Dropout2Layer(this, paramsDict, replicaID);
} else if (strncmp(type.c_str(), "cost.", 5) == 0) {
CostLayer *c = &CostLayer::make(this, paramsDict, type, replicaID);
_nameLayerMap[name] = c;
_costs.push_back(c);
} else {
throw std::string("Unknown layer type ") + type;
}
}
/*
* This executes in a new CPU thread so it's OK to initialize CUDA stuff here.
*/
void ConvNetThread::initCuda() {
NVMatrix::setDeviceID(_deviceID);
checkCudaErrors(hipDeviceSetCacheConfig(hipFuncCachePreferShared));
for (int i = 0; i < _convNet->getDeviceIDs().size(); i++) {
int d = _convNet->getDeviceIDs()[i];
if (d != _deviceID) {
if (NVMatrix::canAccessPeer(_deviceID, d)) {
printf("Enabling peer access GPU %d --> GPU %d\n", NVMatrix::getDeviceID(), d);
checkCudaErrors(hipDeviceEnablePeerAccess(d, 0));
} else {
printf("No peer access GPU %d --> GPU %d\n", _deviceID, d);
}
}
}
// NVMatrix::syncStream();
NVMatrix::initCublas();
char* randomSeedEnv;
int randomSeed = time(0);
randomSeedEnv = getenv("CONVNET_RANDOM_SEED");
if(randomSeedEnv != NULL) {
randomSeed = atoi(randomSeedEnv);
}
//NVMatrix::initRandom(/*7*/);
NVMatrix::initRandom(randomSeed);
//srand(time(0));
}
void* ConvNetThread::run() {
initCuda();
bool exit = false;
while (!exit) {
Message* m = _msgQueue.dequeue();
if (m->getType() == FPROP_READY) {
FpropMessage* msg = static_cast<FpropMessage*>(m);
msg->getToLayer().fprop(msg->getPassType(), msg->getPassIdx());
} else if (m->getType() == BPROP_READY) {
BpropMessage* msg = static_cast<BpropMessage*>(m);
msg->getToLayer().incRcvdBInputMsgs();
msg->getToLayer().bprop(msg->getPassType(), msg->getPassIdx());
} else if (m->getType() == BPROP_START) {
BpropStartMessage* msg = static_cast<BpropStartMessage*>(m);
for (int i = 0; i < _costs.size(); i++) {
dynamic_cast<Layer*>(_costs[i])->bprop(msg->getPassType(), msg->getPassIdx());
}
} else if (m->getType() == SYNC) {
NVMatrix::syncStream();
_convNet->getSync().sync();
} else if (m->getType() == COPY_TO_CPU) {
for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) {
it->second->copyToCPU();
}
} else if (m->getType() == COPY_TO_GPU) {
for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) {
it->second->copyToGPU();
}
} else if (m->getType() == RESET) {
for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) {
it->second->reset();
}
} else if (m->getType() == RESET_PASS_IDX) {
for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) {
it->second->resetPassIdx();
}
} else if (m->getType() == UPDATE_WEIGHTS) {
for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) {
it->second->updateWeights();
}
} else if (m->getType() == CONSTRAIN_WEIGHTS) {
for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) {
it->second->constrainWeights();
}
} else if (m->getType() == EXIT_CONVNET) {
exit = true;
}
delete m;
}
return NULL;
}
Cost& ConvNetThread::getCost() {
// In a single ConvNetThread, all costs are guaranteed to be different
// (i.e. not replicas of one another)
return *new Cost(_costs);
}
Layer& ConvNetThread::getLayer(std::string& name) {
return *_nameLayerMap[name];
}
int ConvNetThread::getDeviceID() {
return _deviceID;
}
Queue<Message*>& ConvNetThread::getMessageQueue() {
return _msgQueue;
}
vector<CostLayer*>& ConvNetThread::getCostLayers() {
return _costs;
}
NameLayerMap& ConvNetThread::getLayerMap() {
return _nameLayerMap;
}
ConvNet& ConvNetThread::getConvNet() {
return *_convNet;
}
| 33ee3d23921764033b39f0047cfa0446a155fa84.cu | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <iostream>
#include <string>
#include <set>
#include <map>
#include "../../nvmatrix/include/nvmatrix.cuh"
#include "../../nvmatrix/include/nvmatrix_operators.cuh"
#include "../../util/include/matrix.h"
#include "../include/convnet.cuh"
#include "../include/util.cuh"
using namespace std;
/*
* =======================
* ConvNet
* =======================
*/
ConvNet::ConvNet(PyObject* layerParams, intv& deviceIDs,
int minibatchSize, bool conserveMem) : Thread(true) {
_deviceIDs = deviceIDs;
_data = NULL;
_bufferData = NULL;
_bufferMinibatchIdx = -1;
_bufferPassIdx = -1;
_trainingProgress = 0;
_totalPassesDone = 0;
_conserveMem = conserveMem;
_sync = new ThreadSynchronizer(deviceIDs.size() + 1);
PyObjectV* layerList = pyDictGetValues(layerParams);
std::sort(layerList->begin(), layerList->end(), LayerIDComparator());
_dataCopyPD = new PipeDispenserBlocking(DIVUP(_deviceIDs.size(),2)); // hard-coded for now
initDataLayers(layerList);
initGPUThreads(layerList);
connectReplicas(); // Connect replicas to one another
connectChildren(layerParams); // Connect forward/backward links in graph
_numFwdTerminal = 0;
// Execute post-initialization stuff
for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) {
for (int r = 0; r < it->second.size(); r++) {
_numFwdTerminal += it->second[r]->getNext().size() == 0;
if (it->second[r]->getNext().size() == 0) {
printf("Fwd terminal: %s\n", it->second[r]->getName().c_str());
}
it->second[r]->postInit();
}
}
// Find and count the terminal nodes in the backward pass
for (int p = 0; p < getNumPasses(); p++) {
set<Layer*> visited;
_numBwdTerminal[p] = 0;
for (int t = 0; t < _convNetThreads.size(); t++) {
vector<CostLayer*>& cl = _convNetThreads[t]->getCostLayers();
for (int c = 0; c < cl.size(); c++) {
findBwdTerminal(*cl[c], visited, _numBwdTerminal[p], p);
}
}
}
_dp = new DataProvider(minibatchSize);
// Py_DECREF(layerList);
delete layerList;
}
ConvNet::~ConvNet() {
for (vector<ConvNetThread*>::const_iterator it = _convNetThreads.begin(); it != _convNetThreads.end(); ++it) {
(*it)->getMessageQueue().enqueue(new Message(EXIT_CONVNET));
(*it)->join();
delete *it;
}
for (DataLayerVector::const_iterator it = _dataLayers.begin(); it != _dataLayers.end(); ++it) {
delete *it;
}
for (intv::const_iterator it = _deviceIDs.begin(); it != _deviceIDs.end(); ++it) {
DEVICE_MEMORY_MANAGER::destroyInstance(*it);
}
HOST_MEMORY_MANAGER::destroyInstance();
delete _sync;
delete _dataCopyPD;
delete _dp;
}
void ConvNet::stop() {
getWorkerQueue().enqueue(new ExitWorker(*this));
join();
}
PipeDispenser& ConvNet::getDataCopyPD() {
return *_dataCopyPD;
}
void ConvNet::initDataLayers(PyObjectV* layerList) {
for (int i = 0; i < layerList->size(); i++) {
PyObject* paramsDict = layerList->at(i);
std::string layerType = pyDictGetString(paramsDict, "type");
if (layerType == "data") {
int numReplicas = pyDictGetInt(paramsDict, "numReplicas");
for (int r = 0; r < numReplicas; ++r) {
DataLayer* dataLayer = new DataLayer(this, paramsDict, r);
_dataLayers.push_back(dataLayer);
_layerMap[dataLayer->getName()][r] = dataLayer;
}
}
}
}
void ConvNet::initGPUThreads(PyObjectV* layerList) {
// Initialize GPU worker threads
for (int i = 0; i < _deviceIDs.size(); ++i) {
ConvNetThread* cng = new ConvNetThread(layerList, _deviceIDs[i], i, this);
_convNetThreads.push_back(cng);
for (NameLayerMap::iterator it = cng->getLayerMap().begin(); it != cng->getLayerMap().end(); ++it) {
const std::string& name = it->first;
Layer* layer = it->second;
_layerMap[name][layer->getReplicaID()] = layer;
}
}
}
void ConvNet::connectReplicas() {
_numReplicasMax = 0;
_numReplicasMin = 1 << 16;
for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) {
_numReplicasMax = max(_numReplicasMax, int(it->second.size()));
_numReplicasMin = min(_numReplicasMin, int(it->second.size()));
for (map<int,Layer*>::iterator it2 = it->second.begin(); it2 != it->second.end(); ++it2) {
Layer& l1 = *it2->second;
for (map<int,Layer*>::iterator it3 = it->second.begin(); it3 != it->second.end(); ++it3) {
Layer& l2 = *it3->second;
l1.addReplica(l2);
}
}
}
}
void ConvNet::connectChildren(PyObject* layerParams) {
for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) {
PyObject* paramsDict = PyDict_GetItemString(layerParams, it->first.c_str());
PyObject* inputList = PyDict_GetItemString(paramsDict, "inputs");
if (inputList != NULL) {
// Iterate over "replicas" of this layer
int numReplicas = _layerMap[it->first].size();
for (int i = 0; i < PyList_GET_SIZE(inputList); i++) {
std::string inputName = PyString_AsString(PyList_GetItem(inputList, i));
int numReplicasPrev = _layerMap[inputName].size();
// How many replicas from the previous layer must this layer be connected to?
int numInputReplicas = numReplicasPrev / numReplicas;
for (int r = 0; r < numReplicas; r++) {
for (int rp = r, ridx = 0; ridx < numInputReplicas; rp += numReplicas, ridx++) {
it->second[r]->addPrev(*_layerMap[inputName][rp], ridx);
_layerMap[inputName][rp]->addNext(*it->second[r]);
}
}
}
}
}
}
void ConvNet::findBwdTerminal(Layer& l, set<Layer*>& visited, int& terminal, int passIdx) {
if (visited.count(&l) == 0) {
visited.insert(&l);
if (l.isGradConsumer()) {
bool hasPrevConsumer = false;
if (l.getPrev().size() > 0) {
for (int i = 0; i < l.getPrev()[0].size(); i++) {
// Looking only at 0th replica is fine to see if you have
// grad consumers below you.
hasPrevConsumer |= l.getPrev()[0][i]->isGradConsumer();
}
}
if (!hasPrevConsumer || !l.isGradProducer() || (passIdx + 1 < l.getNumReplicasPrev() && l.getNumReplicasPrev() > l.getNumReplicas())) {
terminal++;
l.setBwdTerminal(passIdx);
printf("found bwd terminal %s[%d] in passIdx=%d\n", l.getName().c_str(), l.getReplicaID(), passIdx);
} else if (l.isGradProducer()) {
for (int r = 0; r < l.getPrev().size(); r++) {
for (int i = 0; i < l.getPrev()[r].size(); i++) {
findBwdTerminal(*l.getPrev()[r][i], visited, terminal, passIdx);
}
}
}
}
}
}
void* ConvNet::run() {
for (vector<ConvNetThread*>::const_iterator it = _convNetThreads.begin(); it != _convNetThreads.end(); ++it) {
(*it)->start();
}
// The manager thread defaults to using the GPU of the first worker.
// Put more logic here if this is inappropriate.
NVMatrix::setDeviceID(_convNetThreads[0]->getDeviceID());
copyToGPU();
bool exit = false;
while (!exit) {
Worker* worker = _workerQueue.dequeue();
exit = worker->run();
delete worker;
}
return NULL;
}
Queue<Worker*>& ConvNet::getWorkerQueue() {
return _workerQueue;
}
Queue<WorkResult*>& ConvNet::getResultQueue() {
return _resultQueue;
}
DataProvider& ConvNet::getDataProvider() {
return *_dp;
}
Layer& ConvNet::getLayer(std::string& name, int replicaID) {
return *_layerMap[name][replicaID];
}
void ConvNet::sendMessage(MESSAGES msg, bool sync) {
sendMessage(new Message(msg), sync);
}
void ConvNet::sendMessage(Message* msg, bool sync) {
for (int i = 0; i < _convNetThreads.size(); i++) {
_convNetThreads[i]->getMessageQueue().enqueue(msg->clone());
}
delete msg;
if (sync) {
syncWithChildren();
}
}
void ConvNet::copyToCPU() {
sendMessage(COPY_TO_CPU, true);
}
void ConvNet::copyToGPU() {
sendMessage(COPY_TO_GPU, false);
}
void ConvNet::updateWeights(int passIdx) {
sendMessage(UPDATE_WEIGHTS, true);
sendMessage(CONSTRAIN_WEIGHTS, true);
}
void ConvNet::reset(int passIdx) {
sendMessage((passIdx % getNumPasses()) == 0 ? RESET : RESET_PASS_IDX, false);
}
void ConvNet::reset() {
reset(0);
}
// Fprop given data
void ConvNet::fprop(CPUData& data, int passIdx, PASS_TYPE passType) {
reset(passIdx);
// This is necessary because setData below could delete data. If there's
// an outstanding copy request, this'll cause a segfault.
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->waitForCopyFinish();
}
setData(data, passIdx);
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->fprop(passType, passIdx, false);
}
waitForTerminals(_numFwdTerminal, FPROP_TERMINAL);
}
// Fprop given minibatch idx
void ConvNet::fprop(int miniIdx, int passIdx, PASS_TYPE passType) {
reset(passIdx);
bool fromBuffer = miniIdx == _bufferMinibatchIdx && passIdx == _bufferPassIdx;
if (!fromBuffer) {
// This is necessary because setData below could delete data. If there's
// an outstanding copy request, this'll cause a segfault.
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->waitForCopyFinish();
}
setData(_dp->getMinibatch(miniIdx), passIdx);
} else {
setDataFromBuffer();
}
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->fprop(passType, passIdx, fromBuffer);
}
if (passIdx == getNumPasses() - 1) {
// Do double-buffering from next minibatch from the DataProvider
setBuffer(miniIdx == _dp->getNumMinibatches() - 1 ? NULL : &_dp->getMinibatch(miniIdx + 1), miniIdx + 1, 0);
} else {
// Do double-buffering from next microbatch within current minibatch
setBuffer(_data, miniIdx, passIdx + 1);
}
waitForTerminals(_numFwdTerminal, FPROP_TERMINAL);
}
void ConvNet::setDataFromBuffer() {
if (_bufferData != _data) {
delete _data;
}
_data = _bufferData;
_bufferData = NULL;
_bufferMinibatchIdx = -1;
_bufferPassIdx = -1;
}
void ConvNet::setData(CPUData& data, int passIdx) {
bool same = _data == _bufferData;
if (&data != _data) {
delete _data;
}
if (&data != _bufferData && !same) {
delete _bufferData;
_bufferData = NULL;
_bufferMinibatchIdx = -1;
_bufferPassIdx = -1;
}
_data = &data;
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->copyData(*_data, false, passIdx);
}
}
void ConvNet::setBuffer(CPUData* bufferData, int bufferMinibatchIdx, int bufferPassIdx) {
_bufferData = bufferData;
_bufferMinibatchIdx = bufferMinibatchIdx;
_bufferPassIdx = bufferPassIdx;
if (bufferData != NULL) {
for (int i = 0; i < _dataLayers.size(); i++) {
_dataLayers[i]->copyData(*_bufferData, true, bufferPassIdx);
}
}
}
CPUData& ConvNet::getData() {
assert(_data != NULL);
return *_data;
}
void ConvNet::bprop(int passIdx, PASS_TYPE passType) {
_totalPassesDone++;
sendMessage(new BpropStartMessage(passType, passIdx), false);
waitForTerminals(_numBwdTerminal[passIdx], BPROP_TERMINAL);
reset(passIdx + 1);
}
void ConvNet::waitForTerminals(int numMsgs, MESSAGES msgType) {
for (int rcvd = 0; rcvd < numMsgs; rcvd++) {
Message* m = _msgQueue.dequeue();
assert(m->getType() == msgType);
delete m;
}
}
// Same as getCost() but adds results to given cost and returns it
Cost& ConvNet::getCost(Cost& cost) {
Cost &tmp = getCost();
cost += tmp;
delete &tmp;
return cost;
}
Cost& ConvNet::getCost() {
Cost& cost = *new Cost();
for (int t = 0; t < _convNetThreads.size(); t++) {
Cost& tcost = _convNetThreads[t]->getCost();
cost += tcost;
delete &tcost;
}
return cost;
}
double ConvNet::getCostValue() {
Cost& cost = getCost();
double val = cost.getValue();
delete &cost;
return val;
}
Queue<Message*>& ConvNet::getMessageQueue() {
return _msgQueue;
}
intv& ConvNet::getDeviceIDs() {
return _deviceIDs;
}
ThreadSynchronizer& ConvNet::getSync() {
return *_sync;
}
void ConvNet::syncWithChildren() {
sendMessage(SYNC, false);
_sync->sync();
}
int ConvNet::getTotalPassesDone() {
return _totalPassesDone;
}
int ConvNet::getMinibatchSize() {
return _dp->getMinibatchSize();
}
int ConvNet::getNumReplicasMax() {
return _numReplicasMax;
}
int ConvNet::getNumReplicasMin() {
return _numReplicasMin;
}
int ConvNet::getNumPasses() {
return _numReplicasMax / _numReplicasMin;
}
void ConvNet::setTrainingProgress(double progress) {
_trainingProgress = progress;
}
double ConvNet::getTrainingProgress() const {
return _trainingProgress;
}
bool ConvNet::isConserveMemory() {
return _conserveMem;
}
/*
* Gradient checking stuff
*/
void ConvNet::checkGradients() {
_numFailures = 0;
_numTests = 0;
_baseErr = 0;
for (int p = 0; p < getNumPasses(); ++p) {
fprop(0, p, PASS_GC);
_baseErr += getCostValue();
bprop(p, PASS_GC);
}
// We call grad check only on the first replica,
// but because weights are aware of their fellow replicas,
// we can simultaneously perturb the weights of all
// replicas.
for (NameReplicaLayerMap::iterator it = _layerMap.begin(); it != _layerMap.end(); ++it) {
map<int, Layer*>& layers = it->second;
if (layers[0]->getDeviceID() >= 0 /*&& (layers[0]->getName() == "fc10")*/) { // If layer on GPU (data layers aren't)
layers[0]->checkGradient();
}
}
cout << "------------------------" << endl;
if (_numFailures > 0) {
cout << _numFailures << "/" << _numTests << " TESTS FAILED" << endl;
} else {
cout << "ALL " << _numTests << " TESTS PASSED" << endl;
}
}
// Copies to all replicas
void ConvNet::checkGradient_copyWeightsToGPU(Matrix& weightsCPU, Weights& weights) {
int d = NVMatrix::getDeviceID();
for (map<int, Weights*>::const_iterator it = weights.getReplicas().begin(); it != weights.getReplicas().end(); ++it) {
NVMatrix::setDeviceID(it->second->getDeviceID());
it->second->getW().copyFromHost(weightsCPU);
}
NVMatrix::setDeviceID(d);
}
/*
* name: weight matrix name
* eps: finite difference step
*/
bool ConvNet::checkGradient(const std::string& name, float eps, Weights& weights) {
Matrix numGrad(weights.getNumRows(), weights.getNumCols());
Matrix diff(numGrad);
numGrad.apply(Matrix::ZERO);
Matrix weightsCPU;
weights.getW().copyToHost(weightsCPU, true);
for(int i = 0; i < weights.getNumRows(); i++) {
for (int j = 0; j < weights.getNumCols(); j++) {
float v = weightsCPU(i,j);
weightsCPU(i,j) += eps;
checkGradient_copyWeightsToGPU(weightsCPU, weights);
weightsCPU(i,j) = v;
double err = 0;
for (int p = 0; p < getNumPasses(); ++p) {
// printf("trying fprop %d\n", p);
fprop(0, p, PASS_GC);
// printf(" success\n");
err += getCostValue();
}
numGrad(i,j) = (err - _baseErr) / (_data->getNumCases() * eps);
if (isnan((double)numGrad(i,j)) || isinf((double)numGrad(i,j))) {
cout << "Numerical computation produced nan or inf when checking '" << name << "': " << numGrad(i,j) << endl;
cout << "Consider reducing the sizes of the weights or finite difference steps." << endl;
cout << "Exiting." << endl;
exit(1);
}
checkGradient_copyWeightsToGPU(weightsCPU, weights);
}
}
Matrix gradCPU;
NVMatrix::setDeviceID(weights.getDeviceID());
map<int,NVMatrix*> mats;
for (map<int, Weights*>::const_iterator it = weights.getReplicas().begin(); it != weights.getReplicas().end(); ++it) {
mats[it->first] = &it->second->getGrad();
}
weights.getReducer().reduce(mats, 1, false);
weights.getGrad().copyToHost(gradCPU, true);
gradCPU.scale(-1.0 / _data->getNumCases());
float analNorm = gradCPU.norm();
float numNorm = numGrad.norm();
numGrad.subtract(gradCPU, diff);
float relErr = diff.norm() / analNorm;
bool fail = relErr >= GC_REL_ERR_THRESH;
if (fail || !GC_SUPPRESS_PASSES) {
cout << "========================" << endl;
printf("(%s) %s GRADIENT CHECK\n", fail ? "****FAIL****" : "PASS", name.c_str());
cout << "========================" << endl;
cout << "Analytic:" << endl;
gradCPU.print(0, 6, 0, 4);
cout << "Numeric:" << endl;
numGrad.print(0, 6, 0, 4);
printf("Analytic norm: %e\n", analNorm);
printf("Numeric norm: %e\n", numNorm);
printf("Relative error: %e\n", relErr);
}
_numTests++;
_numFailures += fail;
return fail;
}
/*
* =======================================================================================================
* ConvNetThread
* =======================================================================================================
*/
ConvNetThread::ConvNetThread(PyObjectV* layerList, int deviceID, int deviceIdx, ConvNet* convNet)
: Thread(true, getDeviceCPUs(deviceID)), _deviceID(deviceID), _convNet(convNet) {
try {
int numLayers = layerList->size();
for (int i = 0; i < numLayers; i++) {
PyObject* paramsDict = layerList->at(i);
std::string layerType = pyDictGetString(paramsDict, "type");
if (layerType != "data") {
intv& gpus = *pyDictGetIntV(paramsDict, "gpu");
int rid = indexOf(gpus, deviceIdx);
if (rid >= 0) {
initLayer(paramsDict, rid);
}
delete &gpus;
}
}
} catch (std::string& s) {
cout << "Error creating ConvNet: " << s << endl;
exit(1);
}
}
ConvNetThread::~ConvNetThread() {
NVMatrix::setDeviceID(_deviceID);
NVMatrix::destroyCublas();
NVMatrix::destroyRandom();
for (NameLayerMap::const_iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) {
delete it->second;
}
_nameLayerMap.clear();
}
void ConvNetThread::startTimer() {
NVMatrix::syncStream();
_timer.start();
}
double ConvNetThread::stopTimer() {
NVMatrix::syncStream();
return _timer.stop();
}
void ConvNetThread::initLayer(PyObject* paramsDict, int replicaID) {
std::string type = pyDictGetString(paramsDict, "type");
std::string name = pyDictGetString(paramsDict, "name");
if (type == "fc") {
_nameLayerMap[name] = new FCLayer(this, paramsDict, replicaID, false);
} else if (type == "sfc") {
_nameLayerMap[name] = new SplitFCLayer(this, paramsDict, replicaID, false);
} else if (type == "conv") {
_nameLayerMap[name] = new ConvLayer(this, paramsDict, replicaID);
} else if (type == "local") {
_nameLayerMap[name] = new LocalUnsharedLayer(this, paramsDict, replicaID);
} else if (type == "pool") {
_nameLayerMap[name] = &PoolLayer::make(this, paramsDict, replicaID);
} else if (type == "cmpool") {
_nameLayerMap[name] = &CrossMapPoolLayer::make(this, paramsDict, replicaID);
} else if (type == "rnorm") {
_nameLayerMap[name] = new ResponseNormLayer(this, paramsDict, replicaID);
} else if (type == "cmrnorm") {
_nameLayerMap[name] = new CrossMapResponseNormLayer(this, paramsDict, replicaID);
} else if (type == "cnorm") {
_nameLayerMap[name] = new ContrastNormLayer(this, paramsDict, replicaID);
} else if (type == "softmax") {
_nameLayerMap[name] = new SoftmaxLayer(this, paramsDict, replicaID);
} else if (type == "eltsum") {
_nameLayerMap[name] = new EltwiseSumLayer(this, paramsDict, replicaID);
} else if (type == "eltmax") {
_nameLayerMap[name] = new EltwiseMaxLayer(this, paramsDict, replicaID);
} else if (type == "neuron") {
_nameLayerMap[name] = new NeuronLayer(this, paramsDict, replicaID);
} else if (type == "nailbed") {
_nameLayerMap[name] = new NailbedLayer(this, paramsDict, replicaID);
} else if (type == "blur") {
_nameLayerMap[name] = new GaussianBlurLayer(this, paramsDict, replicaID);
} else if (type == "href") {
_nameLayerMap[name] = new HorizontalReflectionLayer(this, paramsDict, replicaID);
} else if (type == "resize") {
_nameLayerMap[name] = new ResizeLayer(this, paramsDict, replicaID);
} else if (type == "rgb2yuv") {
_nameLayerMap[name] = new RGBToYUVLayer(this, paramsDict, replicaID);
} else if (type == "rgb2lab") {
_nameLayerMap[name] = new RGBToLABLayer(this, paramsDict, replicaID);
} else if (type == "rscale") {
_nameLayerMap[name] = new RandomScaleLayer(this, paramsDict, replicaID);
} else if (type == "crop") {
_nameLayerMap[name] = new CropLayer(this, paramsDict, replicaID);
} else if (type == "concat") {
_nameLayerMap[name] = new ConcatenationLayer(this, paramsDict, replicaID);
} else if (type == "pass") {
_nameLayerMap[name] = new PassThroughLayer(this, paramsDict, replicaID);
} else if (type == "dropout") {
_nameLayerMap[name] = new DropoutLayer(this, paramsDict, replicaID);
} else if (type == "dropout2") {
_nameLayerMap[name] = new Dropout2Layer(this, paramsDict, replicaID);
} else if (strncmp(type.c_str(), "cost.", 5) == 0) {
CostLayer *c = &CostLayer::make(this, paramsDict, type, replicaID);
_nameLayerMap[name] = c;
_costs.push_back(c);
} else {
throw std::string("Unknown layer type ") + type;
}
}
/*
* This executes in a new CPU thread so it's OK to initialize CUDA stuff here.
*/
void ConvNetThread::initCuda() {
NVMatrix::setDeviceID(_deviceID);
checkCudaErrors(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared));
for (int i = 0; i < _convNet->getDeviceIDs().size(); i++) {
int d = _convNet->getDeviceIDs()[i];
if (d != _deviceID) {
if (NVMatrix::canAccessPeer(_deviceID, d)) {
printf("Enabling peer access GPU %d --> GPU %d\n", NVMatrix::getDeviceID(), d);
checkCudaErrors(cudaDeviceEnablePeerAccess(d, 0));
} else {
printf("No peer access GPU %d --> GPU %d\n", _deviceID, d);
}
}
}
// NVMatrix::syncStream();
NVMatrix::initCublas();
char* randomSeedEnv;
int randomSeed = time(0);
randomSeedEnv = getenv("CONVNET_RANDOM_SEED");
if(randomSeedEnv != NULL) {
randomSeed = atoi(randomSeedEnv);
}
//NVMatrix::initRandom(/*7*/);
NVMatrix::initRandom(randomSeed);
//srand(time(0));
}
void* ConvNetThread::run() {
initCuda();
bool exit = false;
while (!exit) {
Message* m = _msgQueue.dequeue();
if (m->getType() == FPROP_READY) {
FpropMessage* msg = static_cast<FpropMessage*>(m);
msg->getToLayer().fprop(msg->getPassType(), msg->getPassIdx());
} else if (m->getType() == BPROP_READY) {
BpropMessage* msg = static_cast<BpropMessage*>(m);
msg->getToLayer().incRcvdBInputMsgs();
msg->getToLayer().bprop(msg->getPassType(), msg->getPassIdx());
} else if (m->getType() == BPROP_START) {
BpropStartMessage* msg = static_cast<BpropStartMessage*>(m);
for (int i = 0; i < _costs.size(); i++) {
dynamic_cast<Layer*>(_costs[i])->bprop(msg->getPassType(), msg->getPassIdx());
}
} else if (m->getType() == SYNC) {
NVMatrix::syncStream();
_convNet->getSync().sync();
} else if (m->getType() == COPY_TO_CPU) {
for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) {
it->second->copyToCPU();
}
} else if (m->getType() == COPY_TO_GPU) {
for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) {
it->second->copyToGPU();
}
} else if (m->getType() == RESET) {
for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) {
it->second->reset();
}
} else if (m->getType() == RESET_PASS_IDX) {
for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) {
it->second->resetPassIdx();
}
} else if (m->getType() == UPDATE_WEIGHTS) {
for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) {
it->second->updateWeights();
}
} else if (m->getType() == CONSTRAIN_WEIGHTS) {
for (NameLayerMap::iterator it = _nameLayerMap.begin(); it != _nameLayerMap.end(); ++it) {
it->second->constrainWeights();
}
} else if (m->getType() == EXIT_CONVNET) {
exit = true;
}
delete m;
}
return NULL;
}
Cost& ConvNetThread::getCost() {
// In a single ConvNetThread, all costs are guaranteed to be different
// (i.e. not replicas of one another)
return *new Cost(_costs);
}
Layer& ConvNetThread::getLayer(std::string& name) {
return *_nameLayerMap[name];
}
int ConvNetThread::getDeviceID() {
return _deviceID;
}
Queue<Message*>& ConvNetThread::getMessageQueue() {
return _msgQueue;
}
vector<CostLayer*>& ConvNetThread::getCostLayers() {
return _costs;
}
NameLayerMap& ConvNetThread::getLayerMap() {
return _nameLayerMap;
}
ConvNet& ConvNetThread::getConvNet() {
return *_convNet;
}
|
2275323cfb69043a92902badc16c0f3cb5d5e6a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////
// File: ProgramCU.cu
// Author: Changchang Wu
// Description : implementation of ProgramCU and all CUDA kernels
//
// Copyright (c) 2007 University of North Carolina at Chapel Hill
// All Rights Reserved
//
// Permission to use, copy, modify and distribute this software and its
// documentation for educational, research and non-profit purposes, without
// fee, and without a written agreement is hereby granted, provided that the
// above copyright notice and the following paragraph appear in all copies.
//
// The University of North Carolina at Chapel Hill make no representations
// about the suitability of this software for any purpose. It is provided
// 'as is' without express or implied warranty.
//
// Please send BUG REPORTS to ccwu@cs.unc.edu
//
////////////////////////////////////////////////////////////////////////////
#if defined(CUDA_SIFTGPU_ENABLED)
#include "GL/glew.h"
#include "stdio.h"
#include <iostream>
#include <assert.h>
#include "CuTexImage.h"
#include "ProgramCU.h"
#include "GlobalUtil.h"
//----------------------------------------------------------------
//Begin SiftGPU setting section.
//////////////////////////////////////////////////////////
#define IMUL(X,Y) __mul24(X,Y)
//#define FDIV(X,Y) ((X)/(Y))
#define FDIV(X,Y) __fdividef(X,Y)
/////////////////////////////////////////////////////////
//filter kernel width range (don't change this)
#define KERNEL_MAX_WIDTH 33
#define KERNEL_MIN_WIDTH 5
//////////////////////////////////////////////////////////
//horizontal filter block size (32, 64, 128, 256, 512)
#define FILTERH_TILE_WIDTH 128
//thread block for vertical filter. FILTERV_BLOCK_WIDTH can be (4, 8 or 16)
#define FILTERV_BLOCK_WIDTH 16
#define FILTERV_BLOCK_HEIGHT 32
//The corresponding image patch for a thread block
#define FILTERV_PIXEL_PER_THREAD 4
#define FILTERV_TILE_WIDTH FILTERV_BLOCK_WIDTH
#define FILTERV_TILE_HEIGHT (FILTERV_PIXEL_PER_THREAD * FILTERV_BLOCK_HEIGHT)
//////////////////////////////////////////////////////////
//thread block size for computing Difference of Gaussian
#define DOG_BLOCK_LOG_DIMX 7
#define DOG_BLOCK_LOG_DIMY 0
#define DOG_BLOCK_DIMX (1 << DOG_BLOCK_LOG_DIMX)
#define DOG_BLOCK_DIMY (1 << DOG_BLOCK_LOG_DIMY)
//////////////////////////////////////////////////////////
//thread block size for keypoint detection
#define KEY_BLOCK_LOG_DIMX 5 // 3
#define KEY_BLOCK_LOG_DIMY 2 // 3
#define KEY_BLOCK_DIMX (1<<KEY_BLOCK_LOG_DIMX)
#define KEY_BLOCK_DIMY (1<<KEY_BLOCK_LOG_DIMY)
//make KEY_BLOCK_LOG_DIMX 4 will make the write coalesced..
//but it seems uncoalesced writes don't affect the speed
//////////////////////////////////////////////////////////
//thread block size for initializing list generation (64, 128, 256, 512 ...)
#define HIST_INIT_WIDTH 128
//thread block size for generating feature list (32, 64, 128, 256, 512, ...)
#define LISTGEN_BLOCK_DIM 128
/////////////////////////////////////////////////////////
//how many keypoint orientations to compute in a block
#define ORIENTATION_COMPUTE_PER_BLOCK 64
//how many keypoint descriptor to compute in a block (2, 4, 8, 16, 32)
#define DESCRIPTOR_COMPUTE_PER_BLOCK 4
#define DESCRIPTOR_COMPUTE_BLOCK_SIZE (16 * DESCRIPTOR_COMPUTE_PER_BLOCK)
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// block size for the keypoint descriptor normalization kernel
// it is assumed that one descriptor is processed by one warp
// -> have to be multiple of warp size (32)
#define DESCRIPTOR_NORMALIZE_PER_BLOCK 128
#else
//how many keypoint descriptor to normalized in a block (32, ...)
#define DESCRIPTOR_NORMALIZE_PER_BLOCK 32
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
///////////////////////////////////////////
//Thread block size for visualization
//(This doesn't affect the speed of computation)
#define BLOCK_LOG_DIM 4
#define BLOCK_DIM (1 << BLOCK_LOG_DIM)
//End SiftGPU setting section.
//----------------------------------------------------------------
__device__ __constant__ float d_kernel[KERNEL_MAX_WIDTH];
texture<float, 1, hipReadModeElementType> texData;
texture<unsigned char, 1, hipReadModeNormalizedFloat> texDataB;
texture<float2, 2, hipReadModeElementType> texDataF2;
texture<float4, 1, hipReadModeElementType> texDataF4;
texture<int4, 1, hipReadModeElementType> texDataI4;
texture<int4, 1, hipReadModeElementType> texDataList;
//template<int i> __device__ float Conv(float *data) { return Conv<i-1>(data) + data[i]*d_kernel[i];}
//template<> __device__ float Conv<0>(float *data) { return data[0] * d_kernel[0]; }
//////////////////////////////////////////////////////////////
template<int FW> __global__ void FilterH( float* d_result, int width)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FILTERH_TILE_WIDTH + FW -1;
const int CACHE_COUNT = 2 + (CACHE_WIDTH - 2)/ FILTERH_TILE_WIDTH;
__shared__ float data[CACHE_WIDTH];
const int bcol = IMUL(blockIdx.x, FILTERH_TILE_WIDTH);
const int col = bcol + threadIdx.x;
const int index_min = IMUL(blockIdx.y, width);
const int index_max = index_min + width - 1;
int src_index = index_min + bcol - HALF_WIDTH + threadIdx.x;
int cache_index = threadIdx.x;
float value = 0;
#pragma unroll
for(int j = 0; j < CACHE_COUNT; ++j)
{
if(cache_index < CACHE_WIDTH)
{
int fetch_index = src_index < index_min? index_min : (src_index > index_max ? index_max : src_index);
data[cache_index] = tex1Dfetch(texData,fetch_index);
src_index += FILTERH_TILE_WIDTH;
cache_index += FILTERH_TILE_WIDTH;
}
}
__syncthreads();
if(col >= width)
return;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[threadIdx.x + i]* d_kernel[i]);
}
// value = Conv<FW-1>(data + threadIdx.x);
d_result[index_min + col] = value;
}
////////////////////////////////////////////////////////////////////
template<int FW> __global__ void FilterV(float* d_result, int width, int height)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FW + FILTERV_TILE_HEIGHT - 1;
const int TEMP = CACHE_WIDTH & 0xf;
// add some extra space to avoid bank conflict
#if FILTERV_TILE_WIDTH == 16
// make the stride 16 * n +/- 1
const int EXTRA = (TEMP == 1 || TEMP == 0) ? 1 - TEMP : 15 - TEMP;
#elif FILTERV_TILE_WIDTH == 8
// make the stride 16 * n +/- 2
const int EXTRA = (TEMP == 2 || TEMP == 1 || TEMP == 0) ? 2 - TEMP : (TEMP == 15? 3 : 14 - TEMP);
#elif FILTERV_TILE_WIDTH == 4
// make the stride 16 * n +/- 4
const int EXTRA = (TEMP >=0 && TEMP <=4) ? 4 - TEMP : (TEMP > 12? 20 - TEMP : 12 - TEMP);
#else
#error
#endif
const int CACHE_TRUE_WIDTH = CACHE_WIDTH + EXTRA;
const int CACHE_COUNT = (CACHE_WIDTH + FILTERV_BLOCK_HEIGHT - 1) / FILTERV_BLOCK_HEIGHT;
const int WRITE_COUNT = (FILTERV_TILE_HEIGHT + FILTERV_BLOCK_HEIGHT -1) / FILTERV_BLOCK_HEIGHT;
__shared__ float data[CACHE_TRUE_WIDTH * FILTERV_TILE_WIDTH];
const int row_block_first = IMUL(blockIdx.y, FILTERV_TILE_HEIGHT);
const int col = IMUL(blockIdx.x, FILTERV_TILE_WIDTH) + threadIdx.x;
const int row_first = row_block_first - HALF_WIDTH;
const int data_index_max = IMUL(height - 1, width) + col;
const int cache_col_start = threadIdx.y;
const int cache_row_start = IMUL(threadIdx.x, CACHE_TRUE_WIDTH);
int cache_index = cache_col_start + cache_row_start;
int data_index = IMUL(row_first + cache_col_start, width) + col;
if(col < width)
{
#pragma unroll
for(int i = 0; i < CACHE_COUNT; ++i)
{
if(cache_col_start < CACHE_WIDTH - i * FILTERV_BLOCK_HEIGHT)
{
int fetch_index = data_index < col ? col : (data_index > data_index_max? data_index_max : data_index);
data[cache_index + i * FILTERV_BLOCK_HEIGHT] = tex1Dfetch(texData,fetch_index);
data_index += IMUL(FILTERV_BLOCK_HEIGHT, width);
}
}
}
__syncthreads();
if(col >= width)
return;
int row = row_block_first + threadIdx.y;
int index_start = cache_row_start + threadIdx.y;
#pragma unroll
for(int i = 0; i < WRITE_COUNT; ++i, row += FILTERV_BLOCK_HEIGHT, index_start += FILTERV_BLOCK_HEIGHT)
{
if(row < height)
{
int index_dest = IMUL(row, width) + col;
float value = 0;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[index_start + i] * d_kernel[i]);
}
d_result[index_dest] = value;
}
}
}
template<int LOG_SCALE> __global__ void UpsampleKernel(float* d_result, int width)
{
const int SCALE = (1 << LOG_SCALE), SCALE_MASK = (SCALE - 1);
const float INV_SCALE = 1.0f / (float(SCALE));
int col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(col >= width)
return;
int row = blockIdx.y >> LOG_SCALE;
int index = row * width + col;
int dst_row = blockIdx.y;
int dst_idx = (width * dst_row + col) * SCALE;
int helper = blockIdx.y & SCALE_MASK;
if (helper)
{
float v11 = tex1Dfetch(texData, index);
float v12 = tex1Dfetch(texData, index + 1);
index += width;
float v21 = tex1Dfetch(texData, index);
float v22 = tex1Dfetch(texData, index + 1);
float w1 = INV_SCALE * helper, w2 = 1.0 - w1;
float v1 = (v21 * w1 + w2 * v11);
float v2 = (v22 * w1 + w2 * v12);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}
else
{
float v1 = tex1Dfetch(texData, index);
float v2 = tex1Dfetch(texData, index + 1);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
void ProgramCU::SampleImageU(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int width = src->GetImgWidth(), height = src->GetImgHeight();
src->BindTexture(texData);
dim3 grid((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height << log_scale);
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1:
hipLaunchKernelGGL(( UpsampleKernel<1>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, width);
break;
case 2:
hipLaunchKernelGGL(( UpsampleKernel<2>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, width);
break;
case 3:
hipLaunchKernelGGL(( UpsampleKernel<3>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, width);
break;
default:
break;
}
}
template<int LOG_SCALE> __global__ void DownsampleKernel(float* d_result, int src_width, int dst_width)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width)
return;
const int src_col = min((dst_col << LOG_SCALE), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << LOG_SCALE;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
__global__ void DownsampleKernel(float* d_result, int src_width, int dst_width, const int log_scale)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width)
return;
const int src_col = min((dst_col << log_scale), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << log_scale;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
void ProgramCU::SampleImageD(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int src_width = src->GetImgWidth();
int dst_width = dst->GetImgWidth();
src->BindTexture(texData);
dim3 grid((dst_width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, dst->GetImgHeight());
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1:
hipLaunchKernelGGL(( DownsampleKernel<1>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width);
break;
case 2:
hipLaunchKernelGGL(( DownsampleKernel<2>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width);
break;
case 3:
hipLaunchKernelGGL(( DownsampleKernel<3>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width);
break;
default:
hipLaunchKernelGGL(( DownsampleKernel) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width, log_scale);
}
}
__global__ void ChannelReduce_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
d_result[index] = tex1Dfetch(texData, index*4);
}
__global__ void ChannelReduce_Convert_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
float4 rgba = tex1Dfetch(texDataF4, index);
d_result[index] = 0.299f * rgba.x + 0.587f* rgba.y + 0.114f * rgba.z;
}
void ProgramCU::ReduceToSingleChannel(CuTexImage* dst, CuTexImage* src, int convert_rgb)
{
int width = src->GetImgWidth();
int height = dst->GetImgHeight() ;
dim3 grid((width * height + FILTERH_TILE_WIDTH - 1) / FILTERH_TILE_WIDTH);
dim3 block(FILTERH_TILE_WIDTH);
if(convert_rgb)
{
src->BindTexture(texDataF4);
hipLaunchKernelGGL(( ChannelReduce_Convert_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dst->_cuData);
}
else
{
src->BindTexture(texData);
hipLaunchKernelGGL(( ChannelReduce_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dst->_cuData);
}
}
__global__ void ConvertByteToFloat_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
d_result[index] = tex1Dfetch(texDataB, index);
}
void ProgramCU::ConvertByteToFloat(CuTexImage*src, CuTexImage* dst)
{
int width = src->GetImgWidth();
int height = dst->GetImgHeight() ;
dim3 grid((width * height + FILTERH_TILE_WIDTH - 1) / FILTERH_TILE_WIDTH);
dim3 block(FILTERH_TILE_WIDTH);
src->BindTexture(texDataB);
hipLaunchKernelGGL(( ConvertByteToFloat_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dst->_cuData);
}
void ProgramCU::CreateFilterKernel(float sigma, float* kernel, int& width)
{
int i, sz = int( ceil( GlobalUtil::_FilterWidthFactor * sigma -0.5) ); //
width = 2*sz + 1;
if(width > KERNEL_MAX_WIDTH)
{
//filter size truncation
sz = KERNEL_MAX_WIDTH >> 1;
width = KERNEL_MAX_WIDTH;
}
else if(width < KERNEL_MIN_WIDTH)
{
sz = KERNEL_MIN_WIDTH >> 1;
width =KERNEL_MIN_WIDTH;
}
float rv = 1.0f / (sigma*sigma), v, ksum = 0;
// pre-compute filter
for( i = -sz ; i <= sz ; ++i)
{
kernel[i+sz] = v = exp(-0.5f * i * i *rv) ;
ksum += v;
}
//normalize the kernel
rv = 1.0f / ksum;
for(i = 0; i< width ;i++)
kernel[i] *= rv;
}
template<int FW> void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf)
{
int width = src->GetImgWidth();
int height = src->GetImgHeight();
//horizontal filtering
src->BindTexture(texData);
dim3 gridh((width + FILTERH_TILE_WIDTH - 1)/FILTERH_TILE_WIDTH, height);
dim3 blockh(FILTERH_TILE_WIDTH);
hipLaunchKernelGGL(( FilterH<FW>), dim3(gridh), dim3(blockh), 0, 0, (float*)buf->_cuData, width);
CheckErrorCUDA("FilterH");
///vertical filtering
buf->BindTexture(texData);
dim3 gridv((width + FILTERV_TILE_WIDTH - 1)/FILTERV_TILE_WIDTH, (height + FILTERV_TILE_HEIGHT - 1)/FILTERV_TILE_HEIGHT);
dim3 blockv(FILTERV_TILE_WIDTH, FILTERV_BLOCK_HEIGHT);
hipLaunchKernelGGL(( FilterV<FW>), dim3(gridv), dim3(blockv), 0, 0, (float*)dst->_cuData, width, height);
CheckErrorCUDA("FilterV");
}
//////////////////////////////////////////////////////////////////////
// tested on 2048x1500 image, the time on pyramid construction is
// OpenGL version : 18ms
// CUDA version: 28 ms
void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf, float sigma)
{
float filter_kernel[KERNEL_MAX_WIDTH];
int width;
CreateFilterKernel(sigma, filter_kernel, width);
hipMemcpyToSymbol(d_kernel, filter_kernel, width * sizeof(float), 0, hipMemcpyHostToDevice);
switch(width)
{
case 5: FilterImage< 5>(dst, src, buf); break;
case 7: FilterImage< 7>(dst, src, buf); break;
case 9: FilterImage< 9>(dst, src, buf); break;
case 11: FilterImage<11>(dst, src, buf); break;
case 13: FilterImage<13>(dst, src, buf); break;
case 15: FilterImage<15>(dst, src, buf); break;
case 17: FilterImage<17>(dst, src, buf); break;
case 19: FilterImage<19>(dst, src, buf); break;
case 21: FilterImage<21>(dst, src, buf); break;
case 23: FilterImage<23>(dst, src, buf); break;
case 25: FilterImage<25>(dst, src, buf); break;
case 27: FilterImage<27>(dst, src, buf); break;
case 29: FilterImage<29>(dst, src, buf); break;
case 31: FilterImage<31>(dst, src, buf); break;
case 33: FilterImage<33>(dst, src, buf); break;
default: break;
}
}
texture<float, 1, hipReadModeElementType> texC;
texture<float, 1, hipReadModeElementType> texP;
texture<float, 1, hipReadModeElementType> texN;
#ifdef GPU_HESSIAN
texture<float, 1, hipReadModeElementType> texG;
// compute 3x3 Hessian values from symmetric differences
#define COMPUTE_HESSIAN(tex, idx) \
float v11 = tex1Dfetch(tex, idx - width - 1); \
float v12 = tex1Dfetch(tex, idx - width); \
float v13 = tex1Dfetch(tex, idx - width + 1); \
\
float v21 = tex1Dfetch(tex, idx - 1); \
float v22 = tex1Dfetch(tex, idx); \
float v23 = tex1Dfetch(tex, idx + 1); \
\
float v31 = tex1Dfetch(tex, idx + width - 1); \
float v32 = tex1Dfetch(tex, idx + width); \
float v33 = tex1Dfetch(tex, idx + width + 1); \
\
float Lxx = (v21 - 2.0f*v22 + v23); \
float Lyy = (v12 - 2.0f*v22 + v32); \
float Lxy = (v13 - v11 + v31 - v33) * 0.25f; \
void __global__ ComputeHessian_Kernel(float *hessian, float2 *got, int width, int height, float norm)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if((col < width) && (row < height))
{
int index = IMUL(row, width) + col;
COMPUTE_HESSIAN(texC, index)
// compute determinant of hessian matrix, normalize and write out
hessian[index] = (Lxx*Lyy - Lxy*Lxy)*norm;
// precompute gradient and rotation
float dx = v23 - v21;
float dy = v32 - v12;
float gradient = 0.5f * sqrt(dx*dx + dy*dy);
float rot = ((gradient == 0.0f) ? 0.0f : atan2(dy, dx));
got[index] = make_float2(gradient, rot);
}
}
void __global__ ComputeHessian_Kernel(float *hessian, int width, int height, float norm)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if((col < width) && (row < height))
{
int index = IMUL(row, width) + col;
COMPUTE_HESSIAN(texC, index)
// compute determinant of hessian matrix, normalize and write out
hessian[index] = (Lxx*Lyy - Lxy*Lxy)*norm;
}
}
void ProgramCU::ComputeHessian(CuTexImage* gus, CuTexImage* dog, CuTexImage* got, float norm)
{
int width = gus->GetImgWidth();
int height = gus->GetImgHeight();
dim3 grid((width + DOG_BLOCK_DIMX - 1) / DOG_BLOCK_DIMX, (height + DOG_BLOCK_DIMY - 1) / DOG_BLOCK_DIMY);
dim3 block(DOG_BLOCK_DIMX, DOG_BLOCK_DIMY);
gus->BindTexture(texC);
if(got->_cuData)
hipLaunchKernelGGL(( ComputeHessian_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dog->_cuData, (float2*)got->_cuData, width, height, norm*norm);
else
hipLaunchKernelGGL(( ComputeHessian_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dog->_cuData, width, height, norm*norm);
}
#else
void __global__ ComputeDOG_Kernel(float* d_dog, float2* d_got, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
float vxn = tex1Dfetch(texC, index + 1);
float vxp = tex1Dfetch(texC, index - 1);
float vyp = tex1Dfetch(texC, index - width);
float vyn = tex1Dfetch(texC, index + width);
float dx = vxn - vxp;
float dy = vyn - vyp;
float grd = 0.5f * sqrt(dx * dx + dy * dy);
float rot = (grd == 0.0f ? 0.0f : atan2(dy, dx));
d_got[index] = make_float2(grd, rot);
}
}
void __global__ ComputeDOG_Kernel(float* d_dog, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
}
}
void ProgramCU::ComputeDOG(CuTexImage* gus, CuTexImage* dog, CuTexImage* got)
{
int width = gus->GetImgWidth();
int height = gus->GetImgHeight();
dim3 grid((width + DOG_BLOCK_DIMX - 1)/ DOG_BLOCK_DIMX, (height + DOG_BLOCK_DIMY - 1)/DOG_BLOCK_DIMY);
dim3 block(DOG_BLOCK_DIMX, DOG_BLOCK_DIMY);
gus->BindTexture(texC);
(gus -1)->BindTexture(texP);
if(got->_cuData)
hipLaunchKernelGGL(( ComputeDOG_Kernel), dim3(grid), dim3(block), 0, 0, (float*) dog->_cuData, (float2*) got->_cuData, width, height);
else
hipLaunchKernelGGL(( ComputeDOG_Kernel), dim3(grid), dim3(block), 0, 0, (float*) dog->_cuData, width, height);
}
#endif // GPU_HESSIAN
#ifdef GPU_HESSIAN
// GPU_HESSIAN: added test (response<0) and (response>0)
#define READ_CMP_DOG_DATA(datai, tex, idx) \
datai[0] = tex1Dfetch(tex, idx - 1); \
datai[1] = tex1Dfetch(tex, idx); \
datai[2] = tex1Dfetch(tex, idx + 1); \
if(response > nmax) \
{ \
nmax = max(nmax, datai[0]); \
nmax = max(nmax, datai[1]); \
nmax = max(nmax, datai[2]); \
if((response < nmax) || (response < 0)) \
goto key_finish; \
} \
else \
{ \
nmin = min(nmin, datai[0]); \
nmin = min(nmin, datai[1]); \
nmin = min(nmin, datai[2]); \
if((response > nmin) || (response > 0)) \
goto key_finish; \
}
#else
#define READ_CMP_DOG_DATA(datai, tex, idx) \
datai[0] = tex1Dfetch(tex, idx - 1); \
datai[1] = tex1Dfetch(tex, idx); \
datai[2] = tex1Dfetch(tex, idx + 1); \
if(response > nmax) \
{ \
nmax = max(nmax, datai[0]); \
nmax = max(nmax, datai[1]); \
nmax = max(nmax, datai[2]); \
if(response < nmax) \
goto key_finish; \
} \
else \
{ \
nmin = min(nmin, datai[0]); \
nmin = min(nmin, datai[1]); \
nmin = min(nmin, datai[2]); \
if(response > nmin) \
goto key_finish; \
}
#endif // GPU_HESSIAN
void __global__ ComputeKEY_Kernel(float4 *d_key, int width, int colmax, int rowmax, float dog_threshold0, float dog_threshold, float edge_threshold, int subpixel_localization
#if defined GENERATE_FEATURE_LIST_USING_ATOMICS && (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED)
, int *featureTexLen
#endif // GENERATE_FEATURE_LIST_USING_ATOMICS && (GPU_HESSIAN || GPU_SIFT_MODIFIED)
)
{
int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x;
float data[3][3];
float datap[3][3];
float datan[3][3];
float response = 0.0f;
int index = IMUL(row, width) + col;
int idx[3] = {index - width, index, index + width};
float nmax, nmin, result = 0.0f;
float dx = 0, dy = 0, ds = 0;
int in_image = 0;
bool offset_test_passed = true;
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
unsigned short pointType = FEATURE_TYPE_NONE;
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
if((row > 0) && (col > 0) && (row < rowmax) && (col < colmax))
{
in_image = 1;
data[1][1] = response = tex1Dfetch(texC, idx[1]);
if(fabs(response) <= dog_threshold0)
goto key_finish;
// fetch left and right neighbour
data[1][0] = tex1Dfetch(texC, idx[1] - 1);
data[1][2] = tex1Dfetch(texC, idx[1] + 1);
nmax = max(data[1][0], data[1][2]);
nmin = min(data[1][0], data[1][2]);
if((response <= nmax) && (response >= nmin))
goto key_finish;
//if((response > nmax && response < 0 )|| (response < nmin && response > 0)) goto key_finish;
// fetch values from the row above
READ_CMP_DOG_DATA(data[0], texC, idx[0]);
// fetch values from one the row below
READ_CMP_DOG_DATA(data[2], texC, idx[2]);
// edge supression
float vx2 = response * 2.0f;
float fxx = data[1][0] + data[1][2] - vx2;
float fyy = data[0][1] + data[2][1] - vx2;
float fxy = 0.25f * (data[2][2] + data[0][0] - data[2][0] - data[0][2]);
float temp1 = fxx * fyy - fxy * fxy;
float temp2 = (fxx + fyy) * (fxx + fyy);
if((temp1 <= 0) || (temp2 > edge_threshold * temp1))
goto key_finish; // local neighbourhood looks like an edge
// read the previous level
READ_CMP_DOG_DATA(datap[0], texP, idx[0]);
READ_CMP_DOG_DATA(datap[1], texP, idx[1]);
READ_CMP_DOG_DATA(datap[2], texP, idx[2]);
// read the next level
READ_CMP_DOG_DATA(datan[0], texN, idx[0]);
READ_CMP_DOG_DATA(datan[1], texN, idx[1]);
READ_CMP_DOG_DATA(datan[2], texN, idx[2]);
if(subpixel_localization)
{
// subpixel localization
float fx = 0.5f * (data[1][2] - data[1][0]);
float fy = 0.5f * (data[2][1] - data[0][1]);
float fs = 0.5f * (datan[1][1] - datap[1][1]);
float fss = (datan[1][1] + datap[1][1] - vx2);
float fxs = 0.25f * (datan[1][2] + datap[1][0] - datan[1][0] - datap[1][2]);
float fys = 0.25f * (datan[2][1] + datap[0][1] - datan[0][1] - datap[2][1]);
// need to solve dx, dy, ds;
// |-fx| | fxx fxy fxs | |dx|
// |-fy| = | fxy fyy fys | * |dy|
// |-fs| | fxs fys fss | |ds|
float4 A0 = (fxx > 0) ? make_float4(fxx, fxy, fxs, -fx) : make_float4(-fxx, -fxy, -fxs, fx);
float4 A1 = (fxy > 0) ? make_float4(fxy, fyy, fys, -fy) : make_float4(-fxy, -fyy, -fys, fy);
float4 A2 = (fxs > 0) ? make_float4(fxs, fys, fss, -fs) : make_float4(-fxs, -fys, -fss, fs);
float maxa = max(max(A0.x, A1.x), A2.x);
if(maxa >= 1e-10)
{
if(maxa == A1.x)
{
float4 TEMP = A1; A1 = A0; A0 = TEMP;
}
else if(maxa == A2.x)
{
float4 TEMP = A2; A2 = A0; A0 = TEMP;
}
A0.y /= A0.x; A0.z /= A0.x; A0.w /= A0.x;
A1.y -= A1.x * A0.y; A1.z -= A1.x * A0.z; A1.w -= A1.x * A0.w;
A2.y -= A2.x * A0.y; A2.z -= A2.x * A0.z; A2.w -= A2.x * A0.w;
if(abs(A2.y) > abs(A1.y))
{
float4 TEMP = A2; A2 = A1; A1 = TEMP;
}
if(abs(A1.y) >= 1e-10)
{
A1.z /= A1.y; A1.w /= A1.y;
A2.z -= A2.y * A1.z; A2.w -= A2.y * A1.w;
if(abs(A2.z) >= 1e-10)
{
ds = A2.w / A2.z;
dy = A1.w - ds * A1.z;
dx = A0.w - ds * A0.z - dy * A0.y;
response = data[1][1] + 0.5f * (dx * fx + dy * fy + ds * fs);
offset_test_passed = (fabs(response) > dog_threshold) && (fabs(ds) < 1.0f) && (fabs(dx) < 1.0f) && (fabs(dy) < 1.0f);
}
}
}
}
if(offset_test_passed)
#if defined GPU_HESSIAN
{
// find blob point type from Hessian matrix H, we know that:
// - if H is positive definite it is a DARK blob
// - if H is negative definite it is a BRIGHT blob
// - det H is negative it is a SADDLE point
data[1][1] = tex1Dfetch(texG, idx[1]);
data[1][0] = tex1Dfetch(texG, idx[1] - 1);
data[1][2] = tex1Dfetch(texG, idx[1] + 1);
if(response < 0)
{
pointType = FEATURE_TYPE_SADDLE_POINT;
}
else
{
// at this point we know that 2x2 determinant is positive
// so only check the remaining 1x1 subdeterminant
float Lxx = data[1][0] - 2*data[1][1] + data[1][2];
pointType = (Lxx > 0) ? FEATURE_TYPE_DARK_BLOB : FEATURE_TYPE_BRIGHT_BLOB;
}
}
#elif defined GPU_SIFT_MODIFIED
result = (response > nmax) ? FEATURE_TYPE_BRIGHT_BLOB : FEATURE_TYPE_DARK_BLOB;
#else
result = (response > nmax) ? 1.0 : -1.0;
#endif // GPU_HESSIAN / GPU_SIFT_MODIFIED
}
key_finish:
if(in_image)
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
{
// result: response 16b | 14b unused | 2b type
unsigned int uspack = (((unsigned int)__float2half_rn(response)) << 16) | 0x00000004u | pointType;
result = *((float *)(&uspack)); // __uint_as_float(uspack); // CUDA 7.5
d_key[index] = make_float4(result, dx, dy, ds);
}
#else
d_key[index] = make_float4(result, dx, dy, ds);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
#if defined GENERATE_FEATURE_LIST_USING_ATOMICS && (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED)
int count = __syncthreads_count(pointType != FEATURE_TYPE_NONE);
if(threadIdx.x+threadIdx.y*blockDim.x == 0)
{
atomicAdd(featureTexLen, count);
}
#endif // GENERATE_FEATURE_LIST_USING_ATOMICS && (GPU_HESSIAN || GPU_SIFT_MODIFIED)
}
void ProgramCU::ComputeKEY(CuTexImage* dog, CuTexImage* key
#if defined GPU_HESSIAN
, CuTexImage* gus
#endif // GPU_HESSIAN
, float Tdog, float Tedge
#if defined GENERATE_FEATURE_LIST_USING_ATOMICS && (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED)
, int *featureTexLen, int featureTexIdx
#endif // GENERATE_FEATURE_LIST_USING_ATOMICS && (GPU_HESSIAN || GPU_SIFT_MODIFIED)
)
{
int width = dog->GetImgWidth();
int height = dog->GetImgHeight();
float Tdog1 = (GlobalUtil::_SubpixelLocalization ? 0.8f : 1.0f) * Tdog;
CuTexImage *dogp = dog - 1;
CuTexImage *dogn = dog + 1;
dim3 grid((width + KEY_BLOCK_DIMX - 1)/KEY_BLOCK_DIMX, (height + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY);
dim3 block(KEY_BLOCK_DIMX, KEY_BLOCK_DIMY);
dogp->BindTexture(texP);
dog->BindTexture(texC);
dogn->BindTexture(texN);
#if defined GPU_HESSIAN
gus->BindTexture(texG);
#endif // GPU_HESSIAN
Tedge = (Tedge+1)*(Tedge+1) / Tedge;
hipLaunchKernelGGL(( ComputeKEY_Kernel), dim3(grid), dim3(block), 0, 0, (float4*)key->_cuData, width, width-1, height-1, Tdog1, Tdog, Tedge, GlobalUtil::_SubpixelLocalization
#if defined GENERATE_FEATURE_LIST_USING_ATOMICS && (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED)
, featureTexLen + featureTexIdx
#endif // GENERATE_FEATURE_LIST_USING_ATOMICS && (GPU_HESSIAN || GPU_SIFT_MODIFIED)
);
}
#if defined GENERATE_FEATURE_LIST_USING_ATOMICS && (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED)
#define GENERATE_LIST_BLOCK_DIMX 32
#define GENERATE_LIST_BLOCK_DIMY 4
__device__ int GFL_WarpScan(int val, volatile int *sData, int threadID)
{
// pad each warp with zeros
// int idx = 2*threadIdx.x - (threadIdx.x & (warpSize-1)); // 1D
// int id = threadIdx.x + threadIdx.y*blockDim.x; // 2D
// int idx = 2*id - (id & (warpSize-1));
int idx = 2*threadID - (threadID & (warpSize-1));
sData[idx] = 0;
idx += warpSize;
int t = sData[idx] = val;
sData[idx] = t = t + sData[idx - 1];
sData[idx] = t = t + sData[idx - 2];
sData[idx] = t = t + sData[idx - 4];
sData[idx] = t = t + sData[idx - 8];
sData[idx] = t = t + sData[idx - 16];
return sData[idx-1];
}
__device__ unsigned int GFL_LaneMaskLt(int threadID)
{
// const unsigned int lane = threadIdx.x & (warpSize-1); // 1D block
// const unsigned int lane = (threadIdx.x + threadIdx.y*blockDim.x) & (warpSize-1); // 2D block
const unsigned int lane = threadID & (warpSize-1);
return (1 << (lane)) - 1;
}
__device__ unsigned int GFL_WarpPrefixSums(bool p, int threadID)
{
const unsigned int mask = GFL_LaneMaskLt(threadID);
unsigned int b = __ballot(p);
return __popc(b & mask);
}
__device__ int GFL_BlockBinaryPrefixSums(int x, int idx)
{
extern __shared__ int sData[];
//int idx = threadIdx.x + threadIdx.y*blockDim.x;
// A. Compute exclusive prefix sums within each warp
int warpPrefix = GFL_WarpPrefixSums(x, idx);
// int idx = threadIdx.x; // 1D
// int idx = threadIdx.x + threadIdx.y*blockDim.x; // 2D
int warpIdx = idx / warpSize;
int laneIdx = idx & (warpSize - 1);
// B. The last thread of each warp stores inclusive
// prefix sum to the warps index in shared memory
if(laneIdx == warpSize - 1)
sData[warpIdx] = warpPrefix + x;
__syncthreads();
// C. One warp scans the warp partial sums
if(idx < warpSize)
sData[idx] = GFL_WarpScan(sData[idx], sData, idx);
__syncthreads();
// D. Each thread adds prefix sums of warp partial
// sums to its own intra-warp prefix sums
return warpPrefix + sData[warpIdx];
}
void __global__ ListGen_Kernel(int4* d_list, int len, int width, int height, int *counter)
{
int row = IMUL(blockIdx.y, GENERATE_LIST_BLOCK_DIMY) + threadIdx.y;
int col = IMUL(blockIdx.x, GENERATE_LIST_BLOCK_DIMX) + threadIdx.x;
// read the detected keypoint type -> flag=0 (type == FEATURE_TYPE_NONE) / 1 (otherwise)
unsigned int flag = 0;
if((row > 0) && (col > 0) && (row < height-1) && (col < width-1))
{
int index = IMUL(row, width) + col;
float4 value = tex1Dfetch(texDataF4, index);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// value: (response 16b | 14b unused | 2b type), dx, dy, ds
// type = *((unsigned int *)(&offset.x)) & 0x00000003u;
flag = ((*((unsigned int *)(&value.x)) & 0x00000003u) != FEATURE_TYPE_NONE) ? 1 : 0;
#else
// value: (response 16b | 16b type), dx, dy, ds ... type = -1, 0, +1
unsigned int resultUInt = *((unsigned int*)(&value.x)) & 0x0000FFFFu; // float_as_uint(value.x) & 0x0000FFFFu; // CUDA 7.5
float result = __half2float(resultUInt);
flag = (fabs(result) > 0.5f) ? 1 : 0; // flag = (result != 0.0f) ? 1 : 0;
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
int idxWithinBlock = IMUL(threadIdx.y, blockDim.x) + threadIdx.x;
// compute prefix sums for each thread int the block
int blockPrefixSum = GFL_BlockBinaryPrefixSums(flag, idxWithinBlock);
// allocate enough space in the feature list to store all features in this block
// number of features can be obtained by __syncthreads_count() or it is given
// by the prefix sum of the last thread in the block plus one (if the last thread
// represents detected feature)
__shared__ int blockStart; // index in the feature list of the first keypoint in the block
// int count = __syncthreads_count(flag);
// if(idxWitninBlock == 0)
// {
// blockStart = atomicAdd(counter, count);
// }
// __syncthreads();
if(idxWithinBlock == IMUL(blockDim.y, blockDim.x) - 1)
{
blockStart = atomicAdd(counter, blockPrefixSum + flag);
}
__syncthreads();
// put detected keypoint into the feature list
if(flag)
d_list[blockStart + blockPrefixSum] = make_int4(col, row, 0, 0);
}
#else
void __global__ InitHist_Kernel(int4* hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if((row < height) && (col < wd))
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
// each thread process 4 subsequent colums values in the same row
if((row > 0) && (row < height-1))
{
#pragma unroll
for(int i = 0; i < 4 ; ++i, ++scol)
{
float4 temp = tex1Dfetch(texDataF4, sidx+i);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
unsigned int featureType = *((unsigned int*)(&temp.x)) & 0x00000003u; // float_as_uint(temp.x) & 0x00000003u; // CUDA 7.5
v[i] = ((scol < ws-1) && (scol > 0) && (featureType != FEATURE_TYPE_NONE)) ? 1 : 0;
#else
v[i] = ((scol < ws-1) && (scol > 0) && (temp.x != 0.0)) ? 1 : 0;
#endif // GPU_SIFT_MODIFIED || GPU_HESSIAN
}
}
hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::InitHistogram(CuTexImage* key, CuTexImage* hist)
{
int ws = key->GetImgWidth();
int hs = key->GetImgHeight();
int wd = hist->GetImgWidth();
int hd = hist->GetImgHeight();
dim3 grid((wd + HIST_INIT_WIDTH - 1) / HIST_INIT_WIDTH, hd);
dim3 block(HIST_INIT_WIDTH, 1);
key->BindTexture(texDataF4);
hipLaunchKernelGGL(( InitHist_Kernel), dim3(grid), dim3(block), 0, 0, (int4*) hist->_cuData, ws, wd, hd);
}
void __global__ ReduceHist_Kernel(int4* d_hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if((row < height) && (col < wd))
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
#pragma unroll
for(int i = 0; (i < 4) && (scol < ws); ++i, ++scol)
{
int4 temp = tex1Dfetch(texDataI4, sidx + i);
v[i] = temp.x + temp.y + temp.z + temp.w;
}
d_hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::ReduceHistogram(CuTexImage* hist1, CuTexImage* hist2)
{
int ws = hist1->GetImgWidth();
int hs = hist1->GetImgHeight();
int wd = hist2->GetImgWidth();
int hd = hist2->GetImgHeight();
int temp = (int)floor(logf(float(wd * 2 / 3)) / logf(2.0f));
const int wi = min(7, max(temp , 0));
hist1->BindTexture(texDataI4);
const int BW = 1 << wi;
const int BH = 1 << (7 - wi);
dim3 grid((wd + BW - 1) / BW, (hd + BH -1) / BH);
dim3 block(BW, BH);
hipLaunchKernelGGL(( ReduceHist_Kernel), dim3(grid), dim3(block), 0, 0, (int4*)hist2->_cuData, ws, wd, hd);
}
void __global__ ListGen_Kernel(int4* d_list, int len, int width)
{
int idx1 = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// GPU_HESSIAN fix
if(idx1 >= len)
return;
#endif // GPU_HESSIAN
int4 pos = tex1Dfetch(texDataList, idx1);
int idx2 = IMUL(pos.y, width) + pos.x;
int4 temp = tex1Dfetch(texDataI4, idx2);
int sum1 = temp.x + temp.y;
int sum2 = sum1 + temp.z;
pos.x <<= 2;
if(pos.z >= sum2)
{
pos.x += 3;
pos.z -= sum2;
}
else if(pos.z >= sum1)
{
pos.x += 2;
pos.z -= sum1;
}
else if(pos.z >= temp.x)
{
pos.x += 1;
pos.z -= temp.x;
}
d_list[idx1] = pos;
}
#endif // GENERATE_FEATURE_LIST_USING_ATOMICS && (GPU_HESSIAN || GPU_SIFT_MODIFIED)
#if defined GENERATE_FEATURE_LIST_USING_ATOMICS && (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED)
// input list (x, y) (x, y) ....
void ProgramCU::GenerateList(CuTexImage* list, CuTexImage* key, int *counter)
{
int len = list->GetImgWidth();
key->BindTexture(texDataF4);
int width = key->GetImgWidth();
int height = key->GetImgHeight();
dim3 grid((width + GENERATE_LIST_BLOCK_DIMX - 1)/GENERATE_LIST_BLOCK_DIMX, (height + GENERATE_LIST_BLOCK_DIMY - 1)/GENERATE_LIST_BLOCK_DIMY);
dim3 block(GENERATE_LIST_BLOCK_DIMX, GENERATE_LIST_BLOCK_DIMY);
// shared memory is used to store warp scan data
hipLaunchKernelGGL(( ListGen_Kernel), dim3(grid), dim3(block), 2*32*sizeof(int), 0, (int4*)list->_cuData, len, width, height, counter);
}
#else
// input list (x, y) (x, y) ....
void ProgramCU::GenerateList(CuTexImage* list, CuTexImage* hist)
{
int len = list->GetImgWidth();
list->BindTexture(texDataList);
hist->BindTexture(texDataI4);
dim3 grid((len + LISTGEN_BLOCK_DIM -1) / LISTGEN_BLOCK_DIM);
dim3 block(LISTGEN_BLOCK_DIM);
hipLaunchKernelGGL(( ListGen_Kernel), dim3(grid), dim3(block), 0, 0, (int4*)list->_cuData, len, hist->GetImgWidth());
}
#endif // GENERATE_FEATURE_LIST_USING_ATOMICS && (GPU_HESSIAN || GPU_SIFT_MODIFIED)
void __global__ ComputeOrientation_Kernel(float4* d_list, int list_len, int width, int height, float sigma, float sigma_step, float gaussian_factor, float sample_factor,
int num_orientation, int existing_keypoint, int subpixel, int keepsign
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
, bool doHalfSIFT
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
)
{
const float ten_degree_per_radius = 5.7295779513082320876798154814105;
const float radius_per_ten_degrees = 1.0 / 5.7295779513082320876798154814105;
int idx = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
if(idx >= list_len)
return;
float4 key;
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
union {
float flt; // include response (16b), unused (14b), feature type (2b)
unsigned int uint;
} additionalData;
additionalData.flt = 0.0f;
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
if(existing_keypoint)
{
key = tex1Dfetch(texDataF4, idx);
// read the data
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// unpack scale, x, and y
// later store just the strongest computed orientation
// existing keypoint input
// key.x: response 8b H (cleared to zero) | x 24b-14.10
// key.y: response 8b L (cleared to zero) | y 24b-14.10
// key.z: 2b type | 14b unused | scale 16b-8.8
// key.w: orientation (if any)
// extract x position
unsigned int tmpValue = *((unsigned int *)(&key.x)); // __float_as_uint(key.x); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.x = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract y position
tmpValue = *((unsigned int *)(&key.y)); // __float_as_uint(key.y); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.y = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract scale
tmpValue = *((unsigned int *)(&key.z)); // __float_as_uint(key.z); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_SCALE_MASK;
key.z = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_SCALE_PRECISION_BITS);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
else
{
int4 ikey = tex1Dfetch(texDataList, idx);
key.x = ikey.x + 0.5f;
key.y = ikey.y + 0.5f;
key.z = sigma;
#if !defined GPU_HESSIAN && !defined GPU_SIFT_MODIFIED
if(subpixel || keepsign)
{
#endif // !GPU_HESSIAN && !GPU_SIFT_MODIFIED
// offset: x(response 16b | 14b unused | 2b type), dx, dy, ds
float4 offset = tex1Dfetch(texDataF4, IMUL(width, ikey.y) + ikey.x);
if(subpixel)
{
key.x += offset.y;
key.y += offset.z;
key.z *= pow(sigma_step, offset.w);
}
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
additionalData.flt = offset.x;
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
#if !defined GPU_HESSIAN && !defined GPU_SIFT_MODIFIED
if(keepsign) // not supported for hessian
key.z *= offset.x;
#endif // !GPU_HESSIAN && !GPU_SIFT_MODIFIED
#if !defined GPU_HESSIAN && !defined GPU_SIFT_MODIFIED
}
#endif // !GPU_HESSIAN && !GPU_SIFT_MODIFIED
}
if(num_orientation == 0)
{
key.w = 0;
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
goto key_store_finish;
#else
d_list[idx] = key;
return;
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
float vote[37];
float gsigma = key.z * gaussian_factor;
float win = fabs(key.z) * sample_factor;
float dist_threshold = win * win + 0.5;
float factor = -0.5f / (gsigma * gsigma);
float xmin = max(1.5f, floor(key.x - win) + 0.5f);
float ymin = max(1.5f, floor(key.y - win) + 0.5f);
float xmax = min(width - 1.5f, floor(key.x + win) + 0.5f);
float ymax = min(height -1.5f, floor(key.y + win) + 0.5f);
#pragma unroll
for(int i = 0; i < 36; ++i)
vote[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
float dy = y - key.y;
dy *= dy;
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - key.x;
float sq_dist = dx * dx + dy;
if(sq_dist >= dist_threshold)
continue;
float2 got = tex2D(texDataF2, x, y);
// float weight = got.x * exp(sq_dist * factor);
// float fidx = floorf(got.y * ten_degree_per_radius);
// int oidx = fidx;
int oidx = (int)floorf(got.y * ten_degree_per_radius);
if(oidx < 0)
oidx += 36;
vote[oidx] += got.x * expf(sq_dist * factor); // vote[oidx] += weight;
}
}
// filter the vote
const float one_third = 1.0 / 3.0;
#pragma unroll
for(int i = 0; i < 6; ++i)
{
vote[36] = vote[0];
float pre = vote[35];
#pragma unroll
for(int j = 0; j < 36; ++j)
{
float temp = one_third * (pre + vote[j] + vote[j + 1]);
pre = vote[j];
vote[j] = temp;
}
}
vote[36] = vote[0];
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
if(doHalfSIFT)
{
#pragma unroll
for(int i = 0; i < 18; i++)
{
vote[i] += vote[i+18];
vote[i+18] = 0;
}
}
int orientationsCount = 0;
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
// just one orientation
if((num_orientation == 1) || existing_keypoint)
{
int index_max = 0;
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i)
{
index_max = (vote[i] > max_vote) ? i : index_max;
max_vote = max(max_vote, vote[i]);
}
float pre = vote[(index_max == 0) ? 35 : index_max - 1];
float next = vote[index_max + 1];
float weight = max_vote;
float off = 0.5f * FDIV(next - pre, weight + weight - next - pre);
key.w = radius_per_ten_degrees * (index_max + 0.5f + off);
#if !defined GPU_HESSIAN && !defined GPU_SIFT_MODIFIED
d_list[idx] = key;
#endif // !GPU_HESSIAN && !GPU_SIFT_MODIFIED
}
// multi-orientations allowed
else
{
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// up to 4 orientations may be stored
// max number of stored orientations depends on num_orientations parameter (1..4)
#define MAX_ORIENTATIONS 4
// find the maximum value
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i)
max_vote = max(max_vote, vote[i]);
float vote_threshold = max_vote * 0.8f;
float pre = vote[35];
float max_vot[MAX_ORIENTATIONS+1];
float max_rot[MAX_ORIENTATIONS+1];
#pragma unroll
for(int i=0; i < 36; ++i)
{
float next = vote[i + 1];
if((vote[i] > vote_threshold) && (vote[i] > pre) && (vote[i] > next)) // max from neighbours
{
float di = 0.5f * FDIV(next - pre, vote[i] + vote[i] - next - pre);
float rot = i + di + 0.5f;
float weight = vote[i];
int idx = orientationsCount;
if(orientationsCount > 0)
{
// shift values
while((idx > 0) && (max_vot[idx-1] < weight)) {
max_vot[idx] = max_vot[idx-1];
max_rot[idx] = max_rot[idx-1];
idx--;
}
}
// store maximum found
max_vot[idx] = weight;
max_rot[idx] = rot;
if(orientationsCount < MAX_ORIENTATIONS)
orientationsCount++;
}
pre = vote[i];
}
unsigned int packedOrientations = 0;
// first 4 orientations (if exist)
unsigned int maxCount = min(4, orientationsCount);
int idx = 0;
for(; idx < maxCount; idx++)
{
float orientation = max_rot[idx] / 36.0f;
if(orientation < 0)
orientation += 1.0f;
unsigned int uiOrientation = (unsigned int) floorf(orientation * 255.0f);
packedOrientations = packedOrientations | (uiOrientation << 8*idx);
}
key.w = *((float *)(&packedOrientations)); // __uint_as_float(packedOrientations); // CUDA 7.5
#else
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i)
max_vote = max(max_vote, vote[i]);
float vote_threshold = max_vote * 0.8f;
float pre = vote[35];
float max_rot[2], max_vot[2] = {0, 0};
int ocount = 0;
#pragma unroll
for(int i=0; i < 36; ++i)
{
float next = vote[i + 1];
if((vote[i] > vote_threshold) && (vote[i] > pre) && (vote[i] > next))
{
float di = 0.5f * FDIV(next - pre, vote[i] + vote[i] - next - pre);
float rot = i + di + 0.5f;
float weight = vote[i];
///
if(weight > max_vot[1])
{
if(weight > max_vot[0])
{
max_vot[1] = max_vot[0];
max_rot[1] = max_rot[0];
max_vot[0] = weight;
max_rot[0] = rot;
}
else
{
max_vot[1] = weight;
max_rot[1] = rot;
}
ocount ++;
}
}
pre = vote[i];
}
float fr1 = max_rot[0] / 36.0f;
if(fr1 < 0)
fr1 += 1.0f;
unsigned short us1 = (ocount == 0) ? 65535 : ((unsigned short)floor(fr1 * 65535.0f));
unsigned short us2 = 65535;
if(ocount > 1)
{
float fr2 = max_rot[1] / 36.0f;
if(fr2 < 0)
fr2 += 1.0f;
us2 = (unsigned short) floor(fr2 * 65535.0f);
}
unsigned int uspack = (us2 << 16) | us1;
key.w = *((float *)(&uspack)); // __uint_as_float(uspack); // CUDA 7.5
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
#if !defined GPU_HESSIAN && !defined GPU_SIFT_MODIFIED
d_list[idx] = key;
#endif // !GPU_HESSIAN && !GPU_SIFT_MODIFIED
}
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
key_store_finish:
// input:
// additionalData: response 16b | 14b unused | 2b type
// output in the feature list d_list
// key.x: response 8b H | x 24b-14.10
// key.y: response 8b L | y 24b-14.10
// key.z: 2b type | 3b orientations count | 11b unused | scale 16b-8.8
// key.w: 8b orientation1 | 8b orientation2 | 8b orientation3 | 8b orientation4
if(!existing_keypoint)
{
unsigned int posX = (unsigned int)FLOAT_TO_FIXED_POINT(key.x, FIXED_POINT_POSITION_PRECISION_BITS);
posX = posX & FIXED_POINT_POSITION_MASK;
unsigned int posY = (unsigned int)FLOAT_TO_FIXED_POINT(key.y, FIXED_POINT_POSITION_PRECISION_BITS);
posY = posY & FIXED_POINT_POSITION_MASK;
// store response
posX = posX | (additionalData.uint & FIXED_POINT_RESPONSE_MASK);
posY = posY | ((additionalData.uint << 8) & FIXED_POINT_RESPONSE_MASK);
unsigned int scale = (unsigned int)(FLOAT_TO_FIXED_POINT(key.z, FIXED_POINT_SCALE_PRECISION_BITS));
scale = scale & FIXED_POINT_SCALE_MASK;
// type & orientations count (0 means single float value in key.w, otherwise we have to unpack 8b orientations into floats)
scale = scale | ((additionalData.uint & 0x00000003u) << 30) | ((orientationsCount & 0x00000007u) << 27);
key.z = *((float *)(&scale)); // __uint_as_float(scale); // CUDA 7.5
key.x = *((float *)(&posX)); // __uint_as_float(posX); // CUDA 7.5
key.y = *((float *)(&posY)); // __uint_as_float(posY); // CUDA 7.5
d_list[idx] = key;
}
else
{
// for existing keypoint just overwrite computed orientation and other components remain the same (x, y, and scale)
// ReshapeFeatureListCPU() is not called when we have just one orientation
d_list[idx].w = key.w;
}
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
void ProgramCU::ComputeOrientation(CuTexImage* list, CuTexImage* got, CuTexImage* key, float sigma, float sigma_step, int existing_keypoint)
{
int len = list->GetImgWidth();
if(len <= 0)
return;
int width = got->GetImgWidth();
int height = got->GetImgHeight();
if(existing_keypoint)
{
list->BindTexture(texDataF4);
}
else
{
list->BindTexture(texDataList);
#if !defined GPU_HESSIAN && !defined GPU_SIFT_MODIFIED
if(GlobalUtil::_SubpixelLocalization)
#endif // !GPU_HESSIAN && !GPU_SIFT_MODIFIED
key->BindTexture(texDataF4);
}
got->BindTexture2D(texDataF2);
const int block_width = (len < ORIENTATION_COMPUTE_PER_BLOCK) ? 16 : ORIENTATION_COMPUTE_PER_BLOCK;
dim3 grid((len + block_width -1) / block_width);
dim3 block(block_width);
hipLaunchKernelGGL(( ComputeOrientation_Kernel), dim3(grid), dim3(block), 0, 0,
(float4*) list->_cuData,
len, width, height, sigma, sigma_step,
GlobalUtil::_OrientationGaussianFactor,
GlobalUtil::_OrientationGaussianFactor * GlobalUtil::_OrientationWindowFactor,
GlobalUtil::_FixedOrientation ? 0 : GlobalUtil::_MaxOrientation,
existing_keypoint, GlobalUtil::_SubpixelLocalization, GlobalUtil::_KeepExtremumSign
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
, GlobalUtil::_HalfSIFT
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
);
ProgramCU::CheckErrorCUDA("ComputeOrientation");
}
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
template <bool DYNAMIC_INDEXING, bool HALF_SIFT>
#else
template <bool DYNAMIC_INDEXING>
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
void __global__ ComputeDescriptor_Kernel(float4* d_des, int num, int width, int height, float window_factor)
{
const float rpi = 4.0 / PI;
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int fidx = idx >> 4;
if(fidx >= num)
return;
// fetch the feature
float4 key = tex1Dfetch(texDataF4, fidx);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// input in the feature list
// key.x: response 8b H | x 24b-14.10
// key.y: response 8b L | y 24b-14.10
// key.z: 2b type | 14b unused | scale 16b-8.8
// key.w: orientation
// extract x position
unsigned int tmpValue = *((unsigned int *)(&key.x)); // __float_as_uint(key.x); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.x = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract y position
tmpValue = *((unsigned int *)(&key.y)); // __float_as_uint(key.y); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.y = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract scale
tmpValue = *((unsigned int *)(&key.z)); // __float_as_uint(key.z); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_SCALE_MASK;
key.z = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_SCALE_PRECISION_BITS);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
int bidx = idx & 0xf;
int ix = bidx & 0x3;
int iy = bidx >> 2;
float spt = fabs(key.z * window_factor);
float s, c;
__sincosf(key.w, &s, &c);
float anglef = (key.w > PI) ? (key.w - (2.0 * PI)) : key.w;
float cspt = c * spt;
float sspt = s * spt;
float crspt = c / spt;
float srspt = s / spt;
float2 offsetpt, pt;
float xmin, ymin, xmax, ymax, bsz;
offsetpt.x = ix - 1.5f;
offsetpt.y = iy - 1.5f;
pt.x = cspt * offsetpt.x - sspt * offsetpt.y + key.x;
pt.y = cspt * offsetpt.y + sspt * offsetpt.x + key.y;
bsz = fabs(cspt) + fabs(sspt);
xmin = max(1.5f, floor(pt.x - bsz) + 0.5f);
ymin = max(1.5f, floor(pt.y - bsz) + 0.5f);
xmax = min(width - 1.5f, floor(pt.x + bsz) + 0.5f);
ymax = min(height - 1.5f, floor(pt.y + bsz) + 0.5f);
float des[9];
#pragma unroll
for(int i = 0; i < 9; ++i)
des[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - pt.x;
float dy = y - pt.y;
float nx = crspt * dx + srspt * dy;
float ny = crspt * dy - srspt * dx;
float nxn = fabs(nx);
float nyn = fabs(ny);
if((nxn < 1.0f) && (nyn < 1.0f))
{
float2 cc = tex2D(texDataF2, x, y);
float dnx = nx + offsetpt.x;
float dny = ny + offsetpt.y;
float ww = expf(-0.125f * (dnx * dnx + dny * dny));
float wx = 1.0 - nxn;
float wy = 1.0 - nyn;
float weight = ww * wx * wy * cc.x;
float theta = (anglef - cc.y) * rpi;
if(theta < 0)
theta += 8.0f;
float fo = floorf(theta);
int fidx = fo;
float weight1 = fo + 1.0f - theta;
float weight2 = theta - fo;
if(DYNAMIC_INDEXING)
{
des[fidx] += (weight1 * weight);
des[fidx + 1] += (weight2 * weight);
// this dynamic indexing part might be slow
}
else
{
#pragma unroll
for(int k = 0; k < 8; ++k)
{
if(k == fidx)
{
des[k] += (weight1 * weight);
des[k+1] += (weight2 * weight);
}
}
}
}
}
}
des[0] += des[8];
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
if(HALF_SIFT)
{
// half sift -> 4 directions only
des[0] += des[4];
des[1] += des[5];
des[2] += des[6];
des[3] += des[7];
d_des[idx] = make_float4(des[0], des[1], des[2], des[3]);
return;
}
else
{
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
// full sift -> 8 directions
int didx = idx << 1;
d_des[didx] = make_float4(des[0], des[1], des[2], des[3]);
d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
}
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
template <bool DYNAMIC_INDEXING, bool HALF_SIFT>
#else
template <bool DYNAMIC_INDEXING>
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
void __global__ ComputeDescriptorRECT_Kernel(float4* d_des, int num, int width, int height, float window_factor)
{
const float rpi = 4.0 / PI;
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int fidx = idx >> 4;
if(fidx >= num)
return;
// fetch the feature
float4 key = tex1Dfetch(texDataF4, fidx);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// input in the feature list
// key.x: response 8b H | x 24b-14.10
// key.y: response 8b L | y 24b-14.10
// key.z: 2b type | 14b unused | scale 16b-8.8
// key.w: orientation1 orientation2
// extract x position
unsigned int tmpValue = *((unsigned int *)(&key.x)); // __float_as_uint(key.x); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.x = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract y position
tmpValue = *((unsigned int *)(&key.y)); // __float_as_uint(key.y); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.y = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract scale
tmpValue = *((unsigned int *)(&key.z)); // __float_as_uint(key.z); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_SCALE_MASK;
key.z = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_SCALE_PRECISION_BITS);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
int bidx = idx & 0xf;
int ix = bidx & 0x3;
int iy = bidx >> 2;
// float aspect_ratio = key.w / key.z;
// float aspect_sq = aspect_ratio * aspect_ratio;
float sptx = key.z * 0.25;
float spty = key.w * 0.25;
float xmin, ymin, xmax, ymax;
float2 pt;
pt.x = sptx * (ix + 0.5f) + key.x;
pt.y = spty * (iy + 0.5f) + key.y;
xmin = max(1.5f, floorf(pt.x - sptx) + 0.5f);
ymin = max(1.5f, floorf(pt.y - spty) + 0.5f);
xmax = min(width - 1.5f, floorf(pt.x + sptx) + 0.5f);
ymax = min(height - 1.5f, floorf(pt.y + spty) + 0.5f);
float des[9];
#pragma unroll
for(int i =0; i < 9; ++i)
des[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float nx = (x - pt.x) / sptx;
float ny = (y - pt.y) / spty;
float nxn = fabs(nx);
float nyn = fabs(ny);
if((nxn < 1.0f) && (nyn < 1.0f))
{
float2 cc = tex2D(texDataF2, x, y);
float wx = 1.0 - nxn;
float wy = 1.0 - nyn;
float weight = wx * wy * cc.x;
float theta = (- cc.y) * rpi;
if(theta < 0)
theta += 8.0f;
float fo = floorf(theta);
int fidx = fo;
float weight1 = fo + 1.0f - theta;
float weight2 = theta - fo;
if(DYNAMIC_INDEXING)
{
des[fidx] += (weight1 * weight);
des[fidx + 1] += (weight2 * weight);
// this dynamic indexing part might be slow
}
else
{
#pragma unroll
for(int k = 0; k < 8; ++k)
{
if(k == fidx)
{
des[k] += (weight1 * weight);
des[k+1] += (weight2 * weight);
}
}
}
}
}
}
des[0] += des[8];
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
if(HALF_SIFT)
{
// half sift -> 4 directions only
des[0] += des[4];
des[1] += des[5];
des[2] += des[6];
des[3] += des[7];
d_des[idx] = make_float4(des[0], des[1], des[2], des[3]);
return;
}
else
{
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
// full sift -> 8 directions
int didx = idx << 1;
d_des[didx] = make_float4(des[0], des[1], des[2], des[3]);
d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
}
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
#if (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED) && defined NORMALIZE_DESCRIPTOR_PER_WARP
texture<float2, 1, hipReadModeElementType> texDataH2;
__device__ float ND_WarpReduction(volatile float *sData, int idx)
{
int warpIdx = idx & (warpSize-1); // index within warp
// parallel reduction within warp - just half of the warp is active
if(warpIdx < 16)
{
sData[idx] += sData[idx + 16];
sData[idx] += sData[idx + 8];
sData[idx] += sData[idx + 4];
sData[idx] += sData[idx + 2];
sData[idx] += sData[idx + 1];
}
return sData[idx-warpIdx]; // first thread in the warp
}
// assumes the block size (128, 1, 1) and grid size (Bx, 1, 1)
void __global__ NormalizeDescriptor_Kernel(float4* d_des, int num)
{
// size => numThreadsPerBlock * sizeof(float); numThreadsPerBlock must be multiple of 32
extern __shared__ volatile float reductionCache[];
// int globalIdx = threadIdx.x + IMUL(blockIdx.x + IMUL(blockIdx.y, gridDim.x), blockDim.x); // 2D grid
int globalIdx = threadIdx.x + IMUL(blockIdx.x, blockDim.x); // 1D grid
int localIdx = threadIdx.x;
while(globalIdx < 32*num)
{
// the vector is first normalized to unit length, thus adjusting for changing image contrast
float4 temp = tex1Dfetch(texDataF4, globalIdx);
float norm1 = (temp.x*temp.x + temp.y*temp.y + temp.z*temp.z + temp.w*temp.w);
reductionCache[localIdx] = norm1;
// __syncthreads(); // threads in warp are always sync
norm1 = rsqrt(ND_WarpReduction(reductionCache, localIdx));
// ... then all feature dimensions are thresholded to a maximum value of 0.2
temp.x = min(0.2f, temp.x * norm1);
temp.y = min(0.2f, temp.y * norm1);
temp.z = min(0.2f, temp.z * norm1);
temp.w = min(0.2f, temp.w * norm1);
float norm2 = (temp.x*temp.x + temp.y*temp.y + temp.z*temp.z + temp.w*temp.w);
reductionCache[localIdx] = norm2;
// __syncthreads(); // threads in warp are always sync
norm2 = rsqrt(ND_WarpReduction(reductionCache, localIdx));
// ... and the vector is again normalized to unit length
temp.x *= norm2;
temp.y *= norm2;
temp.z *= norm2;
temp.w *= norm2;
d_des[globalIdx] = temp;
// move to the next descriptor, if there is any unprocessed
globalIdx += IMUL(gridDim.x, blockDim.x);
}
}
// assumes the block size (128, 1, 1) and grid size (Bx, 1, 1)
// version used for half sift
void __global__ NormalizeDescriptor_Kernel(float2* d_des, int num)
{
// size => numThreadsPerBlock * sizeof(float); numThreadsPerBlock must be multiple of 32
extern __shared__ volatile float reductionCache[];
// int globalIdx = threadIdx.x + IMUL(blockIdx.x + IMUL(blockIdx.y, gridDim.x), blockDim.x); // 2D grid
int globalIdx = threadIdx.x + IMUL(blockIdx.x, blockDim.x); // 1D grid
int localIdx = threadIdx.x;
while(globalIdx < 32*num)
{
// the vector is first normalized to unit length, thus adjusting for changing image contrast
float2 temp = tex1Dfetch(texDataH2, globalIdx);
float norm1 = (temp.x*temp.x + temp.y*temp.y);
reductionCache[localIdx] = norm1;
// __syncthreads(); // threads in warp are always sync
norm1 = rsqrt(ND_WarpReduction(reductionCache, localIdx));
// ... then all feature dimensions are thresholded to a maximum value of 0.2
temp.x = min(0.2f, temp.x * norm1);
temp.y = min(0.2f, temp.y * norm1);
float norm2 = (temp.x*temp.x + temp.y*temp.y);
reductionCache[localIdx] = norm2;
// __syncthreads(); // threads in warp are always sync
norm2 = rsqrt(ND_WarpReduction(reductionCache, localIdx));
// ... and the vector is again normalized to unit length
temp.x *= norm2;
temp.y *= norm2;
d_des[globalIdx] = temp;
// move to the next descriptor, if there is any unprocessed
globalIdx += IMUL(gridDim.x, blockDim.x);
}
}
#else
template<int DESCRIPTOR_SIZE, int SHIFT>
void __global__ NormalizeDescriptor_Kernel(float4* d_des, int num)
{
float4 temp[DESCRIPTOR_SIZE];
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num)
return;
int sidx = idx << SHIFT;
float norm1 = 0;
float norm2 = 0;
#pragma unroll
// the vector is first normalized to unit length, thus adjusting for changing image contrast
for(int i = 0; i < DESCRIPTOR_SIZE; ++i)
{
temp[i] = tex1Dfetch(texDataF4, sidx+i);
norm1 += (temp[i].x*temp[i].x + temp[i].y*temp[i].y + temp[i].z*temp[i].z + temp[i].w*temp[i].w);
}
norm1 = rsqrt(norm1);
#pragma unroll
// ... then all feature dimensions are thresholded to a maximum value of 0.2
for(int i = 0; i < DESCRIPTOR_SIZE; ++i)
{
temp[i].x = min(0.2f, temp[i].x * norm1);
temp[i].y = min(0.2f, temp[i].y * norm1);
temp[i].z = min(0.2f, temp[i].z * norm1);
temp[i].w = min(0.2f, temp[i].w * norm1);
norm2 += (temp[i].x*temp[i].x + temp[i].y*temp[i].y + temp[i].z*temp[i].z + temp[i].w*temp[i].w);
}
norm2 = rsqrt(norm2);
#pragma unroll
// ... and the vector is again normalized to unit length
for(int i = 0; i < DESCRIPTOR_SIZE; ++i)
{
temp[i].x *= norm2;
temp[i].y *= norm2;
temp[i].z *= norm2;
temp[i].w *= norm2;
d_des[sidx + i] = temp[i];
}
}
#endif // (GPU_HESSIAN || GPU_SIFT_MODIFIED) && NORMALIZE_DESCRIPTOR_PER_WARP
void ProgramCU::ComputeDescriptor(CuTexImage*list, CuTexImage* got, CuTexImage* dtex, int rect, int stream)
{
int num = list->GetImgWidth();
int width = got->GetImgWidth();
int height = got->GetImgHeight();
dtex->InitTexture(num * 128, 1, 1);
got->BindTexture2D(texDataF2);
list->BindTexture(texDataF4);
int block_width = DESCRIPTOR_COMPUTE_BLOCK_SIZE;
dim3 grid((num * 16 + block_width -1) / block_width);
dim3 block(block_width);
if(rect)
{
if(GlobalUtil::_UseDynamicIndexing)
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
{
if(GlobalUtil::_HalfSIFT)
hipLaunchKernelGGL(( ComputeDescriptorRECT_Kernel<true, true>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
hipLaunchKernelGGL(( ComputeDescriptorRECT_Kernel<true, false>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}
#else
hipLaunchKernelGGL(( ComputeDescriptorRECT_Kernel<true>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
else
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
{
if(GlobalUtil::_HalfSIFT)
hipLaunchKernelGGL(( ComputeDescriptorRECT_Kernel<false, true>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
hipLaunchKernelGGL(( ComputeDescriptorRECT_Kernel<false, false>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}
#else
hipLaunchKernelGGL(( ComputeDescriptorRECT_Kernel<false>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
else
{
if(GlobalUtil::_UseDynamicIndexing)
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
{
if(GlobalUtil::_HalfSIFT)
hipLaunchKernelGGL(( ComputeDescriptor_Kernel<true, true>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
hipLaunchKernelGGL(( ComputeDescriptor_Kernel<true, false>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}
#else
hipLaunchKernelGGL(( ComputeDescriptor_Kernel<true>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
else
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
{
if(GlobalUtil::_HalfSIFT)
hipLaunchKernelGGL(( ComputeDescriptor_Kernel<false, true>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
hipLaunchKernelGGL(( ComputeDescriptor_Kernel<false, false>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}
#else
hipLaunchKernelGGL(( ComputeDescriptor_Kernel<false>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
if(GlobalUtil::_NormalizedSIFT)
{
#if (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED) && defined NORMALIZE_DESCRIPTOR_PER_WARP
// 32 threads (one warp) normalize one descriptor -> one thread handles one float4
// 1D block size (128, 1, 1) => four descriptors are normalized per block
// 1D grid size (Bx, 1, 1)
if(GlobalUtil::_HalfSIFT)
dtex->BindTexture(texDataH2);
else
dtex->BindTexture(texDataF4);
const int blockWidth = DESCRIPTOR_NORMALIZE_PER_BLOCK;
int blocksInGrid = min(16384, (num*32 + blockWidth -1) / blockWidth);
dim3 grid(blocksInGrid);
dim3 block(blockWidth);
if(GlobalUtil::_HalfSIFT)
hipLaunchKernelGGL(( NormalizeDescriptor_Kernel), dim3(grid), dim3(block), blockWidth*sizeof(float), 0, (float2*) dtex->_cuData, num);
else
hipLaunchKernelGGL(( NormalizeDescriptor_Kernel), dim3(grid), dim3(block), blockWidth*sizeof(float), 0, (float4*) dtex->_cuData, num);
#else
dtex->BindTexture(texDataF4);
const int block_width = DESCRIPTOR_NORMALIZE_PER_BLOCK;
dim3 grid((num + block_width -1) / block_width);
dim3 block(block_width);
hipLaunchKernelGGL(( NormalizeDescriptor_Kernel<32, 5>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num);
#endif // (GPU_HESSIAN || GPU_SIFT_MODIFIED) && NORMALIZE_DESCRIPTOR_PER_WARP
}
CheckErrorCUDA("ComputeDescriptor");
}
#if (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED) && defined TOP_K_SELECTION
// Enables maximum occupancy - bitonic sort
#define SHARED_SIZE_LIMIT 1024U // CC2.0
#define TOPK_BLOCK_SIZE 128 // 256
void ProgramCU::TopKInit(TopKData &data, int listSize, int countThreshold)
{
data.keypointsCount = listSize;
// the closest power of two higher than keypoints count
data.keypointsCountAsPowerOfTwo = 1;
while (data.keypointsCountAsPowerOfTwo < listSize)
data.keypointsCountAsPowerOfTwo *= 2;
// keypoints count have to be at least SHARED_SIZE_LIMIT (required by bitonic sort implementation)
data.keypointsCountAsPowerOfTwo = max(data.keypointsCountAsPowerOfTwo, SHARED_SIZE_LIMIT);
// allocate arrays to store responses and indices
hipMalloc(&data.keys, (data.keypointsCountAsPowerOfTwo+1) * sizeof(float)); // one extra element in both arrays is used by prefix scan
hipMalloc(&data.indices, (data.keypointsCountAsPowerOfTwo+1) * sizeof(unsigned int));
data.topKCountThreshold = countThreshold;
}
void ProgramCU::TopKFinish(TopKData &data)
{
// release all data
hipFree(data.keys);
hipFree(data.indices);
hipFree(data.devLevelFeaturesCount);
delete[] data.levelFeaturesCount;
}
void __global__ CopyTopKData_Kernel(int width, float *keys, unsigned int *indices, int listLen, int offset, int keypointsCount)
{
int idx = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
if(idx >= listLen)
return;
float response = MIN_VALUE;
if(idx+offset < keypointsCount)
{
// read keypoint index
int4 ikey = tex1Dfetch(texDataList, idx);
int keyIdx = IMUL(width, ikey.y) + ikey.x;
// read keypoint additional info
float4 key = tex1Dfetch(texDataF4, keyIdx);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// keypoint info
// key.x: response 16b | 14b unused | 2b type
// key.y: x
// key.z: y
// key.w: scale
// extract response
unsigned int value = *((unsigned int *)(&key.x)); // __float_as_uint(key.x); // CUDA 7.5
value = (value & 0xFFFF0000u) >> 16;
response = __half2float((unsigned short)value);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
keys[idx+offset] = fabs(response);
indices[idx+offset] = idx+offset;
}
void ProgramCU::TopKCopyData(CuTexImage* list, CuTexImage* key, TopKData &topKData, int offset)
{
bool isActivePadding = ((list == NULL) && (key == NULL)); // clear the padding values?
int len = isActivePadding ? (topKData.keypointsCountAsPowerOfTwo - offset) : list->GetImgWidth();
if(len <= 0)
return;
int width = isActivePadding ? 0 : key->GetImgWidth();
if(!isActivePadding)
{
list->BindTexture(texDataList);
key->BindTexture(texDataF4);
}
int blockWidth = TOPK_BLOCK_SIZE;
dim3 grid((len + blockWidth - 1) / blockWidth);
dim3 block(blockWidth);
hipLaunchKernelGGL(( CopyTopKData_Kernel), dim3(grid), dim3(block), 0, 0, width, topKData.keys, topKData.indices, len, offset, topKData.keypointsCount);
CheckErrorCUDA("CopyTopKData");
}
// Map to single instructions on G8x / G9x / G100
#define UMUL(a, b) __umul24((a), (b))
#define UMAD(a, b, c) ( UMUL((a), (b)) + (c) )
__device__ inline void BMS_Comparator(float &keyA, unsigned int &valA, float &keyB, unsigned int &valB, unsigned int dir)
{
union {
unsigned int uintValue;
float floatValue;
};
if ((keyA > keyB) == dir)
{
floatValue = keyA; keyA = keyB; keyB = floatValue;
uintValue = valA; valA = valB; valB = uintValue;
}
}
////////////////////////////////////////////////////////////////////////////////
// Monolithic bitonic sort kernel for short arrays fitting into shared memory
////////////////////////////////////////////////////////////////////////////////
__global__ void BitonicSortShared_Kernel(float *dstKey, unsigned int *dstVal, float *srcKey, unsigned int *srcVal, unsigned int arrayLength, unsigned int dir)
{
// Shared memory storage for one or more short vectors
__shared__ float sKey[SHARED_SIZE_LIMIT];
__shared__ unsigned int sVal[SHARED_SIZE_LIMIT];
// Offset to the beginning of subbatch and load data
int offset = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
srcKey += offset;
srcVal += offset;
dstKey += offset;
dstVal += offset;
sKey[threadIdx.x + 0] = srcKey[ 0];
sVal[threadIdx.x + 0] = srcVal[ 0];
sKey[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = srcKey[(SHARED_SIZE_LIMIT / 2)];
sVal[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = srcVal[(SHARED_SIZE_LIMIT / 2)];
for (unsigned int size = 2; size < arrayLength; size <<= 1)
{
// bitonic merge
unsigned int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
for (unsigned int stride = size / 2; stride > 0; stride >>= 1)
{
__syncthreads();
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
BMS_Comparator(
sKey[pos + 0], sVal[pos + 0],
sKey[pos + stride], sVal[pos + stride],
ddd
);
}
}
// ddd == dir for the last bitonic merge step
{
for (unsigned int stride = arrayLength / 2; stride > 0; stride >>= 1)
{
__syncthreads();
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
BMS_Comparator(
sKey[pos + 0], sVal[pos + 0],
sKey[pos + stride], sVal[pos + stride],
dir
);
}
}
__syncthreads();
dstKey[ 0] = sKey[threadIdx.x + 0];
dstVal[ 0] = sVal[threadIdx.x + 0];
dstKey[(SHARED_SIZE_LIMIT / 2)] = sKey[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
dstVal[(SHARED_SIZE_LIMIT / 2)] = sVal[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
////////////////////////////////////////////////////////////////////////////////
// Bitonic sort kernel for large arrays (not fitting into shared memory)
////////////////////////////////////////////////////////////////////////////////
// Bottom-level bitonic sort
// Almost the same as bitonicSortShared with the exception of even / odd subarrays being sorted in opposite directions
// Bitonic merge accepts both Ascending | descending or descending | ascending sorted pairs
__global__ void BitonicSortShared1_Kernel(float *dstKey, unsigned int *dstVal, float *srcKey, unsigned int *srcVal)
{
// Shared memory storage for current subarray
__shared__ float sKey[SHARED_SIZE_LIMIT];
__shared__ unsigned int sVal[SHARED_SIZE_LIMIT];
// Offset to the beginning of subarray and load data
int offset = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
srcKey += offset;
srcVal += offset;
dstKey += offset;
dstVal += offset;
sKey[threadIdx.x + 0] = srcKey[ 0];
sVal[threadIdx.x + 0] = srcVal[ 0];
sKey[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = srcKey[(SHARED_SIZE_LIMIT / 2)];
sVal[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = srcVal[(SHARED_SIZE_LIMIT / 2)];
for (unsigned int size = 2; size < SHARED_SIZE_LIMIT; size <<= 1)
{
// Bitonic merge
unsigned int ddd = (threadIdx.x & (size / 2)) != 0;
for (unsigned int stride = size / 2; stride > 0; stride >>= 1)
{
__syncthreads();
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
BMS_Comparator(
sKey[pos + 0], sVal[pos + 0],
sKey[pos + stride], sVal[pos + stride],
ddd
);
}
}
// odd / even arrays of SHARED_SIZE_LIMIT elements
// sorted in opposite directions
unsigned int ddd = blockIdx.x & 1;
{
for (unsigned int stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1)
{
__syncthreads();
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
BMS_Comparator(
sKey[pos + 0], sVal[pos + 0],
sKey[pos + stride], sVal[pos + stride],
ddd
);
}
}
__syncthreads();
dstKey[ 0] = sKey[threadIdx.x + 0];
dstVal[ 0] = sVal[threadIdx.x + 0];
dstKey[(SHARED_SIZE_LIMIT / 2)] = sKey[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
dstVal[(SHARED_SIZE_LIMIT / 2)] = sVal[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
// Bitonic merge iteration for stride >= SHARED_SIZE_LIMIT
__global__ void BitonicMergeGlobal_Kernel(float *dstKey, unsigned int *dstVal, float *srcKey, unsigned int *srcVal, unsigned int arrayLength, unsigned int size, unsigned int stride, unsigned int dir)
{
unsigned int global_comparatorI = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int comparatorI = global_comparatorI & (arrayLength / 2 - 1);
// Bitonic merge
unsigned int ddd = dir ^ ((comparatorI & (size / 2)) != 0);
unsigned int pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1));
float keyA = srcKey[pos + 0];
unsigned int valA = srcVal[pos + 0];
float keyB = srcKey[pos + stride];
unsigned int valB = srcVal[pos + stride];
BMS_Comparator(
keyA, valA,
keyB, valB,
ddd
);
dstKey[pos + 0] = keyA;
dstVal[pos + 0] = valA;
dstKey[pos + stride] = keyB;
dstVal[pos + stride] = valB;
}
// Combined bitonic merge steps for size > SHARED_SIZE_LIMIT and stride = [1 .. SHARED_SIZE_LIMIT / 2]
__global__ void BitonicMergeShared_Kernel(float *dstKey, unsigned int *dstVal, float *srcKey, unsigned int *srcVal, unsigned int arrayLength, unsigned int size, unsigned int dir)
{
// Shared memory storage for current subarray
__shared__ float sKey[SHARED_SIZE_LIMIT];
__shared__ unsigned int sVal[SHARED_SIZE_LIMIT];
int offset = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
srcKey += offset;
srcVal += offset;
dstKey += offset;
dstVal += offset;
sKey[threadIdx.x + 0] = srcKey[ 0];
sVal[threadIdx.x + 0] = srcVal[ 0];
sKey[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = srcKey[(SHARED_SIZE_LIMIT / 2)];
sVal[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = srcVal[(SHARED_SIZE_LIMIT / 2)];
// Bitonic merge
unsigned int comparatorI = UMAD(blockIdx.x, blockDim.x, threadIdx.x) & ((arrayLength / 2) - 1);
unsigned int ddd = dir ^ ((comparatorI & (size / 2)) != 0);
for(unsigned int stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1)
{
__syncthreads();
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
BMS_Comparator(
sKey[pos + 0], sVal[pos + 0],
sKey[pos + stride], sVal[pos + stride],
ddd
);
}
__syncthreads();
dstKey[ 0] = sKey[threadIdx.x + 0];
dstVal[ 0] = sVal[threadIdx.x + 0];
dstKey[(SHARED_SIZE_LIMIT / 2)] = sKey[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
dstVal[(SHARED_SIZE_LIMIT / 2)] = sVal[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
unsigned int BMS_FactorOf2(unsigned int *log2L, unsigned int L)
{
if (!L)
{
*log2L = 0;
return 0;
}
else
{
for (*log2L = 0; (L & 1) == 0; L >>= 1, *log2L++);
return L;
}
}
///////////////////////////////////////////////////////////////////////////////
// bitonic sort is borrowed from the sorting networks sample which is a part of
// the NVIDIA GPU Computing SDK (NVIDIA CUDA Code Samples)
unsigned int BMS_BitonicSort(float *dstKey, unsigned int *dstVal, float *srcKey, unsigned int *srcVal, unsigned int arrayLength, unsigned int dir)
{
// Nothing to sort
if(arrayLength < 2)
return 0;
// only power-of-two array lengths are supported by this implementation
unsigned int log2L;
unsigned int factorizationRemainder = BMS_FactorOf2(&log2L, arrayLength);
assert(factorizationRemainder == 1);
dir = (dir != 0);
unsigned int blockCount = arrayLength / SHARED_SIZE_LIMIT;
unsigned int threadCount = SHARED_SIZE_LIMIT / 2;
if(arrayLength <= SHARED_SIZE_LIMIT)
{
assert(arrayLength % SHARED_SIZE_LIMIT == 0);
hipLaunchKernelGGL(( BitonicSortShared_Kernel), dim3(blockCount), dim3(threadCount), 0, 0, dstKey, dstVal, srcKey, srcVal, arrayLength, dir);
}
else
{
hipLaunchKernelGGL(( BitonicSortShared1_Kernel), dim3(blockCount), dim3(threadCount), 0, 0, dstKey, dstVal, srcKey, srcVal);
for(unsigned int size = 2 * SHARED_SIZE_LIMIT; size <= arrayLength; size <<= 1)
for (unsigned stride = size / 2; stride > 0; stride >>= 1)
if(stride >= SHARED_SIZE_LIMIT)
{
hipLaunchKernelGGL(( BitonicMergeGlobal_Kernel), dim3(arrayLength / 512), dim3(256), 0, 0, dstKey, dstVal, dstKey, dstVal, arrayLength, size, stride, dir);
}
else
{
hipLaunchKernelGGL(( BitonicMergeShared_Kernel), dim3(blockCount), dim3(threadCount), 0, 0, dstKey, dstVal, dstKey, dstVal, arrayLength, size, dir);
break;
}
}
return threadCount;
}
#undef SHARED_SIZE_LIMIT
void ProgramCU::TopKSort(TopKData &topKData)
{
// sort in descending order
unsigned int threadsCount = BMS_BitonicSort(
topKData.keys, topKData.indices, // dst
topKData.keys, topKData.indices, // src
topKData.keypointsCountAsPowerOfTwo, // array length - have to be power of 2
0 // sort direction
);
}
__global__ void MarkSelectedElements_Kernel(unsigned int *outFlags, unsigned int *inIndices, int len, int topK)
{
int threadID = threadIdx.x + blockDim.x* blockIdx.x;
if (threadID >= len)
return;
if(threadID < topK)
{
unsigned int index = inIndices[threadID];
outFlags[index] = 1;
} // 0 are not written because of cleared array to 0
}
/////////////////////
// Define this to more rigorously avoid bank conflicts, even at the lower (root) levels of the tree
// Note that due to the higher addressing overhead, performance is lower with ZERO_BANK_CONFLICTS enabled.
// It is provided as an example.
//#define ZERO_BANK_CONFLICTS
// 16 banks on G80, 32 foc CC 2.X and 3.X
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
///////////////////////////////////////////////////////////////////////////////
// prefix scan is borrowed from the scan large array sample which is a part of
// the NVIDIA GPU Computing SDK
//
// Work-efficient compute implementation of scan, one thread per 2 elements
// Work-efficient: O(log(n)) steps, and O(n) adds.
// Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging
// Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements.
//
// In addition, If ZERO_BANK_CONFLICTS is defined, uses
// n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS)
// shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using
// single-element offsets every NUM_BANKS elements, plus additional single-element offsets
// after every NUM_BANKS^2 elements.
//
// Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums
// and Their Applications", or Prins and Chatterjee PRAM course notes:
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
//
// This work-efficient version is based on the algorithm presented in Guy Blelloch's
// excellent paper "Prefix sums and their applications".
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
//
// Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined)
// Con: More instructions to compute bank-conflict-free shared memory addressing,
// and slightly more shared memory storage used.
//
template <bool isNP2>
__device__ void PPS_LoadSharedChunkFromMem(unsigned int *sData, const unsigned int *inData, int n, int baseIndex, int& ai, int& bi, int& memAi, int& memBi, int& bankOffsetA, int& bankOffsetB)
{
int threadID = threadIdx.x;
memAi = baseIndex + threadIdx.x;
memBi = memAi + blockDim.x;
ai = threadID;
bi = threadID + blockDim.x;
// compute spacing to avoid bank conflicts
bankOffsetA = CONFLICT_FREE_OFFSET(ai);
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// cache the computational window in shared memory, pad values beyond n with zeros
sData[ai + bankOffsetA] = inData[memAi];
if(isNP2) // compile-time decision
{
sData[bi + bankOffsetB] = (bi < n) ? inData[memBi] : 0;
}
else
{
sData[bi + bankOffsetB] = inData[memBi];
}
}
template <bool isNP2>
__device__ void PPS_StoreSharedChunkToMem(unsigned int *outData, const unsigned int *sData, int n, int ai, int bi, int memAi, int memBi, int bankOffsetA, int bankOffsetB)
{
__syncthreads();
// write results to global memory
outData[memAi] = sData[ai + bankOffsetA];
if(isNP2) // compile-time decision
{
if(bi < n)
outData[memBi] = sData[bi + bankOffsetB];
}
else
{
outData[memBi] = sData[bi + bankOffsetB];
}
}
template <bool storeSum>
__device__ void PPS_ClearLastElement(unsigned int *sData, unsigned int *blockSums, int blockIndex)
{
if(threadIdx.x == 0)
{
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if(storeSum) // compile-time decision
{
// write this block's total sum to the corresponding index in the blockSums array
blockSums[blockIndex] = sData[index];
}
// zero the last element in the scan so it will propagate back to the front
sData[index] = 0;
}
}
__device__ unsigned int PPS_BuildSum(unsigned int *sData)
{
unsigned int threadID = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for(int d = blockDim.x; d > 0; d >>= 1)
{
__syncthreads();
if(threadID < d)
{
int i = IMUL(IMUL(2, stride), threadID);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
sData[bi] += sData[ai];
}
stride *= 2;
}
return stride;
}
__device__ void PPS_ScanRootToLeaves(unsigned int *sData, unsigned int stride)
{
unsigned int threadID = threadIdx.x;
// traverse down the tree building the scan in place
for(int d = 1; d <= blockDim.x; d *= 2)
{
stride >>= 1;
__syncthreads();
if(threadID < d)
{
int i = IMUL(IMUL(2, stride), threadID);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
unsigned int t = sData[ai];
sData[ai] = sData[bi];
sData[bi] += t;
}
}
}
template <bool storeSum>
__device__ void PPS_PrescanBlock(unsigned int *data, int blockIndex, unsigned int *blockSums)
{
// build the sum in place up the tree
int stride = PPS_BuildSum(data);
PPS_ClearLastElement<storeSum>(data, blockSums, (blockIndex == 0) ? blockIdx.x : blockIndex);
// traverse down tree to build the scan
PPS_ScanRootToLeaves(data, stride);
}
template <bool storeSum, bool isNP2>
__global__ void PPS_Prescan_Kernel(unsigned int *outData, const unsigned int *inData, unsigned int *blockSums, int n, int blockIndex, int baseIndex)
{
extern __shared__ unsigned int shData[];
int ai, bi, memAi, memBi, bankOffsetA, bankOffsetB;
// load data into shared memory
PPS_LoadSharedChunkFromMem<isNP2>(shData, inData, n, (baseIndex == 0) ? IMUL(blockIdx.x, (blockDim.x << 1)) : baseIndex, ai, bi, memAi, memBi, bankOffsetA, bankOffsetB);
// scan the data in each block
PPS_PrescanBlock<storeSum>(shData, blockIndex, blockSums);
// write results to device memory
PPS_StoreSharedChunkToMem<isNP2>(outData, shData, n, ai, bi, memAi, memBi, bankOffsetA, bankOffsetB);
}
__global__ void PPS_UniformAdd_Kernel(unsigned int *data, unsigned int *uniforms, int n, int blockOffset, int baseIndex)
{
__shared__ unsigned int uni;
if(threadIdx.x == 0)
uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = IMUL(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
__syncthreads();
// note two adds per thread
data[address] += uni;
data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
inline bool PPS_IsPowerOfTwo(int n)
{
return ((n&(n-1))==0) ;
}
inline int PPS_FloorPow2(int n)
{
#ifdef WIN32
// method 2
return 1 << (int)logb((float)n);
#else
// method 1
// float nf = (float)n;
// return 1 << (((*(int*)&nf) >> 23) - 127);
int exp;
frexp((float)n, &exp);
return 1 << (exp - 1);
#endif
}
#define THREADS_PER_BLOCK 128
void PPS_PreallocBlockSums(TopKData &topKData, unsigned int maxNumElements)
{
assert(topKData.numElementsAllocated == 0); // shouldn't be called
topKData.numElementsAllocated = maxNumElements;
unsigned int blockSize = THREADS_PER_BLOCK; // max size of the thread blocks
unsigned int numElements = maxNumElements;
int level = 0;
do
{
unsigned int numBlocks = max(1, (int)ceil((float)numElements / (2.f * blockSize)));
if(numBlocks > 1)
level++;
numElements = numBlocks;
} while(numElements > 1);
topKData.scanBlockSums = (unsigned int**) malloc(level * sizeof(unsigned int*));
topKData.numLevelsAllocated = level;
numElements = maxNumElements;
level = 0;
do
{
unsigned int numBlocks = max(1, (int)ceil((float)numElements / (2.f * blockSize)));
if (numBlocks > 1)
hipMalloc((void**) &topKData.scanBlockSums[level++], numBlocks * sizeof(unsigned int));
numElements = numBlocks;
} while(numElements > 1);
}
void PPS_DeallocBlockSums(TopKData &topKData)
{
for(unsigned int i = 0; i < topKData.numLevelsAllocated; i++)
hipFree(topKData.scanBlockSums[i]);
free((void**)topKData.scanBlockSums);
topKData.scanBlockSums = NULL;
topKData.numElementsAllocated = 0;
topKData.numLevelsAllocated = 0;
}
// prefix scan is borrowed from the scan large array sample which is a part of NVIDIA GPU Computing SDK
void PPS_PrescanArrayRecursive(TopKData &topKData, unsigned int *outArray, const unsigned int *inArray, int numElements, int level)
{
unsigned int blockSize = THREADS_PER_BLOCK; // max size of the thread blocks
unsigned int numBlocks = max(1, (int)ceil((float)numElements / (2.f * blockSize)));
unsigned int numThreads;
if(numBlocks > 1)
numThreads = blockSize;
else if(PPS_IsPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = PPS_FloorPow2(numElements);
unsigned int numElementsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
unsigned int numElementsLastBlock = numElements - (numBlocks-1) * numElementsPerBlock;
unsigned int numThreadsLastBlock = max(1, numElementsLastBlock / 2);
unsigned int np2LastBlock = 0;
unsigned int sharedMemLastBlock = 0;
if (numElementsLastBlock != numElementsPerBlock)
{
np2LastBlock = 1;
if(!PPS_IsPowerOfTwo(numElementsLastBlock))
numThreadsLastBlock = PPS_FloorPow2(numElementsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(unsigned int) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
unsigned int extraSpace = numElementsPerBlock / NUM_BANKS;
unsigned int sharedMemSize = sizeof(unsigned int) * (numElementsPerBlock + extraSpace);
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
// execute the scan
if(numBlocks > 1)
{
hipLaunchKernelGGL(( PPS_Prescan_Kernel<true, false>), dim3(grid), dim3(threads), sharedMemSize, 0, outArray, inArray, topKData.scanBlockSums[level], numThreads*2, 0, 0);
if(np2LastBlock)
hipLaunchKernelGGL(( PPS_Prescan_Kernel<true, true>), dim3(1), dim3(numThreadsLastBlock), sharedMemLastBlock, 0, outArray, inArray, topKData.scanBlockSums[level], numElementsLastBlock, numBlocks - 1, numElements - numElementsLastBlock);
// After scanning all the sub-blocks, we are mostly done. But now we need to take all of the last
// values of the sub-blocks and scan those. This will give us a new value that must be sdded to each
// block to get the final results.
// recursive (CPU) call
PPS_PrescanArrayRecursive(topKData, topKData.scanBlockSums[level], topKData.scanBlockSums[level], numBlocks, level+1);
hipLaunchKernelGGL(( PPS_UniformAdd_Kernel), dim3(grid), dim3(threads), 0, 0, outArray, topKData.scanBlockSums[level], numElements - numElementsLastBlock, 0, 0);
if(np2LastBlock)
hipLaunchKernelGGL(( PPS_UniformAdd_Kernel), dim3(1), dim3(numThreadsLastBlock), 0, 0, outArray, topKData.scanBlockSums[level], numElementsLastBlock, numBlocks - 1, numElements - numElementsLastBlock);
}
else if(PPS_IsPowerOfTwo(numElements))
{
hipLaunchKernelGGL(( PPS_Prescan_Kernel<false, false>), dim3(grid), dim3(threads), sharedMemSize, 0, outArray, inArray, 0, numThreads*2, 0, 0);
}
else
{
hipLaunchKernelGGL(( PPS_Prescan_Kernel<false, true>), dim3(grid), dim3(threads), sharedMemSize, 0, outArray, inArray, 0, numElements, 0, 0);
}
}
void ProgramCU::TopKPrefixScan(TopKData &topKData)
{
if(topKData.keypointsCount <= 0)
return;
topKData.numElementsAllocated = 0;
topKData.numLevelsAllocated = 0;
unsigned int numElements = topKData.keypointsCountAsPowerOfTwo;
unsigned int *devIdxs = (unsigned int *)topKData.keys;
unsigned int *devData = topKData.indices;
// clear keypoints flags to 0
hipMemset(devIdxs, 0, (numElements+1)*sizeof(unsigned int));
const int blocks = (numElements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// prepare input for prefix scan - mark selected keypoints by 1
hipLaunchKernelGGL(( MarkSelectedElements_Kernel), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, devIdxs, devData, numElements+1, topKData.topKCountThreshold);
PPS_PreallocBlockSums(topKData, numElements);
// run the prescan
PPS_PrescanArrayRecursive(topKData, devIdxs, devIdxs, numElements, 0);
PPS_DeallocBlockSums(topKData);
CheckErrorCUDA("TopKPrefixScan");
}
#undef THREADS_PER_BLOCK
void __global__ GetLevelsFeatureNum_Kernel(unsigned int *indices, int *levelFeatureNum, int levelsCount)
{
int threadID = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
if(threadID >= levelsCount)
return;
unsigned int index = levelFeatureNum[threadID];
levelFeatureNum[threadID] = indices[index];
}
void ProgramCU::TopKGetLevelsFeatureNum(TopKData &topKData)
{
if(topKData.levelsCount <= 0)
return;
hipMalloc((void**)&(topKData.devLevelFeaturesCount), topKData.levelsCount*sizeof(unsigned int));
hipMemcpy(topKData.devLevelFeaturesCount, topKData.levelFeaturesCount, topKData.levelsCount*sizeof(unsigned int), hipMemcpyHostToDevice);
int blockWidth = TOPK_BLOCK_SIZE;
dim3 grid((topKData.levelsCount + blockWidth - 1) / blockWidth);
dim3 block(blockWidth);
hipLaunchKernelGGL(( GetLevelsFeatureNum_Kernel), dim3(grid), dim3(block), 0, 0, (unsigned int *)topKData.keys, topKData.devLevelFeaturesCount, topKData.levelsCount);
hipMemcpy(topKData.levelFeaturesCount, topKData.devLevelFeaturesCount, topKData.levelsCount*sizeof(unsigned int), hipMemcpyDeviceToHost);
CheckErrorCUDA("TopKGetLevelsFeatureNum");
}
void __global__ CompactLevelFeatures_Kernel(float4 *outFeatures, unsigned int *indices, unsigned int offset, unsigned int featuresCount)
{
int threadID = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
if(threadID >= featuresCount)
return;
unsigned int index = indices[offset + threadID];
if(index == indices[offset + threadID + 1])
return;
unsigned int firstIndex = indices[offset];
index -= firstIndex;
float4 key = tex1Dfetch(texDataF4, threadID);
outFeatures[index] = key;
}
void ProgramCU::TopKCompactLevelFeatures(CuTexImage *list, unsigned int oldLen, float **newLevelFeatures, unsigned int newLen, TopKData &topKData, unsigned int offset)
{
if(newLen <= 0)
{
*newLevelFeatures = NULL;
return;
}
hipMalloc((void**)(newLevelFeatures), newLen*4*sizeof(float));
list->BindTexture(texDataF4);
int blockWidth = TOPK_BLOCK_SIZE;
dim3 grid((oldLen + blockWidth - 1) / blockWidth);
dim3 block(blockWidth);
hipLaunchKernelGGL(( CompactLevelFeatures_Kernel), dim3(grid), dim3(block), 0, 0, (float4 *)(*newLevelFeatures), (unsigned int *)topKData.keys, offset, oldLen);
CheckErrorCUDA("TopKCompactLevelFeatures");
}
#endif // (GPU_HESSIAN || GPU_SIFT_MODIFIED) && TOP_K_SELECTION
#if defined GENERATE_FEATURE_LIST_USING_ATOMICS && (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED)
void ProgramCU::DetectionDataInit(int **featureTexLen, int len)
{
hipMalloc((void**)(featureTexLen), len*sizeof(int));
hipMemset(*featureTexLen, 0, len*sizeof(int));
CheckErrorCUDA("ProgramCU::DetectionDataInit");
}
void ProgramCU::DetectionDataDownload(int *dst, int *featureTexLen, int len)
{
hipMemcpy(dst, featureTexLen, len*sizeof(int), hipMemcpyDeviceToHost);
hipMemset(featureTexLen, 0, len*sizeof(int));
CheckErrorCUDA("ProgramCU::DetectionDataDownload");
}
void ProgramCU::DetectionDataFinish(int **featureTexLen)
{
hipFree(*featureTexLen);
*featureTexLen = NULL;
CheckErrorCUDA("ProgramCU::DetectionDataFinish");
}
#endif // GENERATE_FEATURE_LIST_USING_ATOMICS && (GPU_HESSIAN || GPU_SIFT_MODIFIED)
//////////////////////////////////////////////////////
void ProgramCU::FinishCUDA()
{
hipDeviceSynchronize();
}
int ProgramCU::CheckErrorCUDA(const char* location)
{
hipError_t e = hipGetLastError();
if(e)
{
if(location)
fprintf(stderr, "%s:\t", location);
fprintf(stderr, "%s\n", hipGetErrorString(e));
// assert(0);
return 1;
}
else {
return 0;
}
}
void __global__ ConvertDOG_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if((col < width) && (row < height))
{
int index = row * width + col;
float value = tex1Dfetch(texData, index);
d_result[index] = ((col == 0) || (row == 0) || (col == width-1) || (row == height-1)) ? 0.5 : saturate(0.5+20.0*value);
}
}
void ProgramCU::DisplayConvertDOG(CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL)
return;
int width = dog->GetImgWidth();
int height = dog ->GetImgHeight();
dog->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
hipLaunchKernelGGL(( ConvertDOG_Kernel), dim3(grid), dim3(block), 0, 0, (float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertDOG");
}
void __global__ ConvertGRD_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if((col < width) && (row < height))
{
int index = row * width + col;
float value = tex1Dfetch(texData, index << 1);
d_result[index] = ((col == 0) || (row == 0) || (col == width-1) || (row == height-1)) ? 0.0 : saturate(5.0*value);
}
}
void ProgramCU::DisplayConvertGRD(CuTexImage* got, CuTexImage* out)
{
if(out->_cuData == NULL)
return;
int width = got->GetImgWidth();
int height = got ->GetImgHeight();
got->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
hipLaunchKernelGGL(( ConvertGRD_Kernel), dim3(grid), dim3(block), 0, 0, (float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertGRD");
}
void __global__ ConvertKEY_Kernel(float4* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if((col < width) && (row < height))
{
int index = row * width + col;
float4 key = tex1Dfetch(texDataF4, index);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// input: make_float4(result, dx, dy, ds); => dx, dy, ds ... subpixel localizations (otherwise zero)
// result = response 16b half float | 14b unused | 2b type
// type = FEATURE_TYPE_DARK_BLOB = 0
// FEATURE_TYPE_BRIGHT_BLOB = 1
// FEATURE_TYPE_SADDLE_POINT = 2
// FEATURE_TYPE_NONE = 3
// extract feature type
unsigned int typeValue = *((unsigned int *)(&key.x)); // __float_as_uint(key.x); // CUDA7.5
typeValue = typeValue & 0x00000003u;
// int is_key = (typeValue != FEATURE_TYPE_NONE);
#else
int is_key = ((key.x == 1.0f) || (key.x == -1.0f));
#endif // GPU_SIFT_MODIFIED || GPU_HESSIAN
int inside = (col > 0) && (row > 0) && (row < height-1) && (col < width-1);
float value = inside ? saturate(0.5 + 20.0 * tex1Dfetch(texData, index)) : 0.5;
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
float4 result = make_float4(value, value, value, 0.0f);
switch(typeValue)
{
case FEATURE_TYPE_DARK_BLOB:
result = inside ? make_float4(1.0f, 0.0f, 0.0f, 1.0f) : result;
break;
case FEATURE_TYPE_BRIGHT_BLOB:
result = inside ? make_float4(0.0f, 1.0f, 0.0f, 1.0f) : result;
break;
case FEATURE_TYPE_SADDLE_POINT:
result = inside ? make_float4(0.0f, 0.0f, 1.0f, 1.0f) : result;
break;
case FEATURE_TYPE_NONE:
default:
break;
}
d_result[index] = result;
// if((typeValue != HESSIAN_FEATURE_TYPE_NONE) && inside)
// {
// d_result[index-1] = result;
// d_result[index+1] = result;
// d_result[index-width] = result;
// d_result[index+width] = result;
// }
#else
d_result[index] = (is_key && inside) ? ((key.x > 0) ? make_float4(1.0f, 0.0f, 0.0f, 1.0f) : make_float4(0.0f, 1.0f, 0.0f, 1.0f)) : make_float4(value, value, value, 1.0f);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
}
void ProgramCU::DisplayConvertKEY(CuTexImage* key, CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL)
return;
int width = key->GetImgWidth();
int height = key ->GetImgHeight();
dog->BindTexture(texData);
key->BindTexture(texDataF4);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
hipLaunchKernelGGL(( ConvertKEY_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) out->_cuData, width, height);
}
void __global__ DisplayKeyPoint_Kernel(float4 * d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num)
return;
float4 key = tex1Dfetch(texDataF4, idx);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// input in the feature list
// key.x: response 8b H | x 24b-14.10
// key.y: response 8b L | y 24b-14.10
// key.z: 2b type | 14b unused | scale 16b-8.8
// key.w: orientation
// extract x position
unsigned int tmpValue = *((unsigned int *)(&key.x)); // __float_as_uint(key.x); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.x = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract y position
tmpValue = *((unsigned int *)(&key.y)); // __float_as_uint(key.y); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.y = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
d_result[idx] = make_float4(key.x, key.y, 0, 1.0f);
}
void ProgramCU::DisplayKeyPoint(CuTexImage* ftex, CuTexImage* out)
{
int num = ftex->GetImgWidth();
int block_width = 64;
dim3 grid((num + block_width -1) /block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
hipLaunchKernelGGL(( DisplayKeyPoint_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) out->_cuData, num);
ProgramCU::CheckErrorCUDA("DisplayKeyPoint");
}
void __global__ DisplayKeyBox_Kernel(float4* d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num)
return;
int kidx = idx / 10;
int vidx = idx - IMUL(kidx , 10);
// fetch feature/keypoint
float4 key = tex1Dfetch(texDataF4, kidx);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// input in the feature list
// key.x: response 8b H | x 24b-14.10
// key.y: response 8b L | y 24b-14.10
// key.z: 2b type | 14 unused | scale 16b-8.8
// key.w: orientation
// extract x position
unsigned int tmpValue = *((unsigned int *)(&key.x)); // __float_as_uint(key.x); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.x = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract y position
tmpValue = *((unsigned int *)(&key.y)); // __float_as_uint(key.y); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.y = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract scale
tmpValue = *((unsigned int *)(&key.z)); // __float_as_uint(key.z); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_SCALE_MASK;
key.z = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_SCALE_PRECISION_BITS);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
float sz = fabs(key.z * 3.0f);
///////////////////////
float s, c;
__sincosf(key.w, &s, &c);
///////////////////////
float dx = (vidx == 0) ? 0 : (((vidx <= 4) || (vidx >= 9)) ? sz : -sz);
float dy = (vidx <= 1) ? 0 : (((vidx <= 2) || (vidx >= 7)) ? -sz : sz);
float4 pos;
pos.x = key.x + c * dx - s * dy;
pos.y = key.y + c * dy + s * dx;
pos.z = 0;
pos.w = 1.0f;
d_result[idx] = pos;
}
void ProgramCU::DisplayKeyBox(CuTexImage* ftex, CuTexImage* out)
{
int len = ftex->GetImgWidth();
int block_width = 32;
dim3 grid((len * 10 + block_width -1) / block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
hipLaunchKernelGGL(( DisplayKeyBox_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) out->_cuData, len * 10);
}
///////////////////////////////////////////////////////////////////
inline void CuTexImage::BindTexture(textureReference& texRef)
{
hipBindTexture(NULL, &texRef, _cuData, &texRef.channelDesc, _numBytes);
}
inline void CuTexImage::BindTexture2D(textureReference& texRef)
{
#if defined(SIFTGPU_ENABLE_LINEAR_TEX2D)
if ((_imgWidth*_numChannel*sizeof(float)) % 32)
std::cout<<"Warning: Row length should be multiply of 32 !"<<std::endl;
hipBindTexture2D(0, &texRef, _cuData, &texRef.channelDesc, _imgWidth, _imgHeight, _imgWidth*_numChannel*sizeof(float));
#else
hipChannelFormatDesc desc;
hipGetChannelDesc(&desc, _cuData2D);
hipBindTextureToArray(&texRef, _cuData2D, &desc);
#endif
}
int ProgramCU::CheckCudaDevice(int device)
{
int count = 0, device_used;
if((hipGetDeviceCount(&count) != hipSuccess) || (count <= 0))
{
ProgramCU::CheckErrorCUDA("CheckCudaDevice");
return 0;
}
else if(count == 1)
{
hipDeviceProp_t deviceProp;
if((hipGetDeviceProperties(&deviceProp, 0) != hipSuccess) || ((deviceProp.major == 9999) && (deviceProp.minor == 9999)))
{
fprintf(stderr, "CheckCudaDevice: no device supporting CUDA.\n");
return 0;
}
else
{
GlobalUtil::_MemCapGPU = deviceProp.totalGlobalMem / 1024;
GlobalUtil::_texMaxDimGL = 32768;
if(GlobalUtil::_verbose)
fprintf(stdout, "NOTE: changing maximum texture dimension to %d\n", GlobalUtil::_texMaxDimGL);
}
}
if((device > 0) && (device < count))
{
hipSetDevice(device);
CheckErrorCUDA("hipSetDevice\n");
}
hipGetDevice(&device_used);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// we need CC 2.0 at least for feature list construction using atomics and topk selection
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device_used);
if(deviceProp.major < 2)
{
fprintf(stderr, "CheckCudaDevice: no device supporting CUDA CC 2.X or higher. Your device has just CC %d.%d.\n", deviceProp.major, deviceProp.minor);
fprintf(stderr, " Disable GENERATE_FEATURE_LIST_USING_ATOMICS and TOP_K_SELECTION in config.h and rebuild project.\n");
return 0;
}
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
if(device != device_used)
fprintf(stderr, "\nERROR: Cannot set device to %d\n"
"\nWARNING: Use # %d device instead (out of %d)\n", device, device_used, count);
return 1;
}
////////////////////////////////////////////////////////////////////////////////////////
// siftmatch funtions
//////////////////////////////////////////////////////////////////////////////////////////
#define MULT_TBLOCK_DIMX 128
#define MULT_TBLOCK_DIMY 1
#define MULT_BLOCK_DIMX (MULT_TBLOCK_DIMX)
#define MULT_BLOCK_DIMY (8 * MULT_TBLOCK_DIMY)
texture<uint4, 1, hipReadModeElementType> texDes1;
texture<uint4, 1, hipReadModeElementType> texDes2;
void __global__ MultiplyDescriptor_Kernel(int* d_result, int num1, int num2, int3* d_temp)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY), idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y, idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
int read_idx1 = idx01 * 8 + threadIdx.x, read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
///////////////////////////////////////////////////////////////
//Load feature descriptors
///////////////////////////////////////////////////////////////
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
}
#else
#error
#endif
__syncthreads();
///
if(idx2 >= num2) return;
///////////////////////////////////////////////////////////////////////////
//compare descriptors
int results[MULT_BLOCK_DIMY];
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i) results[i] = 0;
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
}
void ProgramCU::MultiplyDescriptor(CuTexImage* des1, CuTexImage* des2, CuTexImage* texDot, CuTexImage* texCRT)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture(num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 32);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
hipLaunchKernelGGL(( MultiplyDescriptor_Kernel), dim3(grid), dim3(block), 0, 0, (int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL));
ProgramCU::CheckErrorCUDA("MultiplyDescriptor");
}
texture<float, 1, hipReadModeElementType> texLoc1;
texture<float2, 1, hipReadModeElementType> texLoc2;
struct Matrix33{float mat[3][3];};
void __global__ MultiplyDescriptorG_Kernel(int* d_result, int num1, int num2, int3* d_temp,
Matrix33 H, float hdistmax, Matrix33 F, float fdistmax)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY);
int idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y;
int idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
__shared__ float loc1[MULT_BLOCK_DIMY * 2];
int read_idx1 = idx01 * 8 + threadIdx.x ;
int read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
}
#else
#error
#endif
__syncthreads();
if(threadIdx.x < MULT_BLOCK_DIMY * 2)
{
loc1[threadIdx.x] = tex1Dfetch(texLoc1, 2 * idx01 + threadIdx.x);
}
__syncthreads();
if(idx2 >= num2) return;
int results[MULT_BLOCK_DIMY];
/////////////////////////////////////////////////////////////////////////////////////////////
//geometric verification
/////////////////////////////////////////////////////////////////////////////////////////////
int good_count = 0;
float2 loc2 = tex1Dfetch(texLoc2, idx2);
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
float* loci = loc1 + i * 2;
float locx = loci[0], locy = loci[1];
//homography
float x[3], diff[2];
x[0] = H.mat[0][0] * locx + H.mat[0][1] * locy + H.mat[0][2];
x[1] = H.mat[1][0] * locx + H.mat[1][1] * locy + H.mat[1][2];
x[2] = H.mat[2][0] * locx + H.mat[2][1] * locy + H.mat[2][2];
diff[0] = fabs(FDIV(x[0], x[2]) - loc2.x);
diff[1] = fabs(FDIV(x[1], x[2]) - loc2.y);
if(diff[0] < hdistmax && diff[1] < hdistmax)
{
//check fundamental matrix
float fx1[3], ftx2[3], x2fx1, se;
fx1[0] = F.mat[0][0] * locx + F.mat[0][1] * locy + F.mat[0][2];
fx1[1] = F.mat[1][0] * locx + F.mat[1][1] * locy + F.mat[1][2];
fx1[2] = F.mat[2][0] * locx + F.mat[2][1] * locy + F.mat[2][2];
ftx2[0] = F.mat[0][0] * loc2.x + F.mat[1][0] * loc2.y + F.mat[2][0];
ftx2[1] = F.mat[0][1] * loc2.x + F.mat[1][1] * loc2.y + F.mat[2][1];
//ftx2[2] = F.mat[0][2] * loc2.x + F.mat[1][2] * loc2.y + F.mat[2][2];
x2fx1 = loc2.x * fx1[0] + loc2.y * fx1[1] + fx1[2];
se = FDIV(x2fx1 * x2fx1, fx1[0] * fx1[0] + fx1[1] * fx1[1] + ftx2[0] * ftx2[0] + ftx2[1] * ftx2[1]);
results[i] = se < fdistmax? 0: -262144;
}else
{
results[i] = -262144;
}
}else
{
results[i] = -262144;
}
good_count += (results[i] >=0);
}
/////////////////////////////////////////////////////////////////////////////////////////////
///compare feature descriptors anyway
/////////////////////////////////////////////////////////////////////////////////////////////
if(good_count > 0)
{
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
#pragma unroll
for(int i= 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
}else
{
break;
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
else break;
}
}
}
void ProgramCU::MultiplyDescriptorG(CuTexImage* des1, CuTexImage* des2,
CuTexImage* loc1, CuTexImage* loc2, CuTexImage* texDot, CuTexImage* texCRT,
float H[3][3], float hdistmax, float F[3][3], float fdistmax)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
Matrix33 MatF, MatH;
//copy the matrix
memcpy(MatF.mat, F, 9 * sizeof(float));
memcpy(MatH.mat, H, 9 * sizeof(float));
//thread blocks
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
//intermediate results
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture( num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 3);
loc1->BindTexture(texLoc1);
loc2->BindTexture(texLoc2);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
hipLaunchKernelGGL(( MultiplyDescriptorG_Kernel), dim3(grid), dim3(block), 0, 0, (int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL),
MatH, hdistmax, MatF, fdistmax);
}
texture<int, 1, hipReadModeElementType> texDOT;
#define ROWMATCH_BLOCK_WIDTH 32
#define ROWMATCH_BLOCK_HEIGHT 1
void __global__ RowMatch_Kernel(int*d_dot, int* d_result, int num2, float distmax, float ratiomax)
{
#if ROWMATCH_BLOCK_HEIGHT == 1
__shared__ int dotmax[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotnxt[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotidx[ROWMATCH_BLOCK_WIDTH];
int row = blockIdx.y;
#else
__shared__ int x_dotmax[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotnxt[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotidx[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
int* dotmax = x_dotmax[threadIdx.y];
int* dotnxt = x_dotnxt[threadIdx.y];
int* dotidx = x_dotidx[threadIdx.y];
int row = IMUL(blockIdx.y, ROWMATCH_BLOCK_HEIGHT) + threadIdx.y;
#endif
int base_address = IMUL(row , num2);
int t_dotmax = 0, t_dotnxt = 0, t_dotidx = -1;
for(int i = 0; i < num2; i += ROWMATCH_BLOCK_WIDTH)
{
if(threadIdx.x + i < num2)
{
int v = tex1Dfetch(texDOT, base_address + threadIdx.x + i);//d_dot[base_address + threadIdx.x + i];//
bool test = v > t_dotmax;
t_dotnxt = test? t_dotmax : max(t_dotnxt, v);
t_dotidx = test? (threadIdx.x + i) : t_dotidx;
t_dotmax = test? v: t_dotmax;
}
__syncthreads();
}
dotmax[threadIdx.x] = t_dotmax;
dotnxt[threadIdx.x] = t_dotnxt;
dotidx[threadIdx.x] = t_dotidx;
__syncthreads();
#pragma unroll
for(int step = ROWMATCH_BLOCK_WIDTH/2; step >0; step /= 2)
{
if(threadIdx.x < step)
{
int v1 = dotmax[threadIdx.x], v2 = dotmax[threadIdx.x + step];
bool test = v2 > v1;
dotnxt[threadIdx.x] = test? max(v1, dotnxt[threadIdx.x + step]) :max(dotnxt[threadIdx.x], v2);
dotidx[threadIdx.x] = test? dotidx[threadIdx.x + step] : dotidx[threadIdx.x];
dotmax[threadIdx.x] = test? v2 : v1;
}
__syncthreads();
}
if(threadIdx.x == 0)
{
float dist = acos(min(dotmax[0] * 0.000003814697265625f, 1.0));
float distn = acos(min(dotnxt[0] * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[row] = (dist < distmax) && (dist < distn * ratiomax) ? dotidx[0] : -1;//? : -1;
}
}
void ProgramCU::GetRowMatch(CuTexImage* texDot, CuTexImage* texMatch, float distmax, float ratiomax)
{
int num1 = texDot->GetImgHeight();
int num2 = texDot->GetImgWidth();
dim3 grid(1, num1/ROWMATCH_BLOCK_HEIGHT);
dim3 block(ROWMATCH_BLOCK_WIDTH, ROWMATCH_BLOCK_HEIGHT);
texDot->BindTexture(texDOT);
hipLaunchKernelGGL(( RowMatch_Kernel), dim3(grid), dim3(block), 0, 0, (int*)texDot->_cuData,
(int*)texMatch->_cuData, num2, distmax, ratiomax);
}
#define COLMATCH_BLOCK_WIDTH 32
//texture<int3, 1, hipReadModeElementType> texCT;
void __global__ ColMatch_Kernel(int3*d_crt, int* d_result, int height, int num2, float distmax, float ratiomax)
{
int col = COLMATCH_BLOCK_WIDTH * blockIdx.x + threadIdx.x;
if(col >= num2) return;
int3 result = d_crt[col];//tex1Dfetch(texCT, col);
int read_idx = col + num2;
for(int i = 1; i < height; ++i, read_idx += num2)
{
int3 temp = d_crt[read_idx];//tex1Dfetch(texCT, read_idx);
result = result.x < temp.x?
make_int3(temp.x, temp.y, max(result.x, temp.z)) :
make_int3(result.x, result.y, max(result.z, temp.x));
}
float dist = acos(min(result.x * 0.000003814697265625f, 1.0));
float distn = acos(min(result.z * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[col] = (dist < distmax) && (dist < distn * ratiomax) ? result.y : -1;//? : -1;
}
void ProgramCU::GetColMatch(CuTexImage* texCRT, CuTexImage* texMatch, float distmax, float ratiomax)
{
int height = texCRT->GetImgHeight();
int num2 = texCRT->GetImgWidth();
//texCRT->BindTexture(texCT);
dim3 grid((num2 + COLMATCH_BLOCK_WIDTH -1) / COLMATCH_BLOCK_WIDTH);
dim3 block(COLMATCH_BLOCK_WIDTH);
hipLaunchKernelGGL(( ColMatch_Kernel), dim3(grid), dim3(block), 0, 0, (int3*)texCRT->_cuData, (int*) texMatch->_cuData, height, num2, distmax, ratiomax);
}
#endif
| 2275323cfb69043a92902badc16c0f3cb5d5e6a2.cu | ////////////////////////////////////////////////////////////////////////////
// File: ProgramCU.cu
// Author: Changchang Wu
// Description : implementation of ProgramCU and all CUDA kernels
//
// Copyright (c) 2007 University of North Carolina at Chapel Hill
// All Rights Reserved
//
// Permission to use, copy, modify and distribute this software and its
// documentation for educational, research and non-profit purposes, without
// fee, and without a written agreement is hereby granted, provided that the
// above copyright notice and the following paragraph appear in all copies.
//
// The University of North Carolina at Chapel Hill make no representations
// about the suitability of this software for any purpose. It is provided
// 'as is' without express or implied warranty.
//
// Please send BUG REPORTS to ccwu@cs.unc.edu
//
////////////////////////////////////////////////////////////////////////////
#if defined(CUDA_SIFTGPU_ENABLED)
#include "GL/glew.h"
#include "stdio.h"
#include <iostream>
#include <assert.h>
#include "CuTexImage.h"
#include "ProgramCU.h"
#include "GlobalUtil.h"
//----------------------------------------------------------------
//Begin SiftGPU setting section.
//////////////////////////////////////////////////////////
#define IMUL(X,Y) __mul24(X,Y)
//#define FDIV(X,Y) ((X)/(Y))
#define FDIV(X,Y) __fdividef(X,Y)
/////////////////////////////////////////////////////////
//filter kernel width range (don't change this)
#define KERNEL_MAX_WIDTH 33
#define KERNEL_MIN_WIDTH 5
//////////////////////////////////////////////////////////
//horizontal filter block size (32, 64, 128, 256, 512)
#define FILTERH_TILE_WIDTH 128
//thread block for vertical filter. FILTERV_BLOCK_WIDTH can be (4, 8 or 16)
#define FILTERV_BLOCK_WIDTH 16
#define FILTERV_BLOCK_HEIGHT 32
//The corresponding image patch for a thread block
#define FILTERV_PIXEL_PER_THREAD 4
#define FILTERV_TILE_WIDTH FILTERV_BLOCK_WIDTH
#define FILTERV_TILE_HEIGHT (FILTERV_PIXEL_PER_THREAD * FILTERV_BLOCK_HEIGHT)
//////////////////////////////////////////////////////////
//thread block size for computing Difference of Gaussian
#define DOG_BLOCK_LOG_DIMX 7
#define DOG_BLOCK_LOG_DIMY 0
#define DOG_BLOCK_DIMX (1 << DOG_BLOCK_LOG_DIMX)
#define DOG_BLOCK_DIMY (1 << DOG_BLOCK_LOG_DIMY)
//////////////////////////////////////////////////////////
//thread block size for keypoint detection
#define KEY_BLOCK_LOG_DIMX 5 // 3
#define KEY_BLOCK_LOG_DIMY 2 // 3
#define KEY_BLOCK_DIMX (1<<KEY_BLOCK_LOG_DIMX)
#define KEY_BLOCK_DIMY (1<<KEY_BLOCK_LOG_DIMY)
//make KEY_BLOCK_LOG_DIMX 4 will make the write coalesced..
//but it seems uncoalesced writes don't affect the speed
//////////////////////////////////////////////////////////
//thread block size for initializing list generation (64, 128, 256, 512 ...)
#define HIST_INIT_WIDTH 128
//thread block size for generating feature list (32, 64, 128, 256, 512, ...)
#define LISTGEN_BLOCK_DIM 128
/////////////////////////////////////////////////////////
//how many keypoint orientations to compute in a block
#define ORIENTATION_COMPUTE_PER_BLOCK 64
//how many keypoint descriptor to compute in a block (2, 4, 8, 16, 32)
#define DESCRIPTOR_COMPUTE_PER_BLOCK 4
#define DESCRIPTOR_COMPUTE_BLOCK_SIZE (16 * DESCRIPTOR_COMPUTE_PER_BLOCK)
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// block size for the keypoint descriptor normalization kernel
// it is assumed that one descriptor is processed by one warp
// -> have to be multiple of warp size (32)
#define DESCRIPTOR_NORMALIZE_PER_BLOCK 128
#else
//how many keypoint descriptor to normalized in a block (32, ...)
#define DESCRIPTOR_NORMALIZE_PER_BLOCK 32
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
///////////////////////////////////////////
//Thread block size for visualization
//(This doesn't affect the speed of computation)
#define BLOCK_LOG_DIM 4
#define BLOCK_DIM (1 << BLOCK_LOG_DIM)
//End SiftGPU setting section.
//----------------------------------------------------------------
__device__ __constant__ float d_kernel[KERNEL_MAX_WIDTH];
texture<float, 1, cudaReadModeElementType> texData;
texture<unsigned char, 1, cudaReadModeNormalizedFloat> texDataB;
texture<float2, 2, cudaReadModeElementType> texDataF2;
texture<float4, 1, cudaReadModeElementType> texDataF4;
texture<int4, 1, cudaReadModeElementType> texDataI4;
texture<int4, 1, cudaReadModeElementType> texDataList;
//template<int i> __device__ float Conv(float *data) { return Conv<i-1>(data) + data[i]*d_kernel[i];}
//template<> __device__ float Conv<0>(float *data) { return data[0] * d_kernel[0]; }
//////////////////////////////////////////////////////////////
template<int FW> __global__ void FilterH( float* d_result, int width)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FILTERH_TILE_WIDTH + FW -1;
const int CACHE_COUNT = 2 + (CACHE_WIDTH - 2)/ FILTERH_TILE_WIDTH;
__shared__ float data[CACHE_WIDTH];
const int bcol = IMUL(blockIdx.x, FILTERH_TILE_WIDTH);
const int col = bcol + threadIdx.x;
const int index_min = IMUL(blockIdx.y, width);
const int index_max = index_min + width - 1;
int src_index = index_min + bcol - HALF_WIDTH + threadIdx.x;
int cache_index = threadIdx.x;
float value = 0;
#pragma unroll
for(int j = 0; j < CACHE_COUNT; ++j)
{
if(cache_index < CACHE_WIDTH)
{
int fetch_index = src_index < index_min? index_min : (src_index > index_max ? index_max : src_index);
data[cache_index] = tex1Dfetch(texData,fetch_index);
src_index += FILTERH_TILE_WIDTH;
cache_index += FILTERH_TILE_WIDTH;
}
}
__syncthreads();
if(col >= width)
return;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[threadIdx.x + i]* d_kernel[i]);
}
// value = Conv<FW-1>(data + threadIdx.x);
d_result[index_min + col] = value;
}
////////////////////////////////////////////////////////////////////
template<int FW> __global__ void FilterV(float* d_result, int width, int height)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FW + FILTERV_TILE_HEIGHT - 1;
const int TEMP = CACHE_WIDTH & 0xf;
// add some extra space to avoid bank conflict
#if FILTERV_TILE_WIDTH == 16
// make the stride 16 * n +/- 1
const int EXTRA = (TEMP == 1 || TEMP == 0) ? 1 - TEMP : 15 - TEMP;
#elif FILTERV_TILE_WIDTH == 8
// make the stride 16 * n +/- 2
const int EXTRA = (TEMP == 2 || TEMP == 1 || TEMP == 0) ? 2 - TEMP : (TEMP == 15? 3 : 14 - TEMP);
#elif FILTERV_TILE_WIDTH == 4
// make the stride 16 * n +/- 4
const int EXTRA = (TEMP >=0 && TEMP <=4) ? 4 - TEMP : (TEMP > 12? 20 - TEMP : 12 - TEMP);
#else
#error
#endif
const int CACHE_TRUE_WIDTH = CACHE_WIDTH + EXTRA;
const int CACHE_COUNT = (CACHE_WIDTH + FILTERV_BLOCK_HEIGHT - 1) / FILTERV_BLOCK_HEIGHT;
const int WRITE_COUNT = (FILTERV_TILE_HEIGHT + FILTERV_BLOCK_HEIGHT -1) / FILTERV_BLOCK_HEIGHT;
__shared__ float data[CACHE_TRUE_WIDTH * FILTERV_TILE_WIDTH];
const int row_block_first = IMUL(blockIdx.y, FILTERV_TILE_HEIGHT);
const int col = IMUL(blockIdx.x, FILTERV_TILE_WIDTH) + threadIdx.x;
const int row_first = row_block_first - HALF_WIDTH;
const int data_index_max = IMUL(height - 1, width) + col;
const int cache_col_start = threadIdx.y;
const int cache_row_start = IMUL(threadIdx.x, CACHE_TRUE_WIDTH);
int cache_index = cache_col_start + cache_row_start;
int data_index = IMUL(row_first + cache_col_start, width) + col;
if(col < width)
{
#pragma unroll
for(int i = 0; i < CACHE_COUNT; ++i)
{
if(cache_col_start < CACHE_WIDTH - i * FILTERV_BLOCK_HEIGHT)
{
int fetch_index = data_index < col ? col : (data_index > data_index_max? data_index_max : data_index);
data[cache_index + i * FILTERV_BLOCK_HEIGHT] = tex1Dfetch(texData,fetch_index);
data_index += IMUL(FILTERV_BLOCK_HEIGHT, width);
}
}
}
__syncthreads();
if(col >= width)
return;
int row = row_block_first + threadIdx.y;
int index_start = cache_row_start + threadIdx.y;
#pragma unroll
for(int i = 0; i < WRITE_COUNT; ++i, row += FILTERV_BLOCK_HEIGHT, index_start += FILTERV_BLOCK_HEIGHT)
{
if(row < height)
{
int index_dest = IMUL(row, width) + col;
float value = 0;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[index_start + i] * d_kernel[i]);
}
d_result[index_dest] = value;
}
}
}
template<int LOG_SCALE> __global__ void UpsampleKernel(float* d_result, int width)
{
const int SCALE = (1 << LOG_SCALE), SCALE_MASK = (SCALE - 1);
const float INV_SCALE = 1.0f / (float(SCALE));
int col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(col >= width)
return;
int row = blockIdx.y >> LOG_SCALE;
int index = row * width + col;
int dst_row = blockIdx.y;
int dst_idx = (width * dst_row + col) * SCALE;
int helper = blockIdx.y & SCALE_MASK;
if (helper)
{
float v11 = tex1Dfetch(texData, index);
float v12 = tex1Dfetch(texData, index + 1);
index += width;
float v21 = tex1Dfetch(texData, index);
float v22 = tex1Dfetch(texData, index + 1);
float w1 = INV_SCALE * helper, w2 = 1.0 - w1;
float v1 = (v21 * w1 + w2 * v11);
float v2 = (v22 * w1 + w2 * v12);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}
else
{
float v1 = tex1Dfetch(texData, index);
float v2 = tex1Dfetch(texData, index + 1);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
void ProgramCU::SampleImageU(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int width = src->GetImgWidth(), height = src->GetImgHeight();
src->BindTexture(texData);
dim3 grid((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height << log_scale);
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1:
UpsampleKernel<1> <<< grid, block>>> ((float*) dst->_cuData, width);
break;
case 2:
UpsampleKernel<2> <<< grid, block>>> ((float*) dst->_cuData, width);
break;
case 3:
UpsampleKernel<3> <<< grid, block>>> ((float*) dst->_cuData, width);
break;
default:
break;
}
}
template<int LOG_SCALE> __global__ void DownsampleKernel(float* d_result, int src_width, int dst_width)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width)
return;
const int src_col = min((dst_col << LOG_SCALE), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << LOG_SCALE;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
__global__ void DownsampleKernel(float* d_result, int src_width, int dst_width, const int log_scale)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width)
return;
const int src_col = min((dst_col << log_scale), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << log_scale;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
void ProgramCU::SampleImageD(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int src_width = src->GetImgWidth();
int dst_width = dst->GetImgWidth();
src->BindTexture(texData);
dim3 grid((dst_width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, dst->GetImgHeight());
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1:
DownsampleKernel<1> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width);
break;
case 2:
DownsampleKernel<2> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width);
break;
case 3:
DownsampleKernel<3> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width);
break;
default:
DownsampleKernel <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width, log_scale);
}
}
__global__ void ChannelReduce_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
d_result[index] = tex1Dfetch(texData, index*4);
}
__global__ void ChannelReduce_Convert_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
float4 rgba = tex1Dfetch(texDataF4, index);
d_result[index] = 0.299f * rgba.x + 0.587f* rgba.y + 0.114f * rgba.z;
}
void ProgramCU::ReduceToSingleChannel(CuTexImage* dst, CuTexImage* src, int convert_rgb)
{
int width = src->GetImgWidth();
int height = dst->GetImgHeight() ;
dim3 grid((width * height + FILTERH_TILE_WIDTH - 1) / FILTERH_TILE_WIDTH);
dim3 block(FILTERH_TILE_WIDTH);
if(convert_rgb)
{
src->BindTexture(texDataF4);
ChannelReduce_Convert_Kernel<<<grid, block>>>((float*)dst->_cuData);
}
else
{
src->BindTexture(texData);
ChannelReduce_Kernel<<<grid, block>>>((float*)dst->_cuData);
}
}
__global__ void ConvertByteToFloat_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
d_result[index] = tex1Dfetch(texDataB, index);
}
void ProgramCU::ConvertByteToFloat(CuTexImage*src, CuTexImage* dst)
{
int width = src->GetImgWidth();
int height = dst->GetImgHeight() ;
dim3 grid((width * height + FILTERH_TILE_WIDTH - 1) / FILTERH_TILE_WIDTH);
dim3 block(FILTERH_TILE_WIDTH);
src->BindTexture(texDataB);
ConvertByteToFloat_Kernel<<<grid, block>>>((float*)dst->_cuData);
}
void ProgramCU::CreateFilterKernel(float sigma, float* kernel, int& width)
{
int i, sz = int( ceil( GlobalUtil::_FilterWidthFactor * sigma -0.5) ); //
width = 2*sz + 1;
if(width > KERNEL_MAX_WIDTH)
{
//filter size truncation
sz = KERNEL_MAX_WIDTH >> 1;
width = KERNEL_MAX_WIDTH;
}
else if(width < KERNEL_MIN_WIDTH)
{
sz = KERNEL_MIN_WIDTH >> 1;
width =KERNEL_MIN_WIDTH;
}
float rv = 1.0f / (sigma*sigma), v, ksum = 0;
// pre-compute filter
for( i = -sz ; i <= sz ; ++i)
{
kernel[i+sz] = v = exp(-0.5f * i * i *rv) ;
ksum += v;
}
//normalize the kernel
rv = 1.0f / ksum;
for(i = 0; i< width ;i++)
kernel[i] *= rv;
}
template<int FW> void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf)
{
int width = src->GetImgWidth();
int height = src->GetImgHeight();
//horizontal filtering
src->BindTexture(texData);
dim3 gridh((width + FILTERH_TILE_WIDTH - 1)/FILTERH_TILE_WIDTH, height);
dim3 blockh(FILTERH_TILE_WIDTH);
FilterH<FW><<<gridh, blockh>>>((float*)buf->_cuData, width);
CheckErrorCUDA("FilterH");
///vertical filtering
buf->BindTexture(texData);
dim3 gridv((width + FILTERV_TILE_WIDTH - 1)/FILTERV_TILE_WIDTH, (height + FILTERV_TILE_HEIGHT - 1)/FILTERV_TILE_HEIGHT);
dim3 blockv(FILTERV_TILE_WIDTH, FILTERV_BLOCK_HEIGHT);
FilterV<FW><<<gridv, blockv>>>((float*)dst->_cuData, width, height);
CheckErrorCUDA("FilterV");
}
//////////////////////////////////////////////////////////////////////
// tested on 2048x1500 image, the time on pyramid construction is
// OpenGL version : 18ms
// CUDA version: 28 ms
void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf, float sigma)
{
float filter_kernel[KERNEL_MAX_WIDTH];
int width;
CreateFilterKernel(sigma, filter_kernel, width);
cudaMemcpyToSymbol(d_kernel, filter_kernel, width * sizeof(float), 0, cudaMemcpyHostToDevice);
switch(width)
{
case 5: FilterImage< 5>(dst, src, buf); break;
case 7: FilterImage< 7>(dst, src, buf); break;
case 9: FilterImage< 9>(dst, src, buf); break;
case 11: FilterImage<11>(dst, src, buf); break;
case 13: FilterImage<13>(dst, src, buf); break;
case 15: FilterImage<15>(dst, src, buf); break;
case 17: FilterImage<17>(dst, src, buf); break;
case 19: FilterImage<19>(dst, src, buf); break;
case 21: FilterImage<21>(dst, src, buf); break;
case 23: FilterImage<23>(dst, src, buf); break;
case 25: FilterImage<25>(dst, src, buf); break;
case 27: FilterImage<27>(dst, src, buf); break;
case 29: FilterImage<29>(dst, src, buf); break;
case 31: FilterImage<31>(dst, src, buf); break;
case 33: FilterImage<33>(dst, src, buf); break;
default: break;
}
}
texture<float, 1, cudaReadModeElementType> texC;
texture<float, 1, cudaReadModeElementType> texP;
texture<float, 1, cudaReadModeElementType> texN;
#ifdef GPU_HESSIAN
texture<float, 1, cudaReadModeElementType> texG;
// compute 3x3 Hessian values from symmetric differences
#define COMPUTE_HESSIAN(tex, idx) \
float v11 = tex1Dfetch(tex, idx - width - 1); \
float v12 = tex1Dfetch(tex, idx - width); \
float v13 = tex1Dfetch(tex, idx - width + 1); \
\
float v21 = tex1Dfetch(tex, idx - 1); \
float v22 = tex1Dfetch(tex, idx); \
float v23 = tex1Dfetch(tex, idx + 1); \
\
float v31 = tex1Dfetch(tex, idx + width - 1); \
float v32 = tex1Dfetch(tex, idx + width); \
float v33 = tex1Dfetch(tex, idx + width + 1); \
\
float Lxx = (v21 - 2.0f*v22 + v23); \
float Lyy = (v12 - 2.0f*v22 + v32); \
float Lxy = (v13 - v11 + v31 - v33) * 0.25f; \
void __global__ ComputeHessian_Kernel(float *hessian, float2 *got, int width, int height, float norm)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if((col < width) && (row < height))
{
int index = IMUL(row, width) + col;
COMPUTE_HESSIAN(texC, index)
// compute determinant of hessian matrix, normalize and write out
hessian[index] = (Lxx*Lyy - Lxy*Lxy)*norm;
// precompute gradient and rotation
float dx = v23 - v21;
float dy = v32 - v12;
float gradient = 0.5f * sqrt(dx*dx + dy*dy);
float rot = ((gradient == 0.0f) ? 0.0f : atan2(dy, dx));
got[index] = make_float2(gradient, rot);
}
}
void __global__ ComputeHessian_Kernel(float *hessian, int width, int height, float norm)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if((col < width) && (row < height))
{
int index = IMUL(row, width) + col;
COMPUTE_HESSIAN(texC, index)
// compute determinant of hessian matrix, normalize and write out
hessian[index] = (Lxx*Lyy - Lxy*Lxy)*norm;
}
}
void ProgramCU::ComputeHessian(CuTexImage* gus, CuTexImage* dog, CuTexImage* got, float norm)
{
int width = gus->GetImgWidth();
int height = gus->GetImgHeight();
dim3 grid((width + DOG_BLOCK_DIMX - 1) / DOG_BLOCK_DIMX, (height + DOG_BLOCK_DIMY - 1) / DOG_BLOCK_DIMY);
dim3 block(DOG_BLOCK_DIMX, DOG_BLOCK_DIMY);
gus->BindTexture(texC);
if(got->_cuData)
ComputeHessian_Kernel<<<grid, block>>>((float*)dog->_cuData, (float2*)got->_cuData, width, height, norm*norm);
else
ComputeHessian_Kernel<<<grid, block>>>((float*)dog->_cuData, width, height, norm*norm);
}
#else
void __global__ ComputeDOG_Kernel(float* d_dog, float2* d_got, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
float vxn = tex1Dfetch(texC, index + 1);
float vxp = tex1Dfetch(texC, index - 1);
float vyp = tex1Dfetch(texC, index - width);
float vyn = tex1Dfetch(texC, index + width);
float dx = vxn - vxp;
float dy = vyn - vyp;
float grd = 0.5f * sqrt(dx * dx + dy * dy);
float rot = (grd == 0.0f ? 0.0f : atan2(dy, dx));
d_got[index] = make_float2(grd, rot);
}
}
void __global__ ComputeDOG_Kernel(float* d_dog, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
}
}
void ProgramCU::ComputeDOG(CuTexImage* gus, CuTexImage* dog, CuTexImage* got)
{
int width = gus->GetImgWidth();
int height = gus->GetImgHeight();
dim3 grid((width + DOG_BLOCK_DIMX - 1)/ DOG_BLOCK_DIMX, (height + DOG_BLOCK_DIMY - 1)/DOG_BLOCK_DIMY);
dim3 block(DOG_BLOCK_DIMX, DOG_BLOCK_DIMY);
gus->BindTexture(texC);
(gus -1)->BindTexture(texP);
if(got->_cuData)
ComputeDOG_Kernel<<<grid, block>>>((float*) dog->_cuData, (float2*) got->_cuData, width, height);
else
ComputeDOG_Kernel<<<grid, block>>>((float*) dog->_cuData, width, height);
}
#endif // GPU_HESSIAN
#ifdef GPU_HESSIAN
// GPU_HESSIAN: added test (response<0) and (response>0)
#define READ_CMP_DOG_DATA(datai, tex, idx) \
datai[0] = tex1Dfetch(tex, idx - 1); \
datai[1] = tex1Dfetch(tex, idx); \
datai[2] = tex1Dfetch(tex, idx + 1); \
if(response > nmax) \
{ \
nmax = max(nmax, datai[0]); \
nmax = max(nmax, datai[1]); \
nmax = max(nmax, datai[2]); \
if((response < nmax) || (response < 0)) \
goto key_finish; \
} \
else \
{ \
nmin = min(nmin, datai[0]); \
nmin = min(nmin, datai[1]); \
nmin = min(nmin, datai[2]); \
if((response > nmin) || (response > 0)) \
goto key_finish; \
}
#else
#define READ_CMP_DOG_DATA(datai, tex, idx) \
datai[0] = tex1Dfetch(tex, idx - 1); \
datai[1] = tex1Dfetch(tex, idx); \
datai[2] = tex1Dfetch(tex, idx + 1); \
if(response > nmax) \
{ \
nmax = max(nmax, datai[0]); \
nmax = max(nmax, datai[1]); \
nmax = max(nmax, datai[2]); \
if(response < nmax) \
goto key_finish; \
} \
else \
{ \
nmin = min(nmin, datai[0]); \
nmin = min(nmin, datai[1]); \
nmin = min(nmin, datai[2]); \
if(response > nmin) \
goto key_finish; \
}
#endif // GPU_HESSIAN
void __global__ ComputeKEY_Kernel(float4 *d_key, int width, int colmax, int rowmax, float dog_threshold0, float dog_threshold, float edge_threshold, int subpixel_localization
#if defined GENERATE_FEATURE_LIST_USING_ATOMICS && (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED)
, int *featureTexLen
#endif // GENERATE_FEATURE_LIST_USING_ATOMICS && (GPU_HESSIAN || GPU_SIFT_MODIFIED)
)
{
int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x;
float data[3][3];
float datap[3][3];
float datan[3][3];
float response = 0.0f;
int index = IMUL(row, width) + col;
int idx[3] = {index - width, index, index + width};
float nmax, nmin, result = 0.0f;
float dx = 0, dy = 0, ds = 0;
int in_image = 0;
bool offset_test_passed = true;
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
unsigned short pointType = FEATURE_TYPE_NONE;
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
if((row > 0) && (col > 0) && (row < rowmax) && (col < colmax))
{
in_image = 1;
data[1][1] = response = tex1Dfetch(texC, idx[1]);
if(fabs(response) <= dog_threshold0)
goto key_finish;
// fetch left and right neighbour
data[1][0] = tex1Dfetch(texC, idx[1] - 1);
data[1][2] = tex1Dfetch(texC, idx[1] + 1);
nmax = max(data[1][0], data[1][2]);
nmin = min(data[1][0], data[1][2]);
if((response <= nmax) && (response >= nmin))
goto key_finish;
//if((response > nmax && response < 0 )|| (response < nmin && response > 0)) goto key_finish;
// fetch values from the row above
READ_CMP_DOG_DATA(data[0], texC, idx[0]);
// fetch values from one the row below
READ_CMP_DOG_DATA(data[2], texC, idx[2]);
// edge supression
float vx2 = response * 2.0f;
float fxx = data[1][0] + data[1][2] - vx2;
float fyy = data[0][1] + data[2][1] - vx2;
float fxy = 0.25f * (data[2][2] + data[0][0] - data[2][0] - data[0][2]);
float temp1 = fxx * fyy - fxy * fxy;
float temp2 = (fxx + fyy) * (fxx + fyy);
if((temp1 <= 0) || (temp2 > edge_threshold * temp1))
goto key_finish; // local neighbourhood looks like an edge
// read the previous level
READ_CMP_DOG_DATA(datap[0], texP, idx[0]);
READ_CMP_DOG_DATA(datap[1], texP, idx[1]);
READ_CMP_DOG_DATA(datap[2], texP, idx[2]);
// read the next level
READ_CMP_DOG_DATA(datan[0], texN, idx[0]);
READ_CMP_DOG_DATA(datan[1], texN, idx[1]);
READ_CMP_DOG_DATA(datan[2], texN, idx[2]);
if(subpixel_localization)
{
// subpixel localization
float fx = 0.5f * (data[1][2] - data[1][0]);
float fy = 0.5f * (data[2][1] - data[0][1]);
float fs = 0.5f * (datan[1][1] - datap[1][1]);
float fss = (datan[1][1] + datap[1][1] - vx2);
float fxs = 0.25f * (datan[1][2] + datap[1][0] - datan[1][0] - datap[1][2]);
float fys = 0.25f * (datan[2][1] + datap[0][1] - datan[0][1] - datap[2][1]);
// need to solve dx, dy, ds;
// |-fx| | fxx fxy fxs | |dx|
// |-fy| = | fxy fyy fys | * |dy|
// |-fs| | fxs fys fss | |ds|
float4 A0 = (fxx > 0) ? make_float4(fxx, fxy, fxs, -fx) : make_float4(-fxx, -fxy, -fxs, fx);
float4 A1 = (fxy > 0) ? make_float4(fxy, fyy, fys, -fy) : make_float4(-fxy, -fyy, -fys, fy);
float4 A2 = (fxs > 0) ? make_float4(fxs, fys, fss, -fs) : make_float4(-fxs, -fys, -fss, fs);
float maxa = max(max(A0.x, A1.x), A2.x);
if(maxa >= 1e-10)
{
if(maxa == A1.x)
{
float4 TEMP = A1; A1 = A0; A0 = TEMP;
}
else if(maxa == A2.x)
{
float4 TEMP = A2; A2 = A0; A0 = TEMP;
}
A0.y /= A0.x; A0.z /= A0.x; A0.w /= A0.x;
A1.y -= A1.x * A0.y; A1.z -= A1.x * A0.z; A1.w -= A1.x * A0.w;
A2.y -= A2.x * A0.y; A2.z -= A2.x * A0.z; A2.w -= A2.x * A0.w;
if(abs(A2.y) > abs(A1.y))
{
float4 TEMP = A2; A2 = A1; A1 = TEMP;
}
if(abs(A1.y) >= 1e-10)
{
A1.z /= A1.y; A1.w /= A1.y;
A2.z -= A2.y * A1.z; A2.w -= A2.y * A1.w;
if(abs(A2.z) >= 1e-10)
{
ds = A2.w / A2.z;
dy = A1.w - ds * A1.z;
dx = A0.w - ds * A0.z - dy * A0.y;
response = data[1][1] + 0.5f * (dx * fx + dy * fy + ds * fs);
offset_test_passed = (fabs(response) > dog_threshold) && (fabs(ds) < 1.0f) && (fabs(dx) < 1.0f) && (fabs(dy) < 1.0f);
}
}
}
}
if(offset_test_passed)
#if defined GPU_HESSIAN
{
// find blob point type from Hessian matrix H, we know that:
// - if H is positive definite it is a DARK blob
// - if H is negative definite it is a BRIGHT blob
// - det H is negative it is a SADDLE point
data[1][1] = tex1Dfetch(texG, idx[1]);
data[1][0] = tex1Dfetch(texG, idx[1] - 1);
data[1][2] = tex1Dfetch(texG, idx[1] + 1);
if(response < 0)
{
pointType = FEATURE_TYPE_SADDLE_POINT;
}
else
{
// at this point we know that 2x2 determinant is positive
// so only check the remaining 1x1 subdeterminant
float Lxx = data[1][0] - 2*data[1][1] + data[1][2];
pointType = (Lxx > 0) ? FEATURE_TYPE_DARK_BLOB : FEATURE_TYPE_BRIGHT_BLOB;
}
}
#elif defined GPU_SIFT_MODIFIED
result = (response > nmax) ? FEATURE_TYPE_BRIGHT_BLOB : FEATURE_TYPE_DARK_BLOB;
#else
result = (response > nmax) ? 1.0 : -1.0;
#endif // GPU_HESSIAN / GPU_SIFT_MODIFIED
}
key_finish:
if(in_image)
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
{
// result: response 16b | 14b unused | 2b type
unsigned int uspack = (((unsigned int)__float2half_rn(response)) << 16) | 0x00000004u | pointType;
result = *((float *)(&uspack)); // __uint_as_float(uspack); // CUDA 7.5
d_key[index] = make_float4(result, dx, dy, ds);
}
#else
d_key[index] = make_float4(result, dx, dy, ds);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
#if defined GENERATE_FEATURE_LIST_USING_ATOMICS && (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED)
int count = __syncthreads_count(pointType != FEATURE_TYPE_NONE);
if(threadIdx.x+threadIdx.y*blockDim.x == 0)
{
atomicAdd(featureTexLen, count);
}
#endif // GENERATE_FEATURE_LIST_USING_ATOMICS && (GPU_HESSIAN || GPU_SIFT_MODIFIED)
}
void ProgramCU::ComputeKEY(CuTexImage* dog, CuTexImage* key
#if defined GPU_HESSIAN
, CuTexImage* gus
#endif // GPU_HESSIAN
, float Tdog, float Tedge
#if defined GENERATE_FEATURE_LIST_USING_ATOMICS && (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED)
, int *featureTexLen, int featureTexIdx
#endif // GENERATE_FEATURE_LIST_USING_ATOMICS && (GPU_HESSIAN || GPU_SIFT_MODIFIED)
)
{
int width = dog->GetImgWidth();
int height = dog->GetImgHeight();
float Tdog1 = (GlobalUtil::_SubpixelLocalization ? 0.8f : 1.0f) * Tdog;
CuTexImage *dogp = dog - 1;
CuTexImage *dogn = dog + 1;
dim3 grid((width + KEY_BLOCK_DIMX - 1)/KEY_BLOCK_DIMX, (height + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY);
dim3 block(KEY_BLOCK_DIMX, KEY_BLOCK_DIMY);
dogp->BindTexture(texP);
dog->BindTexture(texC);
dogn->BindTexture(texN);
#if defined GPU_HESSIAN
gus->BindTexture(texG);
#endif // GPU_HESSIAN
Tedge = (Tedge+1)*(Tedge+1) / Tedge;
ComputeKEY_Kernel<<<grid, block>>>((float4*)key->_cuData, width, width-1, height-1, Tdog1, Tdog, Tedge, GlobalUtil::_SubpixelLocalization
#if defined GENERATE_FEATURE_LIST_USING_ATOMICS && (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED)
, featureTexLen + featureTexIdx
#endif // GENERATE_FEATURE_LIST_USING_ATOMICS && (GPU_HESSIAN || GPU_SIFT_MODIFIED)
);
}
#if defined GENERATE_FEATURE_LIST_USING_ATOMICS && (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED)
#define GENERATE_LIST_BLOCK_DIMX 32
#define GENERATE_LIST_BLOCK_DIMY 4
__device__ int GFL_WarpScan(int val, volatile int *sData, int threadID)
{
// pad each warp with zeros
// int idx = 2*threadIdx.x - (threadIdx.x & (warpSize-1)); // 1D
// int id = threadIdx.x + threadIdx.y*blockDim.x; // 2D
// int idx = 2*id - (id & (warpSize-1));
int idx = 2*threadID - (threadID & (warpSize-1));
sData[idx] = 0;
idx += warpSize;
int t = sData[idx] = val;
sData[idx] = t = t + sData[idx - 1];
sData[idx] = t = t + sData[idx - 2];
sData[idx] = t = t + sData[idx - 4];
sData[idx] = t = t + sData[idx - 8];
sData[idx] = t = t + sData[idx - 16];
return sData[idx-1];
}
__device__ unsigned int GFL_LaneMaskLt(int threadID)
{
// const unsigned int lane = threadIdx.x & (warpSize-1); // 1D block
// const unsigned int lane = (threadIdx.x + threadIdx.y*blockDim.x) & (warpSize-1); // 2D block
const unsigned int lane = threadID & (warpSize-1);
return (1 << (lane)) - 1;
}
__device__ unsigned int GFL_WarpPrefixSums(bool p, int threadID)
{
const unsigned int mask = GFL_LaneMaskLt(threadID);
unsigned int b = __ballot(p);
return __popc(b & mask);
}
__device__ int GFL_BlockBinaryPrefixSums(int x, int idx)
{
extern __shared__ int sData[];
//int idx = threadIdx.x + threadIdx.y*blockDim.x;
// A. Compute exclusive prefix sums within each warp
int warpPrefix = GFL_WarpPrefixSums(x, idx);
// int idx = threadIdx.x; // 1D
// int idx = threadIdx.x + threadIdx.y*blockDim.x; // 2D
int warpIdx = idx / warpSize;
int laneIdx = idx & (warpSize - 1);
// B. The last thread of each warp stores inclusive
// prefix sum to the warp’s index in shared memory
if(laneIdx == warpSize - 1)
sData[warpIdx] = warpPrefix + x;
__syncthreads();
// C. One warp scans the warp partial sums
if(idx < warpSize)
sData[idx] = GFL_WarpScan(sData[idx], sData, idx);
__syncthreads();
// D. Each thread adds prefix sums of warp partial
// sums to its own intra-warp prefix sums
return warpPrefix + sData[warpIdx];
}
void __global__ ListGen_Kernel(int4* d_list, int len, int width, int height, int *counter)
{
int row = IMUL(blockIdx.y, GENERATE_LIST_BLOCK_DIMY) + threadIdx.y;
int col = IMUL(blockIdx.x, GENERATE_LIST_BLOCK_DIMX) + threadIdx.x;
// read the detected keypoint type -> flag=0 (type == FEATURE_TYPE_NONE) / 1 (otherwise)
unsigned int flag = 0;
if((row > 0) && (col > 0) && (row < height-1) && (col < width-1))
{
int index = IMUL(row, width) + col;
float4 value = tex1Dfetch(texDataF4, index);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// value: (response 16b | 14b unused | 2b type), dx, dy, ds
// type = *((unsigned int *)(&offset.x)) & 0x00000003u;
flag = ((*((unsigned int *)(&value.x)) & 0x00000003u) != FEATURE_TYPE_NONE) ? 1 : 0;
#else
// value: (response 16b | 16b type), dx, dy, ds ... type = -1, 0, +1
unsigned int resultUInt = *((unsigned int*)(&value.x)) & 0x0000FFFFu; // float_as_uint(value.x) & 0x0000FFFFu; // CUDA 7.5
float result = __half2float(resultUInt);
flag = (fabs(result) > 0.5f) ? 1 : 0; // flag = (result != 0.0f) ? 1 : 0;
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
int idxWithinBlock = IMUL(threadIdx.y, blockDim.x) + threadIdx.x;
// compute prefix sums for each thread int the block
int blockPrefixSum = GFL_BlockBinaryPrefixSums(flag, idxWithinBlock);
// allocate enough space in the feature list to store all features in this block
// number of features can be obtained by __syncthreads_count() or it is given
// by the prefix sum of the last thread in the block plus one (if the last thread
// represents detected feature)
__shared__ int blockStart; // index in the feature list of the first keypoint in the block
// int count = __syncthreads_count(flag);
// if(idxWitninBlock == 0)
// {
// blockStart = atomicAdd(counter, count);
// }
// __syncthreads();
if(idxWithinBlock == IMUL(blockDim.y, blockDim.x) - 1)
{
blockStart = atomicAdd(counter, blockPrefixSum + flag);
}
__syncthreads();
// put detected keypoint into the feature list
if(flag)
d_list[blockStart + blockPrefixSum] = make_int4(col, row, 0, 0);
}
#else
void __global__ InitHist_Kernel(int4* hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if((row < height) && (col < wd))
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
// each thread process 4 subsequent colums values in the same row
if((row > 0) && (row < height-1))
{
#pragma unroll
for(int i = 0; i < 4 ; ++i, ++scol)
{
float4 temp = tex1Dfetch(texDataF4, sidx+i);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
unsigned int featureType = *((unsigned int*)(&temp.x)) & 0x00000003u; // float_as_uint(temp.x) & 0x00000003u; // CUDA 7.5
v[i] = ((scol < ws-1) && (scol > 0) && (featureType != FEATURE_TYPE_NONE)) ? 1 : 0;
#else
v[i] = ((scol < ws-1) && (scol > 0) && (temp.x != 0.0)) ? 1 : 0;
#endif // GPU_SIFT_MODIFIED || GPU_HESSIAN
}
}
hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::InitHistogram(CuTexImage* key, CuTexImage* hist)
{
int ws = key->GetImgWidth();
int hs = key->GetImgHeight();
int wd = hist->GetImgWidth();
int hd = hist->GetImgHeight();
dim3 grid((wd + HIST_INIT_WIDTH - 1) / HIST_INIT_WIDTH, hd);
dim3 block(HIST_INIT_WIDTH, 1);
key->BindTexture(texDataF4);
InitHist_Kernel<<<grid, block>>>((int4*) hist->_cuData, ws, wd, hd);
}
void __global__ ReduceHist_Kernel(int4* d_hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if((row < height) && (col < wd))
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
#pragma unroll
for(int i = 0; (i < 4) && (scol < ws); ++i, ++scol)
{
int4 temp = tex1Dfetch(texDataI4, sidx + i);
v[i] = temp.x + temp.y + temp.z + temp.w;
}
d_hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::ReduceHistogram(CuTexImage* hist1, CuTexImage* hist2)
{
int ws = hist1->GetImgWidth();
int hs = hist1->GetImgHeight();
int wd = hist2->GetImgWidth();
int hd = hist2->GetImgHeight();
int temp = (int)floor(logf(float(wd * 2 / 3)) / logf(2.0f));
const int wi = min(7, max(temp , 0));
hist1->BindTexture(texDataI4);
const int BW = 1 << wi;
const int BH = 1 << (7 - wi);
dim3 grid((wd + BW - 1) / BW, (hd + BH -1) / BH);
dim3 block(BW, BH);
ReduceHist_Kernel<<<grid, block>>>((int4*)hist2->_cuData, ws, wd, hd);
}
void __global__ ListGen_Kernel(int4* d_list, int len, int width)
{
int idx1 = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// GPU_HESSIAN fix
if(idx1 >= len)
return;
#endif // GPU_HESSIAN
int4 pos = tex1Dfetch(texDataList, idx1);
int idx2 = IMUL(pos.y, width) + pos.x;
int4 temp = tex1Dfetch(texDataI4, idx2);
int sum1 = temp.x + temp.y;
int sum2 = sum1 + temp.z;
pos.x <<= 2;
if(pos.z >= sum2)
{
pos.x += 3;
pos.z -= sum2;
}
else if(pos.z >= sum1)
{
pos.x += 2;
pos.z -= sum1;
}
else if(pos.z >= temp.x)
{
pos.x += 1;
pos.z -= temp.x;
}
d_list[idx1] = pos;
}
#endif // GENERATE_FEATURE_LIST_USING_ATOMICS && (GPU_HESSIAN || GPU_SIFT_MODIFIED)
#if defined GENERATE_FEATURE_LIST_USING_ATOMICS && (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED)
// input list (x, y) (x, y) ....
void ProgramCU::GenerateList(CuTexImage* list, CuTexImage* key, int *counter)
{
int len = list->GetImgWidth();
key->BindTexture(texDataF4);
int width = key->GetImgWidth();
int height = key->GetImgHeight();
dim3 grid((width + GENERATE_LIST_BLOCK_DIMX - 1)/GENERATE_LIST_BLOCK_DIMX, (height + GENERATE_LIST_BLOCK_DIMY - 1)/GENERATE_LIST_BLOCK_DIMY);
dim3 block(GENERATE_LIST_BLOCK_DIMX, GENERATE_LIST_BLOCK_DIMY);
// shared memory is used to store warp scan data
ListGen_Kernel<<<grid, block, 2*32*sizeof(int)>>>((int4*)list->_cuData, len, width, height, counter);
}
#else
// input list (x, y) (x, y) ....
void ProgramCU::GenerateList(CuTexImage* list, CuTexImage* hist)
{
int len = list->GetImgWidth();
list->BindTexture(texDataList);
hist->BindTexture(texDataI4);
dim3 grid((len + LISTGEN_BLOCK_DIM -1) / LISTGEN_BLOCK_DIM);
dim3 block(LISTGEN_BLOCK_DIM);
ListGen_Kernel<<<grid, block>>>((int4*)list->_cuData, len, hist->GetImgWidth());
}
#endif // GENERATE_FEATURE_LIST_USING_ATOMICS && (GPU_HESSIAN || GPU_SIFT_MODIFIED)
void __global__ ComputeOrientation_Kernel(float4* d_list, int list_len, int width, int height, float sigma, float sigma_step, float gaussian_factor, float sample_factor,
int num_orientation, int existing_keypoint, int subpixel, int keepsign
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
, bool doHalfSIFT
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
)
{
const float ten_degree_per_radius = 5.7295779513082320876798154814105;
const float radius_per_ten_degrees = 1.0 / 5.7295779513082320876798154814105;
int idx = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
if(idx >= list_len)
return;
float4 key;
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
union {
float flt; // include response (16b), unused (14b), feature type (2b)
unsigned int uint;
} additionalData;
additionalData.flt = 0.0f;
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
if(existing_keypoint)
{
key = tex1Dfetch(texDataF4, idx);
// read the data
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// unpack scale, x, and y
// later store just the strongest computed orientation
// existing keypoint input
// key.x: response 8b H (cleared to zero) | x 24b-14.10
// key.y: response 8b L (cleared to zero) | y 24b-14.10
// key.z: 2b type | 14b unused | scale 16b-8.8
// key.w: orientation (if any)
// extract x position
unsigned int tmpValue = *((unsigned int *)(&key.x)); // __float_as_uint(key.x); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.x = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract y position
tmpValue = *((unsigned int *)(&key.y)); // __float_as_uint(key.y); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.y = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract scale
tmpValue = *((unsigned int *)(&key.z)); // __float_as_uint(key.z); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_SCALE_MASK;
key.z = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_SCALE_PRECISION_BITS);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
else
{
int4 ikey = tex1Dfetch(texDataList, idx);
key.x = ikey.x + 0.5f;
key.y = ikey.y + 0.5f;
key.z = sigma;
#if !defined GPU_HESSIAN && !defined GPU_SIFT_MODIFIED
if(subpixel || keepsign)
{
#endif // !GPU_HESSIAN && !GPU_SIFT_MODIFIED
// offset: x(response 16b | 14b unused | 2b type), dx, dy, ds
float4 offset = tex1Dfetch(texDataF4, IMUL(width, ikey.y) + ikey.x);
if(subpixel)
{
key.x += offset.y;
key.y += offset.z;
key.z *= pow(sigma_step, offset.w);
}
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
additionalData.flt = offset.x;
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
#if !defined GPU_HESSIAN && !defined GPU_SIFT_MODIFIED
if(keepsign) // not supported for hessian
key.z *= offset.x;
#endif // !GPU_HESSIAN && !GPU_SIFT_MODIFIED
#if !defined GPU_HESSIAN && !defined GPU_SIFT_MODIFIED
}
#endif // !GPU_HESSIAN && !GPU_SIFT_MODIFIED
}
if(num_orientation == 0)
{
key.w = 0;
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
goto key_store_finish;
#else
d_list[idx] = key;
return;
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
float vote[37];
float gsigma = key.z * gaussian_factor;
float win = fabs(key.z) * sample_factor;
float dist_threshold = win * win + 0.5;
float factor = -0.5f / (gsigma * gsigma);
float xmin = max(1.5f, floor(key.x - win) + 0.5f);
float ymin = max(1.5f, floor(key.y - win) + 0.5f);
float xmax = min(width - 1.5f, floor(key.x + win) + 0.5f);
float ymax = min(height -1.5f, floor(key.y + win) + 0.5f);
#pragma unroll
for(int i = 0; i < 36; ++i)
vote[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
float dy = y - key.y;
dy *= dy;
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - key.x;
float sq_dist = dx * dx + dy;
if(sq_dist >= dist_threshold)
continue;
float2 got = tex2D(texDataF2, x, y);
// float weight = got.x * exp(sq_dist * factor);
// float fidx = floorf(got.y * ten_degree_per_radius);
// int oidx = fidx;
int oidx = (int)floorf(got.y * ten_degree_per_radius);
if(oidx < 0)
oidx += 36;
vote[oidx] += got.x * expf(sq_dist * factor); // vote[oidx] += weight;
}
}
// filter the vote
const float one_third = 1.0 / 3.0;
#pragma unroll
for(int i = 0; i < 6; ++i)
{
vote[36] = vote[0];
float pre = vote[35];
#pragma unroll
for(int j = 0; j < 36; ++j)
{
float temp = one_third * (pre + vote[j] + vote[j + 1]);
pre = vote[j];
vote[j] = temp;
}
}
vote[36] = vote[0];
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
if(doHalfSIFT)
{
#pragma unroll
for(int i = 0; i < 18; i++)
{
vote[i] += vote[i+18];
vote[i+18] = 0;
}
}
int orientationsCount = 0;
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
// just one orientation
if((num_orientation == 1) || existing_keypoint)
{
int index_max = 0;
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i)
{
index_max = (vote[i] > max_vote) ? i : index_max;
max_vote = max(max_vote, vote[i]);
}
float pre = vote[(index_max == 0) ? 35 : index_max - 1];
float next = vote[index_max + 1];
float weight = max_vote;
float off = 0.5f * FDIV(next - pre, weight + weight - next - pre);
key.w = radius_per_ten_degrees * (index_max + 0.5f + off);
#if !defined GPU_HESSIAN && !defined GPU_SIFT_MODIFIED
d_list[idx] = key;
#endif // !GPU_HESSIAN && !GPU_SIFT_MODIFIED
}
// multi-orientations allowed
else
{
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// up to 4 orientations may be stored
// max number of stored orientations depends on num_orientations parameter (1..4)
#define MAX_ORIENTATIONS 4
// find the maximum value
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i)
max_vote = max(max_vote, vote[i]);
float vote_threshold = max_vote * 0.8f;
float pre = vote[35];
float max_vot[MAX_ORIENTATIONS+1];
float max_rot[MAX_ORIENTATIONS+1];
#pragma unroll
for(int i=0; i < 36; ++i)
{
float next = vote[i + 1];
if((vote[i] > vote_threshold) && (vote[i] > pre) && (vote[i] > next)) // max from neighbours
{
float di = 0.5f * FDIV(next - pre, vote[i] + vote[i] - next - pre);
float rot = i + di + 0.5f;
float weight = vote[i];
int idx = orientationsCount;
if(orientationsCount > 0)
{
// shift values
while((idx > 0) && (max_vot[idx-1] < weight)) {
max_vot[idx] = max_vot[idx-1];
max_rot[idx] = max_rot[idx-1];
idx--;
}
}
// store maximum found
max_vot[idx] = weight;
max_rot[idx] = rot;
if(orientationsCount < MAX_ORIENTATIONS)
orientationsCount++;
}
pre = vote[i];
}
unsigned int packedOrientations = 0;
// first 4 orientations (if exist)
unsigned int maxCount = min(4, orientationsCount);
int idx = 0;
for(; idx < maxCount; idx++)
{
float orientation = max_rot[idx] / 36.0f;
if(orientation < 0)
orientation += 1.0f;
unsigned int uiOrientation = (unsigned int) floorf(orientation * 255.0f);
packedOrientations = packedOrientations | (uiOrientation << 8*idx);
}
key.w = *((float *)(&packedOrientations)); // __uint_as_float(packedOrientations); // CUDA 7.5
#else
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i)
max_vote = max(max_vote, vote[i]);
float vote_threshold = max_vote * 0.8f;
float pre = vote[35];
float max_rot[2], max_vot[2] = {0, 0};
int ocount = 0;
#pragma unroll
for(int i=0; i < 36; ++i)
{
float next = vote[i + 1];
if((vote[i] > vote_threshold) && (vote[i] > pre) && (vote[i] > next))
{
float di = 0.5f * FDIV(next - pre, vote[i] + vote[i] - next - pre);
float rot = i + di + 0.5f;
float weight = vote[i];
///
if(weight > max_vot[1])
{
if(weight > max_vot[0])
{
max_vot[1] = max_vot[0];
max_rot[1] = max_rot[0];
max_vot[0] = weight;
max_rot[0] = rot;
}
else
{
max_vot[1] = weight;
max_rot[1] = rot;
}
ocount ++;
}
}
pre = vote[i];
}
float fr1 = max_rot[0] / 36.0f;
if(fr1 < 0)
fr1 += 1.0f;
unsigned short us1 = (ocount == 0) ? 65535 : ((unsigned short)floor(fr1 * 65535.0f));
unsigned short us2 = 65535;
if(ocount > 1)
{
float fr2 = max_rot[1] / 36.0f;
if(fr2 < 0)
fr2 += 1.0f;
us2 = (unsigned short) floor(fr2 * 65535.0f);
}
unsigned int uspack = (us2 << 16) | us1;
key.w = *((float *)(&uspack)); // __uint_as_float(uspack); // CUDA 7.5
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
#if !defined GPU_HESSIAN && !defined GPU_SIFT_MODIFIED
d_list[idx] = key;
#endif // !GPU_HESSIAN && !GPU_SIFT_MODIFIED
}
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
key_store_finish:
// input:
// additionalData: response 16b | 14b unused | 2b type
// output in the feature list d_list
// key.x: response 8b H | x 24b-14.10
// key.y: response 8b L | y 24b-14.10
// key.z: 2b type | 3b orientations count | 11b unused | scale 16b-8.8
// key.w: 8b orientation1 | 8b orientation2 | 8b orientation3 | 8b orientation4
if(!existing_keypoint)
{
unsigned int posX = (unsigned int)FLOAT_TO_FIXED_POINT(key.x, FIXED_POINT_POSITION_PRECISION_BITS);
posX = posX & FIXED_POINT_POSITION_MASK;
unsigned int posY = (unsigned int)FLOAT_TO_FIXED_POINT(key.y, FIXED_POINT_POSITION_PRECISION_BITS);
posY = posY & FIXED_POINT_POSITION_MASK;
// store response
posX = posX | (additionalData.uint & FIXED_POINT_RESPONSE_MASK);
posY = posY | ((additionalData.uint << 8) & FIXED_POINT_RESPONSE_MASK);
unsigned int scale = (unsigned int)(FLOAT_TO_FIXED_POINT(key.z, FIXED_POINT_SCALE_PRECISION_BITS));
scale = scale & FIXED_POINT_SCALE_MASK;
// type & orientations count (0 means single float value in key.w, otherwise we have to unpack 8b orientations into floats)
scale = scale | ((additionalData.uint & 0x00000003u) << 30) | ((orientationsCount & 0x00000007u) << 27);
key.z = *((float *)(&scale)); // __uint_as_float(scale); // CUDA 7.5
key.x = *((float *)(&posX)); // __uint_as_float(posX); // CUDA 7.5
key.y = *((float *)(&posY)); // __uint_as_float(posY); // CUDA 7.5
d_list[idx] = key;
}
else
{
// for existing keypoint just overwrite computed orientation and other components remain the same (x, y, and scale)
// ReshapeFeatureListCPU() is not called when we have just one orientation
d_list[idx].w = key.w;
}
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
void ProgramCU::ComputeOrientation(CuTexImage* list, CuTexImage* got, CuTexImage* key, float sigma, float sigma_step, int existing_keypoint)
{
int len = list->GetImgWidth();
if(len <= 0)
return;
int width = got->GetImgWidth();
int height = got->GetImgHeight();
if(existing_keypoint)
{
list->BindTexture(texDataF4);
}
else
{
list->BindTexture(texDataList);
#if !defined GPU_HESSIAN && !defined GPU_SIFT_MODIFIED
if(GlobalUtil::_SubpixelLocalization)
#endif // !GPU_HESSIAN && !GPU_SIFT_MODIFIED
key->BindTexture(texDataF4);
}
got->BindTexture2D(texDataF2);
const int block_width = (len < ORIENTATION_COMPUTE_PER_BLOCK) ? 16 : ORIENTATION_COMPUTE_PER_BLOCK;
dim3 grid((len + block_width -1) / block_width);
dim3 block(block_width);
ComputeOrientation_Kernel<<<grid, block>>>(
(float4*) list->_cuData,
len, width, height, sigma, sigma_step,
GlobalUtil::_OrientationGaussianFactor,
GlobalUtil::_OrientationGaussianFactor * GlobalUtil::_OrientationWindowFactor,
GlobalUtil::_FixedOrientation ? 0 : GlobalUtil::_MaxOrientation,
existing_keypoint, GlobalUtil::_SubpixelLocalization, GlobalUtil::_KeepExtremumSign
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
, GlobalUtil::_HalfSIFT
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
);
ProgramCU::CheckErrorCUDA("ComputeOrientation");
}
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
template <bool DYNAMIC_INDEXING, bool HALF_SIFT>
#else
template <bool DYNAMIC_INDEXING>
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
void __global__ ComputeDescriptor_Kernel(float4* d_des, int num, int width, int height, float window_factor)
{
const float rpi = 4.0 / PI;
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int fidx = idx >> 4;
if(fidx >= num)
return;
// fetch the feature
float4 key = tex1Dfetch(texDataF4, fidx);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// input in the feature list
// key.x: response 8b H | x 24b-14.10
// key.y: response 8b L | y 24b-14.10
// key.z: 2b type | 14b unused | scale 16b-8.8
// key.w: orientation
// extract x position
unsigned int tmpValue = *((unsigned int *)(&key.x)); // __float_as_uint(key.x); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.x = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract y position
tmpValue = *((unsigned int *)(&key.y)); // __float_as_uint(key.y); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.y = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract scale
tmpValue = *((unsigned int *)(&key.z)); // __float_as_uint(key.z); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_SCALE_MASK;
key.z = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_SCALE_PRECISION_BITS);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
int bidx = idx & 0xf;
int ix = bidx & 0x3;
int iy = bidx >> 2;
float spt = fabs(key.z * window_factor);
float s, c;
__sincosf(key.w, &s, &c);
float anglef = (key.w > PI) ? (key.w - (2.0 * PI)) : key.w;
float cspt = c * spt;
float sspt = s * spt;
float crspt = c / spt;
float srspt = s / spt;
float2 offsetpt, pt;
float xmin, ymin, xmax, ymax, bsz;
offsetpt.x = ix - 1.5f;
offsetpt.y = iy - 1.5f;
pt.x = cspt * offsetpt.x - sspt * offsetpt.y + key.x;
pt.y = cspt * offsetpt.y + sspt * offsetpt.x + key.y;
bsz = fabs(cspt) + fabs(sspt);
xmin = max(1.5f, floor(pt.x - bsz) + 0.5f);
ymin = max(1.5f, floor(pt.y - bsz) + 0.5f);
xmax = min(width - 1.5f, floor(pt.x + bsz) + 0.5f);
ymax = min(height - 1.5f, floor(pt.y + bsz) + 0.5f);
float des[9];
#pragma unroll
for(int i = 0; i < 9; ++i)
des[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - pt.x;
float dy = y - pt.y;
float nx = crspt * dx + srspt * dy;
float ny = crspt * dy - srspt * dx;
float nxn = fabs(nx);
float nyn = fabs(ny);
if((nxn < 1.0f) && (nyn < 1.0f))
{
float2 cc = tex2D(texDataF2, x, y);
float dnx = nx + offsetpt.x;
float dny = ny + offsetpt.y;
float ww = expf(-0.125f * (dnx * dnx + dny * dny));
float wx = 1.0 - nxn;
float wy = 1.0 - nyn;
float weight = ww * wx * wy * cc.x;
float theta = (anglef - cc.y) * rpi;
if(theta < 0)
theta += 8.0f;
float fo = floorf(theta);
int fidx = fo;
float weight1 = fo + 1.0f - theta;
float weight2 = theta - fo;
if(DYNAMIC_INDEXING)
{
des[fidx] += (weight1 * weight);
des[fidx + 1] += (weight2 * weight);
// this dynamic indexing part might be slow
}
else
{
#pragma unroll
for(int k = 0; k < 8; ++k)
{
if(k == fidx)
{
des[k] += (weight1 * weight);
des[k+1] += (weight2 * weight);
}
}
}
}
}
}
des[0] += des[8];
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
if(HALF_SIFT)
{
// half sift -> 4 directions only
des[0] += des[4];
des[1] += des[5];
des[2] += des[6];
des[3] += des[7];
d_des[idx] = make_float4(des[0], des[1], des[2], des[3]);
return;
}
else
{
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
// full sift -> 8 directions
int didx = idx << 1;
d_des[didx] = make_float4(des[0], des[1], des[2], des[3]);
d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
}
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
template <bool DYNAMIC_INDEXING, bool HALF_SIFT>
#else
template <bool DYNAMIC_INDEXING>
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
void __global__ ComputeDescriptorRECT_Kernel(float4* d_des, int num, int width, int height, float window_factor)
{
const float rpi = 4.0 / PI;
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int fidx = idx >> 4;
if(fidx >= num)
return;
// fetch the feature
float4 key = tex1Dfetch(texDataF4, fidx);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// input in the feature list
// key.x: response 8b H | x 24b-14.10
// key.y: response 8b L | y 24b-14.10
// key.z: 2b type | 14b unused | scale 16b-8.8
// key.w: orientation1 orientation2
// extract x position
unsigned int tmpValue = *((unsigned int *)(&key.x)); // __float_as_uint(key.x); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.x = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract y position
tmpValue = *((unsigned int *)(&key.y)); // __float_as_uint(key.y); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.y = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract scale
tmpValue = *((unsigned int *)(&key.z)); // __float_as_uint(key.z); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_SCALE_MASK;
key.z = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_SCALE_PRECISION_BITS);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
int bidx = idx & 0xf;
int ix = bidx & 0x3;
int iy = bidx >> 2;
// float aspect_ratio = key.w / key.z;
// float aspect_sq = aspect_ratio * aspect_ratio;
float sptx = key.z * 0.25;
float spty = key.w * 0.25;
float xmin, ymin, xmax, ymax;
float2 pt;
pt.x = sptx * (ix + 0.5f) + key.x;
pt.y = spty * (iy + 0.5f) + key.y;
xmin = max(1.5f, floorf(pt.x - sptx) + 0.5f);
ymin = max(1.5f, floorf(pt.y - spty) + 0.5f);
xmax = min(width - 1.5f, floorf(pt.x + sptx) + 0.5f);
ymax = min(height - 1.5f, floorf(pt.y + spty) + 0.5f);
float des[9];
#pragma unroll
for(int i =0; i < 9; ++i)
des[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float nx = (x - pt.x) / sptx;
float ny = (y - pt.y) / spty;
float nxn = fabs(nx);
float nyn = fabs(ny);
if((nxn < 1.0f) && (nyn < 1.0f))
{
float2 cc = tex2D(texDataF2, x, y);
float wx = 1.0 - nxn;
float wy = 1.0 - nyn;
float weight = wx * wy * cc.x;
float theta = (- cc.y) * rpi;
if(theta < 0)
theta += 8.0f;
float fo = floorf(theta);
int fidx = fo;
float weight1 = fo + 1.0f - theta;
float weight2 = theta - fo;
if(DYNAMIC_INDEXING)
{
des[fidx] += (weight1 * weight);
des[fidx + 1] += (weight2 * weight);
// this dynamic indexing part might be slow
}
else
{
#pragma unroll
for(int k = 0; k < 8; ++k)
{
if(k == fidx)
{
des[k] += (weight1 * weight);
des[k+1] += (weight2 * weight);
}
}
}
}
}
}
des[0] += des[8];
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
if(HALF_SIFT)
{
// half sift -> 4 directions only
des[0] += des[4];
des[1] += des[5];
des[2] += des[6];
des[3] += des[7];
d_des[idx] = make_float4(des[0], des[1], des[2], des[3]);
return;
}
else
{
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
// full sift -> 8 directions
int didx = idx << 1;
d_des[didx] = make_float4(des[0], des[1], des[2], des[3]);
d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
}
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
#if (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED) && defined NORMALIZE_DESCRIPTOR_PER_WARP
texture<float2, 1, cudaReadModeElementType> texDataH2;
__device__ float ND_WarpReduction(volatile float *sData, int idx)
{
int warpIdx = idx & (warpSize-1); // index within warp
// parallel reduction within warp - just half of the warp is active
if(warpIdx < 16)
{
sData[idx] += sData[idx + 16];
sData[idx] += sData[idx + 8];
sData[idx] += sData[idx + 4];
sData[idx] += sData[idx + 2];
sData[idx] += sData[idx + 1];
}
return sData[idx-warpIdx]; // first thread in the warp
}
// assumes the block size (128, 1, 1) and grid size (Bx, 1, 1)
void __global__ NormalizeDescriptor_Kernel(float4* d_des, int num)
{
// size => numThreadsPerBlock * sizeof(float); numThreadsPerBlock must be multiple of 32
extern __shared__ volatile float reductionCache[];
// int globalIdx = threadIdx.x + IMUL(blockIdx.x + IMUL(blockIdx.y, gridDim.x), blockDim.x); // 2D grid
int globalIdx = threadIdx.x + IMUL(blockIdx.x, blockDim.x); // 1D grid
int localIdx = threadIdx.x;
while(globalIdx < 32*num)
{
// the vector is first normalized to unit length, thus adjusting for changing image contrast
float4 temp = tex1Dfetch(texDataF4, globalIdx);
float norm1 = (temp.x*temp.x + temp.y*temp.y + temp.z*temp.z + temp.w*temp.w);
reductionCache[localIdx] = norm1;
// __syncthreads(); // threads in warp are always sync
norm1 = rsqrt(ND_WarpReduction(reductionCache, localIdx));
// ... then all feature dimensions are thresholded to a maximum value of 0.2
temp.x = min(0.2f, temp.x * norm1);
temp.y = min(0.2f, temp.y * norm1);
temp.z = min(0.2f, temp.z * norm1);
temp.w = min(0.2f, temp.w * norm1);
float norm2 = (temp.x*temp.x + temp.y*temp.y + temp.z*temp.z + temp.w*temp.w);
reductionCache[localIdx] = norm2;
// __syncthreads(); // threads in warp are always sync
norm2 = rsqrt(ND_WarpReduction(reductionCache, localIdx));
// ... and the vector is again normalized to unit length
temp.x *= norm2;
temp.y *= norm2;
temp.z *= norm2;
temp.w *= norm2;
d_des[globalIdx] = temp;
// move to the next descriptor, if there is any unprocessed
globalIdx += IMUL(gridDim.x, blockDim.x);
}
}
// assumes the block size (128, 1, 1) and grid size (Bx, 1, 1)
// version used for half sift
void __global__ NormalizeDescriptor_Kernel(float2* d_des, int num)
{
// size => numThreadsPerBlock * sizeof(float); numThreadsPerBlock must be multiple of 32
extern __shared__ volatile float reductionCache[];
// int globalIdx = threadIdx.x + IMUL(blockIdx.x + IMUL(blockIdx.y, gridDim.x), blockDim.x); // 2D grid
int globalIdx = threadIdx.x + IMUL(blockIdx.x, blockDim.x); // 1D grid
int localIdx = threadIdx.x;
while(globalIdx < 32*num)
{
// the vector is first normalized to unit length, thus adjusting for changing image contrast
float2 temp = tex1Dfetch(texDataH2, globalIdx);
float norm1 = (temp.x*temp.x + temp.y*temp.y);
reductionCache[localIdx] = norm1;
// __syncthreads(); // threads in warp are always sync
norm1 = rsqrt(ND_WarpReduction(reductionCache, localIdx));
// ... then all feature dimensions are thresholded to a maximum value of 0.2
temp.x = min(0.2f, temp.x * norm1);
temp.y = min(0.2f, temp.y * norm1);
float norm2 = (temp.x*temp.x + temp.y*temp.y);
reductionCache[localIdx] = norm2;
// __syncthreads(); // threads in warp are always sync
norm2 = rsqrt(ND_WarpReduction(reductionCache, localIdx));
// ... and the vector is again normalized to unit length
temp.x *= norm2;
temp.y *= norm2;
d_des[globalIdx] = temp;
// move to the next descriptor, if there is any unprocessed
globalIdx += IMUL(gridDim.x, blockDim.x);
}
}
#else
template<int DESCRIPTOR_SIZE, int SHIFT>
void __global__ NormalizeDescriptor_Kernel(float4* d_des, int num)
{
float4 temp[DESCRIPTOR_SIZE];
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num)
return;
int sidx = idx << SHIFT;
float norm1 = 0;
float norm2 = 0;
#pragma unroll
// the vector is first normalized to unit length, thus adjusting for changing image contrast
for(int i = 0; i < DESCRIPTOR_SIZE; ++i)
{
temp[i] = tex1Dfetch(texDataF4, sidx+i);
norm1 += (temp[i].x*temp[i].x + temp[i].y*temp[i].y + temp[i].z*temp[i].z + temp[i].w*temp[i].w);
}
norm1 = rsqrt(norm1);
#pragma unroll
// ... then all feature dimensions are thresholded to a maximum value of 0.2
for(int i = 0; i < DESCRIPTOR_SIZE; ++i)
{
temp[i].x = min(0.2f, temp[i].x * norm1);
temp[i].y = min(0.2f, temp[i].y * norm1);
temp[i].z = min(0.2f, temp[i].z * norm1);
temp[i].w = min(0.2f, temp[i].w * norm1);
norm2 += (temp[i].x*temp[i].x + temp[i].y*temp[i].y + temp[i].z*temp[i].z + temp[i].w*temp[i].w);
}
norm2 = rsqrt(norm2);
#pragma unroll
// ... and the vector is again normalized to unit length
for(int i = 0; i < DESCRIPTOR_SIZE; ++i)
{
temp[i].x *= norm2;
temp[i].y *= norm2;
temp[i].z *= norm2;
temp[i].w *= norm2;
d_des[sidx + i] = temp[i];
}
}
#endif // (GPU_HESSIAN || GPU_SIFT_MODIFIED) && NORMALIZE_DESCRIPTOR_PER_WARP
void ProgramCU::ComputeDescriptor(CuTexImage*list, CuTexImage* got, CuTexImage* dtex, int rect, int stream)
{
int num = list->GetImgWidth();
int width = got->GetImgWidth();
int height = got->GetImgHeight();
dtex->InitTexture(num * 128, 1, 1);
got->BindTexture2D(texDataF2);
list->BindTexture(texDataF4);
int block_width = DESCRIPTOR_COMPUTE_BLOCK_SIZE;
dim3 grid((num * 16 + block_width -1) / block_width);
dim3 block(block_width);
if(rect)
{
if(GlobalUtil::_UseDynamicIndexing)
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
{
if(GlobalUtil::_HalfSIFT)
ComputeDescriptorRECT_Kernel<true, true><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
ComputeDescriptorRECT_Kernel<true, false><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}
#else
ComputeDescriptorRECT_Kernel<true><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
else
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
{
if(GlobalUtil::_HalfSIFT)
ComputeDescriptorRECT_Kernel<false, true><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
ComputeDescriptorRECT_Kernel<false, false><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}
#else
ComputeDescriptorRECT_Kernel<false><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
else
{
if(GlobalUtil::_UseDynamicIndexing)
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
{
if(GlobalUtil::_HalfSIFT)
ComputeDescriptor_Kernel<true, true><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
ComputeDescriptor_Kernel<true, false><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}
#else
ComputeDescriptor_Kernel<true><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
else
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
{
if(GlobalUtil::_HalfSIFT)
ComputeDescriptor_Kernel<false, true><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
ComputeDescriptor_Kernel<false, false><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
}
#else
ComputeDescriptor_Kernel<false><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
if(GlobalUtil::_NormalizedSIFT)
{
#if (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED) && defined NORMALIZE_DESCRIPTOR_PER_WARP
// 32 threads (one warp) normalize one descriptor -> one thread handles one float4
// 1D block size (128, 1, 1) => four descriptors are normalized per block
// 1D grid size (Bx, 1, 1)
if(GlobalUtil::_HalfSIFT)
dtex->BindTexture(texDataH2);
else
dtex->BindTexture(texDataF4);
const int blockWidth = DESCRIPTOR_NORMALIZE_PER_BLOCK;
int blocksInGrid = min(16384, (num*32 + blockWidth -1) / blockWidth);
dim3 grid(blocksInGrid);
dim3 block(blockWidth);
if(GlobalUtil::_HalfSIFT)
NormalizeDescriptor_Kernel<<<grid, block, blockWidth*sizeof(float)>>>((float2*) dtex->_cuData, num);
else
NormalizeDescriptor_Kernel<<<grid, block, blockWidth*sizeof(float)>>>((float4*) dtex->_cuData, num);
#else
dtex->BindTexture(texDataF4);
const int block_width = DESCRIPTOR_NORMALIZE_PER_BLOCK;
dim3 grid((num + block_width -1) / block_width);
dim3 block(block_width);
NormalizeDescriptor_Kernel<32, 5><<<grid, block>>>((float4*) dtex->_cuData, num);
#endif // (GPU_HESSIAN || GPU_SIFT_MODIFIED) && NORMALIZE_DESCRIPTOR_PER_WARP
}
CheckErrorCUDA("ComputeDescriptor");
}
#if (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED) && defined TOP_K_SELECTION
// Enables maximum occupancy - bitonic sort
#define SHARED_SIZE_LIMIT 1024U // CC2.0
#define TOPK_BLOCK_SIZE 128 // 256
void ProgramCU::TopKInit(TopKData &data, int listSize, int countThreshold)
{
data.keypointsCount = listSize;
// the closest power of two higher than keypoints count
data.keypointsCountAsPowerOfTwo = 1;
while (data.keypointsCountAsPowerOfTwo < listSize)
data.keypointsCountAsPowerOfTwo *= 2;
// keypoints count have to be at least SHARED_SIZE_LIMIT (required by bitonic sort implementation)
data.keypointsCountAsPowerOfTwo = max(data.keypointsCountAsPowerOfTwo, SHARED_SIZE_LIMIT);
// allocate arrays to store responses and indices
cudaMalloc(&data.keys, (data.keypointsCountAsPowerOfTwo+1) * sizeof(float)); // one extra element in both arrays is used by prefix scan
cudaMalloc(&data.indices, (data.keypointsCountAsPowerOfTwo+1) * sizeof(unsigned int));
data.topKCountThreshold = countThreshold;
}
void ProgramCU::TopKFinish(TopKData &data)
{
// release all data
cudaFree(data.keys);
cudaFree(data.indices);
cudaFree(data.devLevelFeaturesCount);
delete[] data.levelFeaturesCount;
}
void __global__ CopyTopKData_Kernel(int width, float *keys, unsigned int *indices, int listLen, int offset, int keypointsCount)
{
int idx = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
if(idx >= listLen)
return;
float response = MIN_VALUE;
if(idx+offset < keypointsCount)
{
// read keypoint index
int4 ikey = tex1Dfetch(texDataList, idx);
int keyIdx = IMUL(width, ikey.y) + ikey.x;
// read keypoint additional info
float4 key = tex1Dfetch(texDataF4, keyIdx);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// keypoint info
// key.x: response 16b | 14b unused | 2b type
// key.y: x
// key.z: y
// key.w: scale
// extract response
unsigned int value = *((unsigned int *)(&key.x)); // __float_as_uint(key.x); // CUDA 7.5
value = (value & 0xFFFF0000u) >> 16;
response = __half2float((unsigned short)value);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
keys[idx+offset] = fabs(response);
indices[idx+offset] = idx+offset;
}
void ProgramCU::TopKCopyData(CuTexImage* list, CuTexImage* key, TopKData &topKData, int offset)
{
bool isActivePadding = ((list == NULL) && (key == NULL)); // clear the padding values?
int len = isActivePadding ? (topKData.keypointsCountAsPowerOfTwo - offset) : list->GetImgWidth();
if(len <= 0)
return;
int width = isActivePadding ? 0 : key->GetImgWidth();
if(!isActivePadding)
{
list->BindTexture(texDataList);
key->BindTexture(texDataF4);
}
int blockWidth = TOPK_BLOCK_SIZE;
dim3 grid((len + blockWidth - 1) / blockWidth);
dim3 block(blockWidth);
CopyTopKData_Kernel<<<grid, block>>>(width, topKData.keys, topKData.indices, len, offset, topKData.keypointsCount);
CheckErrorCUDA("CopyTopKData");
}
// Map to single instructions on G8x / G9x / G100
#define UMUL(a, b) __umul24((a), (b))
#define UMAD(a, b, c) ( UMUL((a), (b)) + (c) )
__device__ inline void BMS_Comparator(float &keyA, unsigned int &valA, float &keyB, unsigned int &valB, unsigned int dir)
{
union {
unsigned int uintValue;
float floatValue;
};
if ((keyA > keyB) == dir)
{
floatValue = keyA; keyA = keyB; keyB = floatValue;
uintValue = valA; valA = valB; valB = uintValue;
}
}
////////////////////////////////////////////////////////////////////////////////
// Monolithic bitonic sort kernel for short arrays fitting into shared memory
////////////////////////////////////////////////////////////////////////////////
__global__ void BitonicSortShared_Kernel(float *dstKey, unsigned int *dstVal, float *srcKey, unsigned int *srcVal, unsigned int arrayLength, unsigned int dir)
{
// Shared memory storage for one or more short vectors
__shared__ float sKey[SHARED_SIZE_LIMIT];
__shared__ unsigned int sVal[SHARED_SIZE_LIMIT];
// Offset to the beginning of subbatch and load data
int offset = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
srcKey += offset;
srcVal += offset;
dstKey += offset;
dstVal += offset;
sKey[threadIdx.x + 0] = srcKey[ 0];
sVal[threadIdx.x + 0] = srcVal[ 0];
sKey[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = srcKey[(SHARED_SIZE_LIMIT / 2)];
sVal[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = srcVal[(SHARED_SIZE_LIMIT / 2)];
for (unsigned int size = 2; size < arrayLength; size <<= 1)
{
// bitonic merge
unsigned int ddd = dir ^ ((threadIdx.x & (size / 2)) != 0);
for (unsigned int stride = size / 2; stride > 0; stride >>= 1)
{
__syncthreads();
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
BMS_Comparator(
sKey[pos + 0], sVal[pos + 0],
sKey[pos + stride], sVal[pos + stride],
ddd
);
}
}
// ddd == dir for the last bitonic merge step
{
for (unsigned int stride = arrayLength / 2; stride > 0; stride >>= 1)
{
__syncthreads();
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
BMS_Comparator(
sKey[pos + 0], sVal[pos + 0],
sKey[pos + stride], sVal[pos + stride],
dir
);
}
}
__syncthreads();
dstKey[ 0] = sKey[threadIdx.x + 0];
dstVal[ 0] = sVal[threadIdx.x + 0];
dstKey[(SHARED_SIZE_LIMIT / 2)] = sKey[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
dstVal[(SHARED_SIZE_LIMIT / 2)] = sVal[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
////////////////////////////////////////////////////////////////////////////////
// Bitonic sort kernel for large arrays (not fitting into shared memory)
////////////////////////////////////////////////////////////////////////////////
// Bottom-level bitonic sort
// Almost the same as bitonicSortShared with the exception of even / odd subarrays being sorted in opposite directions
// Bitonic merge accepts both Ascending | descending or descending | ascending sorted pairs
__global__ void BitonicSortShared1_Kernel(float *dstKey, unsigned int *dstVal, float *srcKey, unsigned int *srcVal)
{
// Shared memory storage for current subarray
__shared__ float sKey[SHARED_SIZE_LIMIT];
__shared__ unsigned int sVal[SHARED_SIZE_LIMIT];
// Offset to the beginning of subarray and load data
int offset = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
srcKey += offset;
srcVal += offset;
dstKey += offset;
dstVal += offset;
sKey[threadIdx.x + 0] = srcKey[ 0];
sVal[threadIdx.x + 0] = srcVal[ 0];
sKey[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = srcKey[(SHARED_SIZE_LIMIT / 2)];
sVal[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = srcVal[(SHARED_SIZE_LIMIT / 2)];
for (unsigned int size = 2; size < SHARED_SIZE_LIMIT; size <<= 1)
{
// Bitonic merge
unsigned int ddd = (threadIdx.x & (size / 2)) != 0;
for (unsigned int stride = size / 2; stride > 0; stride >>= 1)
{
__syncthreads();
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
BMS_Comparator(
sKey[pos + 0], sVal[pos + 0],
sKey[pos + stride], sVal[pos + stride],
ddd
);
}
}
// odd / even arrays of SHARED_SIZE_LIMIT elements
// sorted in opposite directions
unsigned int ddd = blockIdx.x & 1;
{
for (unsigned int stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1)
{
__syncthreads();
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
BMS_Comparator(
sKey[pos + 0], sVal[pos + 0],
sKey[pos + stride], sVal[pos + stride],
ddd
);
}
}
__syncthreads();
dstKey[ 0] = sKey[threadIdx.x + 0];
dstVal[ 0] = sVal[threadIdx.x + 0];
dstKey[(SHARED_SIZE_LIMIT / 2)] = sKey[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
dstVal[(SHARED_SIZE_LIMIT / 2)] = sVal[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
// Bitonic merge iteration for stride >= SHARED_SIZE_LIMIT
__global__ void BitonicMergeGlobal_Kernel(float *dstKey, unsigned int *dstVal, float *srcKey, unsigned int *srcVal, unsigned int arrayLength, unsigned int size, unsigned int stride, unsigned int dir)
{
unsigned int global_comparatorI = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int comparatorI = global_comparatorI & (arrayLength / 2 - 1);
// Bitonic merge
unsigned int ddd = dir ^ ((comparatorI & (size / 2)) != 0);
unsigned int pos = 2 * global_comparatorI - (global_comparatorI & (stride - 1));
float keyA = srcKey[pos + 0];
unsigned int valA = srcVal[pos + 0];
float keyB = srcKey[pos + stride];
unsigned int valB = srcVal[pos + stride];
BMS_Comparator(
keyA, valA,
keyB, valB,
ddd
);
dstKey[pos + 0] = keyA;
dstVal[pos + 0] = valA;
dstKey[pos + stride] = keyB;
dstVal[pos + stride] = valB;
}
// Combined bitonic merge steps for size > SHARED_SIZE_LIMIT and stride = [1 .. SHARED_SIZE_LIMIT / 2]
__global__ void BitonicMergeShared_Kernel(float *dstKey, unsigned int *dstVal, float *srcKey, unsigned int *srcVal, unsigned int arrayLength, unsigned int size, unsigned int dir)
{
// Shared memory storage for current subarray
__shared__ float sKey[SHARED_SIZE_LIMIT];
__shared__ unsigned int sVal[SHARED_SIZE_LIMIT];
int offset = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
srcKey += offset;
srcVal += offset;
dstKey += offset;
dstVal += offset;
sKey[threadIdx.x + 0] = srcKey[ 0];
sVal[threadIdx.x + 0] = srcVal[ 0];
sKey[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = srcKey[(SHARED_SIZE_LIMIT / 2)];
sVal[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = srcVal[(SHARED_SIZE_LIMIT / 2)];
// Bitonic merge
unsigned int comparatorI = UMAD(blockIdx.x, blockDim.x, threadIdx.x) & ((arrayLength / 2) - 1);
unsigned int ddd = dir ^ ((comparatorI & (size / 2)) != 0);
for(unsigned int stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1)
{
__syncthreads();
unsigned int pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
BMS_Comparator(
sKey[pos + 0], sVal[pos + 0],
sKey[pos + stride], sVal[pos + stride],
ddd
);
}
__syncthreads();
dstKey[ 0] = sKey[threadIdx.x + 0];
dstVal[ 0] = sVal[threadIdx.x + 0];
dstKey[(SHARED_SIZE_LIMIT / 2)] = sKey[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
dstVal[(SHARED_SIZE_LIMIT / 2)] = sVal[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
unsigned int BMS_FactorOf2(unsigned int *log2L, unsigned int L)
{
if (!L)
{
*log2L = 0;
return 0;
}
else
{
for (*log2L = 0; (L & 1) == 0; L >>= 1, *log2L++);
return L;
}
}
///////////////////////////////////////////////////////////////////////////////
// bitonic sort is borrowed from the sorting networks sample which is a part of
// the NVIDIA GPU Computing SDK (NVIDIA CUDA Code Samples)
unsigned int BMS_BitonicSort(float *dstKey, unsigned int *dstVal, float *srcKey, unsigned int *srcVal, unsigned int arrayLength, unsigned int dir)
{
// Nothing to sort
if(arrayLength < 2)
return 0;
// only power-of-two array lengths are supported by this implementation
unsigned int log2L;
unsigned int factorizationRemainder = BMS_FactorOf2(&log2L, arrayLength);
assert(factorizationRemainder == 1);
dir = (dir != 0);
unsigned int blockCount = arrayLength / SHARED_SIZE_LIMIT;
unsigned int threadCount = SHARED_SIZE_LIMIT / 2;
if(arrayLength <= SHARED_SIZE_LIMIT)
{
assert(arrayLength % SHARED_SIZE_LIMIT == 0);
BitonicSortShared_Kernel<<<blockCount, threadCount>>>(dstKey, dstVal, srcKey, srcVal, arrayLength, dir);
}
else
{
BitonicSortShared1_Kernel<<<blockCount, threadCount>>>(dstKey, dstVal, srcKey, srcVal);
for(unsigned int size = 2 * SHARED_SIZE_LIMIT; size <= arrayLength; size <<= 1)
for (unsigned stride = size / 2; stride > 0; stride >>= 1)
if(stride >= SHARED_SIZE_LIMIT)
{
BitonicMergeGlobal_Kernel<<<arrayLength / 512, 256>>>(dstKey, dstVal, dstKey, dstVal, arrayLength, size, stride, dir);
}
else
{
BitonicMergeShared_Kernel<<<blockCount, threadCount>>>(dstKey, dstVal, dstKey, dstVal, arrayLength, size, dir);
break;
}
}
return threadCount;
}
#undef SHARED_SIZE_LIMIT
void ProgramCU::TopKSort(TopKData &topKData)
{
// sort in descending order
unsigned int threadsCount = BMS_BitonicSort(
topKData.keys, topKData.indices, // dst
topKData.keys, topKData.indices, // src
topKData.keypointsCountAsPowerOfTwo, // array length - have to be power of 2
0 // sort direction
);
}
__global__ void MarkSelectedElements_Kernel(unsigned int *outFlags, unsigned int *inIndices, int len, int topK)
{
int threadID = threadIdx.x + blockDim.x* blockIdx.x;
if (threadID >= len)
return;
if(threadID < topK)
{
unsigned int index = inIndices[threadID];
outFlags[index] = 1;
} // 0 are not written because of cleared array to 0
}
/////////////////////
// Define this to more rigorously avoid bank conflicts, even at the lower (root) levels of the tree
// Note that due to the higher addressing overhead, performance is lower with ZERO_BANK_CONFLICTS enabled.
// It is provided as an example.
//#define ZERO_BANK_CONFLICTS
// 16 banks on G80, 32 foc CC 2.X and 3.X
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS)
#endif
///////////////////////////////////////////////////////////////////////////////
// prefix scan is borrowed from the scan large array sample which is a part of
// the NVIDIA GPU Computing SDK
//
// Work-efficient compute implementation of scan, one thread per 2 elements
// Work-efficient: O(log(n)) steps, and O(n) adds.
// Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging
// Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements.
//
// In addition, If ZERO_BANK_CONFLICTS is defined, uses
// n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS)
// shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using
// single-element offsets every NUM_BANKS elements, plus additional single-element offsets
// after every NUM_BANKS^2 elements.
//
// Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums
// and Their Applications", or Prins and Chatterjee PRAM course notes:
// https://www.cs.unc.edu/~prins/Classes/633/Handouts/pram.pdf
//
// This work-efficient version is based on the algorithm presented in Guy Blelloch's
// excellent paper "Prefix sums and their applications".
// http://www.cs.cmu.edu/~blelloch/papers/Ble93.pdf
//
// Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined)
// Con: More instructions to compute bank-conflict-free shared memory addressing,
// and slightly more shared memory storage used.
//
template <bool isNP2>
__device__ void PPS_LoadSharedChunkFromMem(unsigned int *sData, const unsigned int *inData, int n, int baseIndex, int& ai, int& bi, int& memAi, int& memBi, int& bankOffsetA, int& bankOffsetB)
{
int threadID = threadIdx.x;
memAi = baseIndex + threadIdx.x;
memBi = memAi + blockDim.x;
ai = threadID;
bi = threadID + blockDim.x;
// compute spacing to avoid bank conflicts
bankOffsetA = CONFLICT_FREE_OFFSET(ai);
bankOffsetB = CONFLICT_FREE_OFFSET(bi);
// cache the computational window in shared memory, pad values beyond n with zeros
sData[ai + bankOffsetA] = inData[memAi];
if(isNP2) // compile-time decision
{
sData[bi + bankOffsetB] = (bi < n) ? inData[memBi] : 0;
}
else
{
sData[bi + bankOffsetB] = inData[memBi];
}
}
template <bool isNP2>
__device__ void PPS_StoreSharedChunkToMem(unsigned int *outData, const unsigned int *sData, int n, int ai, int bi, int memAi, int memBi, int bankOffsetA, int bankOffsetB)
{
__syncthreads();
// write results to global memory
outData[memAi] = sData[ai + bankOffsetA];
if(isNP2) // compile-time decision
{
if(bi < n)
outData[memBi] = sData[bi + bankOffsetB];
}
else
{
outData[memBi] = sData[bi + bankOffsetB];
}
}
template <bool storeSum>
__device__ void PPS_ClearLastElement(unsigned int *sData, unsigned int *blockSums, int blockIndex)
{
if(threadIdx.x == 0)
{
int index = (blockDim.x << 1) - 1;
index += CONFLICT_FREE_OFFSET(index);
if(storeSum) // compile-time decision
{
// write this block's total sum to the corresponding index in the blockSums array
blockSums[blockIndex] = sData[index];
}
// zero the last element in the scan so it will propagate back to the front
sData[index] = 0;
}
}
__device__ unsigned int PPS_BuildSum(unsigned int *sData)
{
unsigned int threadID = threadIdx.x;
unsigned int stride = 1;
// build the sum in place up the tree
for(int d = blockDim.x; d > 0; d >>= 1)
{
__syncthreads();
if(threadID < d)
{
int i = IMUL(IMUL(2, stride), threadID);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
sData[bi] += sData[ai];
}
stride *= 2;
}
return stride;
}
__device__ void PPS_ScanRootToLeaves(unsigned int *sData, unsigned int stride)
{
unsigned int threadID = threadIdx.x;
// traverse down the tree building the scan in place
for(int d = 1; d <= blockDim.x; d *= 2)
{
stride >>= 1;
__syncthreads();
if(threadID < d)
{
int i = IMUL(IMUL(2, stride), threadID);
int ai = i + stride - 1;
int bi = ai + stride;
ai += CONFLICT_FREE_OFFSET(ai);
bi += CONFLICT_FREE_OFFSET(bi);
unsigned int t = sData[ai];
sData[ai] = sData[bi];
sData[bi] += t;
}
}
}
template <bool storeSum>
__device__ void PPS_PrescanBlock(unsigned int *data, int blockIndex, unsigned int *blockSums)
{
// build the sum in place up the tree
int stride = PPS_BuildSum(data);
PPS_ClearLastElement<storeSum>(data, blockSums, (blockIndex == 0) ? blockIdx.x : blockIndex);
// traverse down tree to build the scan
PPS_ScanRootToLeaves(data, stride);
}
template <bool storeSum, bool isNP2>
__global__ void PPS_Prescan_Kernel(unsigned int *outData, const unsigned int *inData, unsigned int *blockSums, int n, int blockIndex, int baseIndex)
{
extern __shared__ unsigned int shData[];
int ai, bi, memAi, memBi, bankOffsetA, bankOffsetB;
// load data into shared memory
PPS_LoadSharedChunkFromMem<isNP2>(shData, inData, n, (baseIndex == 0) ? IMUL(blockIdx.x, (blockDim.x << 1)) : baseIndex, ai, bi, memAi, memBi, bankOffsetA, bankOffsetB);
// scan the data in each block
PPS_PrescanBlock<storeSum>(shData, blockIndex, blockSums);
// write results to device memory
PPS_StoreSharedChunkToMem<isNP2>(outData, shData, n, ai, bi, memAi, memBi, bankOffsetA, bankOffsetB);
}
__global__ void PPS_UniformAdd_Kernel(unsigned int *data, unsigned int *uniforms, int n, int blockOffset, int baseIndex)
{
__shared__ unsigned int uni;
if(threadIdx.x == 0)
uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = IMUL(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
__syncthreads();
// note two adds per thread
data[address] += uni;
data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
inline bool PPS_IsPowerOfTwo(int n)
{
return ((n&(n-1))==0) ;
}
inline int PPS_FloorPow2(int n)
{
#ifdef WIN32
// method 2
return 1 << (int)logb((float)n);
#else
// method 1
// float nf = (float)n;
// return 1 << (((*(int*)&nf) >> 23) - 127);
int exp;
frexp((float)n, &exp);
return 1 << (exp - 1);
#endif
}
#define THREADS_PER_BLOCK 128
void PPS_PreallocBlockSums(TopKData &topKData, unsigned int maxNumElements)
{
assert(topKData.numElementsAllocated == 0); // shouldn't be called
topKData.numElementsAllocated = maxNumElements;
unsigned int blockSize = THREADS_PER_BLOCK; // max size of the thread blocks
unsigned int numElements = maxNumElements;
int level = 0;
do
{
unsigned int numBlocks = max(1, (int)ceil((float)numElements / (2.f * blockSize)));
if(numBlocks > 1)
level++;
numElements = numBlocks;
} while(numElements > 1);
topKData.scanBlockSums = (unsigned int**) malloc(level * sizeof(unsigned int*));
topKData.numLevelsAllocated = level;
numElements = maxNumElements;
level = 0;
do
{
unsigned int numBlocks = max(1, (int)ceil((float)numElements / (2.f * blockSize)));
if (numBlocks > 1)
cudaMalloc((void**) &topKData.scanBlockSums[level++], numBlocks * sizeof(unsigned int));
numElements = numBlocks;
} while(numElements > 1);
}
void PPS_DeallocBlockSums(TopKData &topKData)
{
for(unsigned int i = 0; i < topKData.numLevelsAllocated; i++)
cudaFree(topKData.scanBlockSums[i]);
free((void**)topKData.scanBlockSums);
topKData.scanBlockSums = NULL;
topKData.numElementsAllocated = 0;
topKData.numLevelsAllocated = 0;
}
// prefix scan is borrowed from the scan large array sample which is a part of NVIDIA GPU Computing SDK
void PPS_PrescanArrayRecursive(TopKData &topKData, unsigned int *outArray, const unsigned int *inArray, int numElements, int level)
{
unsigned int blockSize = THREADS_PER_BLOCK; // max size of the thread blocks
unsigned int numBlocks = max(1, (int)ceil((float)numElements / (2.f * blockSize)));
unsigned int numThreads;
if(numBlocks > 1)
numThreads = blockSize;
else if(PPS_IsPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = PPS_FloorPow2(numElements);
unsigned int numElementsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
unsigned int numElementsLastBlock = numElements - (numBlocks-1) * numElementsPerBlock;
unsigned int numThreadsLastBlock = max(1, numElementsLastBlock / 2);
unsigned int np2LastBlock = 0;
unsigned int sharedMemLastBlock = 0;
if (numElementsLastBlock != numElementsPerBlock)
{
np2LastBlock = 1;
if(!PPS_IsPowerOfTwo(numElementsLastBlock))
numThreadsLastBlock = PPS_FloorPow2(numElementsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(unsigned int) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
unsigned int extraSpace = numElementsPerBlock / NUM_BANKS;
unsigned int sharedMemSize = sizeof(unsigned int) * (numElementsPerBlock + extraSpace);
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
// execute the scan
if(numBlocks > 1)
{
PPS_Prescan_Kernel<true, false><<<grid, threads, sharedMemSize>>>(outArray, inArray, topKData.scanBlockSums[level], numThreads*2, 0, 0);
if(np2LastBlock)
PPS_Prescan_Kernel<true, true><<<1, numThreadsLastBlock, sharedMemLastBlock>>>(outArray, inArray, topKData.scanBlockSums[level], numElementsLastBlock, numBlocks - 1, numElements - numElementsLastBlock);
// After scanning all the sub-blocks, we are mostly done. But now we need to take all of the last
// values of the sub-blocks and scan those. This will give us a new value that must be sdded to each
// block to get the final results.
// recursive (CPU) call
PPS_PrescanArrayRecursive(topKData, topKData.scanBlockSums[level], topKData.scanBlockSums[level], numBlocks, level+1);
PPS_UniformAdd_Kernel<<<grid, threads>>>(outArray, topKData.scanBlockSums[level], numElements - numElementsLastBlock, 0, 0);
if(np2LastBlock)
PPS_UniformAdd_Kernel<<<1, numThreadsLastBlock>>>(outArray, topKData.scanBlockSums[level], numElementsLastBlock, numBlocks - 1, numElements - numElementsLastBlock);
}
else if(PPS_IsPowerOfTwo(numElements))
{
PPS_Prescan_Kernel<false, false><<<grid, threads, sharedMemSize>>>(outArray, inArray, 0, numThreads*2, 0, 0);
}
else
{
PPS_Prescan_Kernel<false, true><<<grid, threads, sharedMemSize>>>(outArray, inArray, 0, numElements, 0, 0);
}
}
void ProgramCU::TopKPrefixScan(TopKData &topKData)
{
if(topKData.keypointsCount <= 0)
return;
topKData.numElementsAllocated = 0;
topKData.numLevelsAllocated = 0;
unsigned int numElements = topKData.keypointsCountAsPowerOfTwo;
unsigned int *devIdxs = (unsigned int *)topKData.keys;
unsigned int *devData = topKData.indices;
// clear keypoints flags to 0
cudaMemset(devIdxs, 0, (numElements+1)*sizeof(unsigned int));
const int blocks = (numElements + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// prepare input for prefix scan - mark selected keypoints by 1
MarkSelectedElements_Kernel<<<blocks, THREADS_PER_BLOCK>>>(devIdxs, devData, numElements+1, topKData.topKCountThreshold);
PPS_PreallocBlockSums(topKData, numElements);
// run the prescan
PPS_PrescanArrayRecursive(topKData, devIdxs, devIdxs, numElements, 0);
PPS_DeallocBlockSums(topKData);
CheckErrorCUDA("TopKPrefixScan");
}
#undef THREADS_PER_BLOCK
void __global__ GetLevelsFeatureNum_Kernel(unsigned int *indices, int *levelFeatureNum, int levelsCount)
{
int threadID = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
if(threadID >= levelsCount)
return;
unsigned int index = levelFeatureNum[threadID];
levelFeatureNum[threadID] = indices[index];
}
void ProgramCU::TopKGetLevelsFeatureNum(TopKData &topKData)
{
if(topKData.levelsCount <= 0)
return;
cudaMalloc((void**)&(topKData.devLevelFeaturesCount), topKData.levelsCount*sizeof(unsigned int));
cudaMemcpy(topKData.devLevelFeaturesCount, topKData.levelFeaturesCount, topKData.levelsCount*sizeof(unsigned int), cudaMemcpyHostToDevice);
int blockWidth = TOPK_BLOCK_SIZE;
dim3 grid((topKData.levelsCount + blockWidth - 1) / blockWidth);
dim3 block(blockWidth);
GetLevelsFeatureNum_Kernel<<<grid, block>>>((unsigned int *)topKData.keys, topKData.devLevelFeaturesCount, topKData.levelsCount);
cudaMemcpy(topKData.levelFeaturesCount, topKData.devLevelFeaturesCount, topKData.levelsCount*sizeof(unsigned int), cudaMemcpyDeviceToHost);
CheckErrorCUDA("TopKGetLevelsFeatureNum");
}
void __global__ CompactLevelFeatures_Kernel(float4 *outFeatures, unsigned int *indices, unsigned int offset, unsigned int featuresCount)
{
int threadID = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
if(threadID >= featuresCount)
return;
unsigned int index = indices[offset + threadID];
if(index == indices[offset + threadID + 1])
return;
unsigned int firstIndex = indices[offset];
index -= firstIndex;
float4 key = tex1Dfetch(texDataF4, threadID);
outFeatures[index] = key;
}
void ProgramCU::TopKCompactLevelFeatures(CuTexImage *list, unsigned int oldLen, float **newLevelFeatures, unsigned int newLen, TopKData &topKData, unsigned int offset)
{
if(newLen <= 0)
{
*newLevelFeatures = NULL;
return;
}
cudaMalloc((void**)(newLevelFeatures), newLen*4*sizeof(float));
list->BindTexture(texDataF4);
int blockWidth = TOPK_BLOCK_SIZE;
dim3 grid((oldLen + blockWidth - 1) / blockWidth);
dim3 block(blockWidth);
CompactLevelFeatures_Kernel<<<grid, block>>>((float4 *)(*newLevelFeatures), (unsigned int *)topKData.keys, offset, oldLen);
CheckErrorCUDA("TopKCompactLevelFeatures");
}
#endif // (GPU_HESSIAN || GPU_SIFT_MODIFIED) && TOP_K_SELECTION
#if defined GENERATE_FEATURE_LIST_USING_ATOMICS && (defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED)
void ProgramCU::DetectionDataInit(int **featureTexLen, int len)
{
cudaMalloc((void**)(featureTexLen), len*sizeof(int));
cudaMemset(*featureTexLen, 0, len*sizeof(int));
CheckErrorCUDA("ProgramCU::DetectionDataInit");
}
void ProgramCU::DetectionDataDownload(int *dst, int *featureTexLen, int len)
{
cudaMemcpy(dst, featureTexLen, len*sizeof(int), cudaMemcpyDeviceToHost);
cudaMemset(featureTexLen, 0, len*sizeof(int));
CheckErrorCUDA("ProgramCU::DetectionDataDownload");
}
void ProgramCU::DetectionDataFinish(int **featureTexLen)
{
cudaFree(*featureTexLen);
*featureTexLen = NULL;
CheckErrorCUDA("ProgramCU::DetectionDataFinish");
}
#endif // GENERATE_FEATURE_LIST_USING_ATOMICS && (GPU_HESSIAN || GPU_SIFT_MODIFIED)
//////////////////////////////////////////////////////
void ProgramCU::FinishCUDA()
{
cudaThreadSynchronize();
}
int ProgramCU::CheckErrorCUDA(const char* location)
{
cudaError_t e = cudaGetLastError();
if(e)
{
if(location)
fprintf(stderr, "%s:\t", location);
fprintf(stderr, "%s\n", cudaGetErrorString(e));
// assert(0);
return 1;
}
else {
return 0;
}
}
void __global__ ConvertDOG_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if((col < width) && (row < height))
{
int index = row * width + col;
float value = tex1Dfetch(texData, index);
d_result[index] = ((col == 0) || (row == 0) || (col == width-1) || (row == height-1)) ? 0.5 : saturate(0.5+20.0*value);
}
}
void ProgramCU::DisplayConvertDOG(CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL)
return;
int width = dog->GetImgWidth();
int height = dog ->GetImgHeight();
dog->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
ConvertDOG_Kernel<<<grid, block>>>((float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertDOG");
}
void __global__ ConvertGRD_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if((col < width) && (row < height))
{
int index = row * width + col;
float value = tex1Dfetch(texData, index << 1);
d_result[index] = ((col == 0) || (row == 0) || (col == width-1) || (row == height-1)) ? 0.0 : saturate(5.0*value);
}
}
void ProgramCU::DisplayConvertGRD(CuTexImage* got, CuTexImage* out)
{
if(out->_cuData == NULL)
return;
int width = got->GetImgWidth();
int height = got ->GetImgHeight();
got->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
ConvertGRD_Kernel<<<grid, block>>>((float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertGRD");
}
void __global__ ConvertKEY_Kernel(float4* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if((col < width) && (row < height))
{
int index = row * width + col;
float4 key = tex1Dfetch(texDataF4, index);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// input: make_float4(result, dx, dy, ds); => dx, dy, ds ... subpixel localizations (otherwise zero)
// result = response 16b half float | 14b unused | 2b type
// type = FEATURE_TYPE_DARK_BLOB = 0
// FEATURE_TYPE_BRIGHT_BLOB = 1
// FEATURE_TYPE_SADDLE_POINT = 2
// FEATURE_TYPE_NONE = 3
// extract feature type
unsigned int typeValue = *((unsigned int *)(&key.x)); // __float_as_uint(key.x); // CUDA7.5
typeValue = typeValue & 0x00000003u;
// int is_key = (typeValue != FEATURE_TYPE_NONE);
#else
int is_key = ((key.x == 1.0f) || (key.x == -1.0f));
#endif // GPU_SIFT_MODIFIED || GPU_HESSIAN
int inside = (col > 0) && (row > 0) && (row < height-1) && (col < width-1);
float value = inside ? saturate(0.5 + 20.0 * tex1Dfetch(texData, index)) : 0.5;
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
float4 result = make_float4(value, value, value, 0.0f);
switch(typeValue)
{
case FEATURE_TYPE_DARK_BLOB:
result = inside ? make_float4(1.0f, 0.0f, 0.0f, 1.0f) : result;
break;
case FEATURE_TYPE_BRIGHT_BLOB:
result = inside ? make_float4(0.0f, 1.0f, 0.0f, 1.0f) : result;
break;
case FEATURE_TYPE_SADDLE_POINT:
result = inside ? make_float4(0.0f, 0.0f, 1.0f, 1.0f) : result;
break;
case FEATURE_TYPE_NONE:
default:
break;
}
d_result[index] = result;
// if((typeValue != HESSIAN_FEATURE_TYPE_NONE) && inside)
// {
// d_result[index-1] = result;
// d_result[index+1] = result;
// d_result[index-width] = result;
// d_result[index+width] = result;
// }
#else
d_result[index] = (is_key && inside) ? ((key.x > 0) ? make_float4(1.0f, 0.0f, 0.0f, 1.0f) : make_float4(0.0f, 1.0f, 0.0f, 1.0f)) : make_float4(value, value, value, 1.0f);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
}
}
void ProgramCU::DisplayConvertKEY(CuTexImage* key, CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL)
return;
int width = key->GetImgWidth();
int height = key ->GetImgHeight();
dog->BindTexture(texData);
key->BindTexture(texDataF4);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
ConvertKEY_Kernel<<<grid, block>>>((float4*) out->_cuData, width, height);
}
void __global__ DisplayKeyPoint_Kernel(float4 * d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num)
return;
float4 key = tex1Dfetch(texDataF4, idx);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// input in the feature list
// key.x: response 8b H | x 24b-14.10
// key.y: response 8b L | y 24b-14.10
// key.z: 2b type | 14b unused | scale 16b-8.8
// key.w: orientation
// extract x position
unsigned int tmpValue = *((unsigned int *)(&key.x)); // __float_as_uint(key.x); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.x = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract y position
tmpValue = *((unsigned int *)(&key.y)); // __float_as_uint(key.y); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.y = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
d_result[idx] = make_float4(key.x, key.y, 0, 1.0f);
}
void ProgramCU::DisplayKeyPoint(CuTexImage* ftex, CuTexImage* out)
{
int num = ftex->GetImgWidth();
int block_width = 64;
dim3 grid((num + block_width -1) /block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
DisplayKeyPoint_Kernel<<<grid, block>>>((float4*) out->_cuData, num);
ProgramCU::CheckErrorCUDA("DisplayKeyPoint");
}
void __global__ DisplayKeyBox_Kernel(float4* d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num)
return;
int kidx = idx / 10;
int vidx = idx - IMUL(kidx , 10);
// fetch feature/keypoint
float4 key = tex1Dfetch(texDataF4, kidx);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// input in the feature list
// key.x: response 8b H | x 24b-14.10
// key.y: response 8b L | y 24b-14.10
// key.z: 2b type | 14 unused | scale 16b-8.8
// key.w: orientation
// extract x position
unsigned int tmpValue = *((unsigned int *)(&key.x)); // __float_as_uint(key.x); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.x = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract y position
tmpValue = *((unsigned int *)(&key.y)); // __float_as_uint(key.y); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_POSITION_MASK;
key.y = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_POSITION_PRECISION_BITS);
// extract scale
tmpValue = *((unsigned int *)(&key.z)); // __float_as_uint(key.z); // CUDA 7.5
tmpValue = tmpValue & FIXED_POINT_SCALE_MASK;
key.z = FIXED_POINT_TO_FLOAT(tmpValue, FIXED_POINT_SCALE_PRECISION_BITS);
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
float sz = fabs(key.z * 3.0f);
///////////////////////
float s, c;
__sincosf(key.w, &s, &c);
///////////////////////
float dx = (vidx == 0) ? 0 : (((vidx <= 4) || (vidx >= 9)) ? sz : -sz);
float dy = (vidx <= 1) ? 0 : (((vidx <= 2) || (vidx >= 7)) ? -sz : sz);
float4 pos;
pos.x = key.x + c * dx - s * dy;
pos.y = key.y + c * dy + s * dx;
pos.z = 0;
pos.w = 1.0f;
d_result[idx] = pos;
}
void ProgramCU::DisplayKeyBox(CuTexImage* ftex, CuTexImage* out)
{
int len = ftex->GetImgWidth();
int block_width = 32;
dim3 grid((len * 10 + block_width -1) / block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
DisplayKeyBox_Kernel<<<grid, block>>>((float4*) out->_cuData, len * 10);
}
///////////////////////////////////////////////////////////////////
inline void CuTexImage::BindTexture(textureReference& texRef)
{
cudaBindTexture(NULL, &texRef, _cuData, &texRef.channelDesc, _numBytes);
}
inline void CuTexImage::BindTexture2D(textureReference& texRef)
{
#if defined(SIFTGPU_ENABLE_LINEAR_TEX2D)
if ((_imgWidth*_numChannel*sizeof(float)) % 32)
std::cout<<"Warning: Row length should be multiply of 32 !"<<std::endl;
cudaBindTexture2D(0, &texRef, _cuData, &texRef.channelDesc, _imgWidth, _imgHeight, _imgWidth*_numChannel*sizeof(float));
#else
cudaChannelFormatDesc desc;
cudaGetChannelDesc(&desc, _cuData2D);
cudaBindTextureToArray(&texRef, _cuData2D, &desc);
#endif
}
int ProgramCU::CheckCudaDevice(int device)
{
int count = 0, device_used;
if((cudaGetDeviceCount(&count) != cudaSuccess) || (count <= 0))
{
ProgramCU::CheckErrorCUDA("CheckCudaDevice");
return 0;
}
else if(count == 1)
{
cudaDeviceProp deviceProp;
if((cudaGetDeviceProperties(&deviceProp, 0) != cudaSuccess) || ((deviceProp.major == 9999) && (deviceProp.minor == 9999)))
{
fprintf(stderr, "CheckCudaDevice: no device supporting CUDA.\n");
return 0;
}
else
{
GlobalUtil::_MemCapGPU = deviceProp.totalGlobalMem / 1024;
GlobalUtil::_texMaxDimGL = 32768;
if(GlobalUtil::_verbose)
fprintf(stdout, "NOTE: changing maximum texture dimension to %d\n", GlobalUtil::_texMaxDimGL);
}
}
if((device > 0) && (device < count))
{
cudaSetDevice(device);
CheckErrorCUDA("cudaSetDevice\n");
}
cudaGetDevice(&device_used);
#if defined GPU_HESSIAN || defined GPU_SIFT_MODIFIED
// we need CC 2.0 at least for feature list construction using atomics and topk selection
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device_used);
if(deviceProp.major < 2)
{
fprintf(stderr, "CheckCudaDevice: no device supporting CUDA CC 2.X or higher. Your device has just CC %d.%d.\n", deviceProp.major, deviceProp.minor);
fprintf(stderr, " Disable GENERATE_FEATURE_LIST_USING_ATOMICS and TOP_K_SELECTION in config.h and rebuild project.\n");
return 0;
}
#endif // GPU_HESSIAN || GPU_SIFT_MODIFIED
if(device != device_used)
fprintf(stderr, "\nERROR: Cannot set device to %d\n"
"\nWARNING: Use # %d device instead (out of %d)\n", device, device_used, count);
return 1;
}
////////////////////////////////////////////////////////////////////////////////////////
// siftmatch funtions
//////////////////////////////////////////////////////////////////////////////////////////
#define MULT_TBLOCK_DIMX 128
#define MULT_TBLOCK_DIMY 1
#define MULT_BLOCK_DIMX (MULT_TBLOCK_DIMX)
#define MULT_BLOCK_DIMY (8 * MULT_TBLOCK_DIMY)
texture<uint4, 1, cudaReadModeElementType> texDes1;
texture<uint4, 1, cudaReadModeElementType> texDes2;
void __global__ MultiplyDescriptor_Kernel(int* d_result, int num1, int num2, int3* d_temp)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY), idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y, idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
int read_idx1 = idx01 * 8 + threadIdx.x, read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
///////////////////////////////////////////////////////////////
//Load feature descriptors
///////////////////////////////////////////////////////////////
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
}
#else
#error
#endif
__syncthreads();
///
if(idx2 >= num2) return;
///////////////////////////////////////////////////////////////////////////
//compare descriptors
int results[MULT_BLOCK_DIMY];
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i) results[i] = 0;
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
}
void ProgramCU::MultiplyDescriptor(CuTexImage* des1, CuTexImage* des2, CuTexImage* texDot, CuTexImage* texCRT)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture(num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 32);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
MultiplyDescriptor_Kernel<<<grid, block>>>((int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL));
ProgramCU::CheckErrorCUDA("MultiplyDescriptor");
}
texture<float, 1, cudaReadModeElementType> texLoc1;
texture<float2, 1, cudaReadModeElementType> texLoc2;
struct Matrix33{float mat[3][3];};
void __global__ MultiplyDescriptorG_Kernel(int* d_result, int num1, int num2, int3* d_temp,
Matrix33 H, float hdistmax, Matrix33 F, float fdistmax)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY);
int idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y;
int idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
__shared__ float loc1[MULT_BLOCK_DIMY * 2];
int read_idx1 = idx01 * 8 + threadIdx.x ;
int read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
}
#else
#error
#endif
__syncthreads();
if(threadIdx.x < MULT_BLOCK_DIMY * 2)
{
loc1[threadIdx.x] = tex1Dfetch(texLoc1, 2 * idx01 + threadIdx.x);
}
__syncthreads();
if(idx2 >= num2) return;
int results[MULT_BLOCK_DIMY];
/////////////////////////////////////////////////////////////////////////////////////////////
//geometric verification
/////////////////////////////////////////////////////////////////////////////////////////////
int good_count = 0;
float2 loc2 = tex1Dfetch(texLoc2, idx2);
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
float* loci = loc1 + i * 2;
float locx = loci[0], locy = loci[1];
//homography
float x[3], diff[2];
x[0] = H.mat[0][0] * locx + H.mat[0][1] * locy + H.mat[0][2];
x[1] = H.mat[1][0] * locx + H.mat[1][1] * locy + H.mat[1][2];
x[2] = H.mat[2][0] * locx + H.mat[2][1] * locy + H.mat[2][2];
diff[0] = fabs(FDIV(x[0], x[2]) - loc2.x);
diff[1] = fabs(FDIV(x[1], x[2]) - loc2.y);
if(diff[0] < hdistmax && diff[1] < hdistmax)
{
//check fundamental matrix
float fx1[3], ftx2[3], x2fx1, se;
fx1[0] = F.mat[0][0] * locx + F.mat[0][1] * locy + F.mat[0][2];
fx1[1] = F.mat[1][0] * locx + F.mat[1][1] * locy + F.mat[1][2];
fx1[2] = F.mat[2][0] * locx + F.mat[2][1] * locy + F.mat[2][2];
ftx2[0] = F.mat[0][0] * loc2.x + F.mat[1][0] * loc2.y + F.mat[2][0];
ftx2[1] = F.mat[0][1] * loc2.x + F.mat[1][1] * loc2.y + F.mat[2][1];
//ftx2[2] = F.mat[0][2] * loc2.x + F.mat[1][2] * loc2.y + F.mat[2][2];
x2fx1 = loc2.x * fx1[0] + loc2.y * fx1[1] + fx1[2];
se = FDIV(x2fx1 * x2fx1, fx1[0] * fx1[0] + fx1[1] * fx1[1] + ftx2[0] * ftx2[0] + ftx2[1] * ftx2[1]);
results[i] = se < fdistmax? 0: -262144;
}else
{
results[i] = -262144;
}
}else
{
results[i] = -262144;
}
good_count += (results[i] >=0);
}
/////////////////////////////////////////////////////////////////////////////////////////////
///compare feature descriptors anyway
/////////////////////////////////////////////////////////////////////////////////////////////
if(good_count > 0)
{
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
#pragma unroll
for(int i= 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
}else
{
break;
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
else break;
}
}
}
void ProgramCU::MultiplyDescriptorG(CuTexImage* des1, CuTexImage* des2,
CuTexImage* loc1, CuTexImage* loc2, CuTexImage* texDot, CuTexImage* texCRT,
float H[3][3], float hdistmax, float F[3][3], float fdistmax)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
Matrix33 MatF, MatH;
//copy the matrix
memcpy(MatF.mat, F, 9 * sizeof(float));
memcpy(MatH.mat, H, 9 * sizeof(float));
//thread blocks
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
//intermediate results
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture( num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 3);
loc1->BindTexture(texLoc1);
loc2->BindTexture(texLoc2);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
MultiplyDescriptorG_Kernel<<<grid, block>>>((int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL),
MatH, hdistmax, MatF, fdistmax);
}
texture<int, 1, cudaReadModeElementType> texDOT;
#define ROWMATCH_BLOCK_WIDTH 32
#define ROWMATCH_BLOCK_HEIGHT 1
void __global__ RowMatch_Kernel(int*d_dot, int* d_result, int num2, float distmax, float ratiomax)
{
#if ROWMATCH_BLOCK_HEIGHT == 1
__shared__ int dotmax[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotnxt[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotidx[ROWMATCH_BLOCK_WIDTH];
int row = blockIdx.y;
#else
__shared__ int x_dotmax[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotnxt[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotidx[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
int* dotmax = x_dotmax[threadIdx.y];
int* dotnxt = x_dotnxt[threadIdx.y];
int* dotidx = x_dotidx[threadIdx.y];
int row = IMUL(blockIdx.y, ROWMATCH_BLOCK_HEIGHT) + threadIdx.y;
#endif
int base_address = IMUL(row , num2);
int t_dotmax = 0, t_dotnxt = 0, t_dotidx = -1;
for(int i = 0; i < num2; i += ROWMATCH_BLOCK_WIDTH)
{
if(threadIdx.x + i < num2)
{
int v = tex1Dfetch(texDOT, base_address + threadIdx.x + i);//d_dot[base_address + threadIdx.x + i];//
bool test = v > t_dotmax;
t_dotnxt = test? t_dotmax : max(t_dotnxt, v);
t_dotidx = test? (threadIdx.x + i) : t_dotidx;
t_dotmax = test? v: t_dotmax;
}
__syncthreads();
}
dotmax[threadIdx.x] = t_dotmax;
dotnxt[threadIdx.x] = t_dotnxt;
dotidx[threadIdx.x] = t_dotidx;
__syncthreads();
#pragma unroll
for(int step = ROWMATCH_BLOCK_WIDTH/2; step >0; step /= 2)
{
if(threadIdx.x < step)
{
int v1 = dotmax[threadIdx.x], v2 = dotmax[threadIdx.x + step];
bool test = v2 > v1;
dotnxt[threadIdx.x] = test? max(v1, dotnxt[threadIdx.x + step]) :max(dotnxt[threadIdx.x], v2);
dotidx[threadIdx.x] = test? dotidx[threadIdx.x + step] : dotidx[threadIdx.x];
dotmax[threadIdx.x] = test? v2 : v1;
}
__syncthreads();
}
if(threadIdx.x == 0)
{
float dist = acos(min(dotmax[0] * 0.000003814697265625f, 1.0));
float distn = acos(min(dotnxt[0] * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[row] = (dist < distmax) && (dist < distn * ratiomax) ? dotidx[0] : -1;//? : -1;
}
}
void ProgramCU::GetRowMatch(CuTexImage* texDot, CuTexImage* texMatch, float distmax, float ratiomax)
{
int num1 = texDot->GetImgHeight();
int num2 = texDot->GetImgWidth();
dim3 grid(1, num1/ROWMATCH_BLOCK_HEIGHT);
dim3 block(ROWMATCH_BLOCK_WIDTH, ROWMATCH_BLOCK_HEIGHT);
texDot->BindTexture(texDOT);
RowMatch_Kernel<<<grid, block>>>((int*)texDot->_cuData,
(int*)texMatch->_cuData, num2, distmax, ratiomax);
}
#define COLMATCH_BLOCK_WIDTH 32
//texture<int3, 1, cudaReadModeElementType> texCT;
void __global__ ColMatch_Kernel(int3*d_crt, int* d_result, int height, int num2, float distmax, float ratiomax)
{
int col = COLMATCH_BLOCK_WIDTH * blockIdx.x + threadIdx.x;
if(col >= num2) return;
int3 result = d_crt[col];//tex1Dfetch(texCT, col);
int read_idx = col + num2;
for(int i = 1; i < height; ++i, read_idx += num2)
{
int3 temp = d_crt[read_idx];//tex1Dfetch(texCT, read_idx);
result = result.x < temp.x?
make_int3(temp.x, temp.y, max(result.x, temp.z)) :
make_int3(result.x, result.y, max(result.z, temp.x));
}
float dist = acos(min(result.x * 0.000003814697265625f, 1.0));
float distn = acos(min(result.z * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[col] = (dist < distmax) && (dist < distn * ratiomax) ? result.y : -1;//? : -1;
}
void ProgramCU::GetColMatch(CuTexImage* texCRT, CuTexImage* texMatch, float distmax, float ratiomax)
{
int height = texCRT->GetImgHeight();
int num2 = texCRT->GetImgWidth();
//texCRT->BindTexture(texCT);
dim3 grid((num2 + COLMATCH_BLOCK_WIDTH -1) / COLMATCH_BLOCK_WIDTH);
dim3 block(COLMATCH_BLOCK_WIDTH);
ColMatch_Kernel<<<grid, block>>>((int3*)texCRT->_cuData, (int*) texMatch->_cuData, height, num2, distmax, ratiomax);
}
#endif
|
732252c53fcfdda8b90881c2fc0d1fe9e157e7bf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#define N 1024 // must be a power of 2.
#define BLOCKSIZE N
__global__ void RKPlusNBy2(unsigned *nelements) {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
for (int off = N / 2; off; off /= 2) {
if (id < off)
nelements[id] += nelements[id + off];
__syncthreads();
}
if (id == 0)
printf("GPU sum = %d\n", *nelements);
}
__global__ void RKNminusI(unsigned *nelements) {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
for (int off = N / 2; off; off /= 2) {
if (id < off)
nelements[id] += nelements[2 * off - id - 1];
__syncthreads();
}
if (id == 0)
printf("GPU sum = %d\n", *nelements);
}
__global__ void RKConsecutive(unsigned *nelements) {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
for (int off = N / 2; off; off /= 2) {
if (id < off)
nelements[N / off * id] += nelements[N / off * id + N / 2 / off];
__syncthreads();
}
if (id == 0)
printf("GPU sum = %d\n", *nelements);
}
int main() {
unsigned hnelements[N];
unsigned sum = 0;
for (unsigned ii = 0; ii < N; ++ii) {
hnelements[ii] = rand() % 20;
sum += hnelements[ii];
}
printf("CPU sum = %d\n", sum);
unsigned nblocks = (N + BLOCKSIZE - 1) / BLOCKSIZE;
unsigned *nelements;
hipMalloc(&nelements, N * sizeof(unsigned));
hipMemcpy(nelements, hnelements, N * sizeof(unsigned), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( RKPlusNBy2), dim3(nblocks), dim3(BLOCKSIZE), 0, 0, nelements);
hipMemcpy(nelements, hnelements, N * sizeof(unsigned), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( RKNminusI), dim3(nblocks), dim3(BLOCKSIZE), 0, 0, nelements);
hipMemcpy(nelements, hnelements, N * sizeof(unsigned), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( RKConsecutive), dim3(nblocks), dim3(BLOCKSIZE), 0, 0, nelements);
hipDeviceSynchronize();
return 0;
}
| 732252c53fcfdda8b90881c2fc0d1fe9e157e7bf.cu | #include <stdio.h>
#include <cuda.h>
#define N 1024 // must be a power of 2.
#define BLOCKSIZE N
__global__ void RKPlusNBy2(unsigned *nelements) {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
for (int off = N / 2; off; off /= 2) {
if (id < off)
nelements[id] += nelements[id + off];
__syncthreads();
}
if (id == 0)
printf("GPU sum = %d\n", *nelements);
}
__global__ void RKNminusI(unsigned *nelements) {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
for (int off = N / 2; off; off /= 2) {
if (id < off)
nelements[id] += nelements[2 * off - id - 1];
__syncthreads();
}
if (id == 0)
printf("GPU sum = %d\n", *nelements);
}
__global__ void RKConsecutive(unsigned *nelements) {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
for (int off = N / 2; off; off /= 2) {
if (id < off)
nelements[N / off * id] += nelements[N / off * id + N / 2 / off];
__syncthreads();
}
if (id == 0)
printf("GPU sum = %d\n", *nelements);
}
int main() {
unsigned hnelements[N];
unsigned sum = 0;
for (unsigned ii = 0; ii < N; ++ii) {
hnelements[ii] = rand() % 20;
sum += hnelements[ii];
}
printf("CPU sum = %d\n", sum);
unsigned nblocks = (N + BLOCKSIZE - 1) / BLOCKSIZE;
unsigned *nelements;
cudaMalloc(&nelements, N * sizeof(unsigned));
cudaMemcpy(nelements, hnelements, N * sizeof(unsigned), cudaMemcpyHostToDevice);
RKPlusNBy2<<<nblocks, BLOCKSIZE>>>(nelements);
cudaMemcpy(nelements, hnelements, N * sizeof(unsigned), cudaMemcpyHostToDevice);
RKNminusI<<<nblocks, BLOCKSIZE>>>(nelements);
cudaMemcpy(nelements, hnelements, N * sizeof(unsigned), cudaMemcpyHostToDevice);
RKConsecutive<<<nblocks, BLOCKSIZE>>>(nelements);
cudaDeviceSynchronize();
return 0;
}
|
c61bdd7507052d12868f3a9f8aa8a785329539ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
//using namespace Eigen;
using namespace std;
__device__ void setPhysicialParameters(float T, float *ce, float *pho, float *lamda)
{
float Ts = 1456.16f, Tl = 1522.69f, fs = 0.0f, L = 268000.0f;
if (T < Ts)
{
fs = 0;
*pho = 7250.0f;
*lamda = 50.0f;
*ce = 540.0f;
}
if (T >= Ts && T <= Tl)
{
fs = (Tl - T) / (Tl - Ts);
*pho = 7250.0f;
*lamda = fs * 25.0f + (1.0f - fs) * 50.0f;
*ce = 540.0f + L / (Tl - Ts);
}
if (T > Tl)
{
fs = 1;
*pho = 7250.0f;
*lamda = 28.0f;
*ce = 540.0f;
}
}
__device__ float setBoundaryCondition(int tstep, float tau, float Vcast, float *hPop, int Section, float *ccml)
{
float zposition = tstep * tau * fabs(Vcast);//(*,
float h = 0; //
for (int i = 0; i < Section; i++)
{
if (zposition >= *(ccml + i) && zposition <= *(ccml + i + 1))//h
{
h = *(hPop + blockIdx.x * Section + i);
}
}
return h;
}
__global__ void solvePDEKernel(float *hPop, float *T_Last, float *T_New, float *T_Surface, float Tw, float lamda, float pho, float ce, int ny, float dy, int nx, float dx, float tau, int tnpts, int tstep, float Vcast, int Section, float *ccml)
{
float ax, ay, T_Up, T_Down, T_Middle, T_Right, T_Left;
float h;
ax = tau * lamda / (pho * ce * dx * dx);
ay = tau * lamda / (pho * ce * dy * dy);
int i = threadIdx.x;
int j = threadIdx.y;
int tis = blockIdx.x * nx * ny + i * ny + j;
int L = ny;
setPhysicialParameters(T_Last[tis], &ce, &pho, &lamda);
h = setBoundaryCondition(tstep, tau, Vcast, hPop, Section, ccml);
if (i != 0 && i != (nx - 1) && j != 0 && j != (ny - 1))//
{
T_Right = T_Last[tis + L];
T_Left = T_Last[tis - L];
T_Middle = T_Last[tis];
T_Up = T_Last[tis + 1];
T_Down = T_Last[tis - 1];
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == 0 && j == 0)//1
{
T_Up = T_Last[tis + 1];
T_Middle = T_Last[tis];
T_Down = T_Last[tis + 1] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_Right = T_Last[tis + L];
T_Left = T_Last[tis + L] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == (nx - 1) && j == 0)//2
{
T_Up = T_Last[tis + 1];
T_Middle = T_Last[tis];
T_Down = T_Last[tis + 1] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_Left = T_Last[tis - L];
T_Right = T_Last[tis - L] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == 0 && j == (ny - 1))//3
{
T_Up = T_Last[tis - 1] - 2 * dx *h * (T_Last[tis] - Tw) / lamda;
T_Middle = T_Last[tis];
T_Down = T_Last[tis - 1];
T_Right = T_Last[tis + L];
T_Left = T_Last[tis + L] - 2 * dx *h * (T_Last[tis] - Tw) / lamda;
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == (nx - 1) && j == (ny - 1))//4
{
T_Up = T_Last[tis - 1] - 2 * dx *h * (T_Last[tis] - Tw) / lamda;
T_Middle = T_Last[tis];
T_Down = T_Last[tis - 1];
T_Right = T_Last[tis - L] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_Left = T_Last[tis - L];
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == 0 && j != 0 && j != (ny - 1))//1
{
T_Up = T_Last[tis + 1];
T_Middle = T_Last[tis];
T_Down = T_Last[tis - 1];
T_Right = T_Last[tis + L];
T_Left = T_Last[tis + L] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == (nx - 1) && j != 0 && j != (ny - 1))//2
{
T_Up = T_Last[tis + 1];
T_Middle = T_Last[tis];
T_Down = T_Last[tis - 1];
T_Right = T_Last[tis - L] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_Left = T_Last[tis - L];
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i != 0 && i != (nx - 1) && j == 0)//3
{
T_Up = T_Last[tis + 1];
T_Middle = T_Last[tis];
T_Down = T_Last[tis + 1] - 2 * dx * h* (T_Last[tis] - Tw) / lamda;
T_Right = T_Last[tis + L];
T_Left = T_Last[tis - L];
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i != 0 && i != (nx - 1) && j == (ny - 1))//4
{
T_Up = T_Last[tis - 1] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_Middle = T_Last[tis];
T_Down = T_Last[tis - 1];
T_Right = T_Last[tis + L];
T_Left = T_Last[tis - L];
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
if (i == 0 && j == int((ny - 1)/2))
T_Surface[blockIdx.x * tnpts + tstep] = T_New[tis];
T_Last[tis] = T_New[tis];
__syncthreads();
} | c61bdd7507052d12868f3a9f8aa8a785329539ae.cu | #include "includes.h"
//using namespace Eigen;
using namespace std;
__device__ void setPhysicialParameters(float T, float *ce, float *pho, float *lamda)
{
float Ts = 1456.16f, Tl = 1522.69f, fs = 0.0f, L = 268000.0f;
if (T < Ts)
{
fs = 0;
*pho = 7250.0f;
*lamda = 50.0f;
*ce = 540.0f;
}
if (T >= Ts && T <= Tl)
{
fs = (Tl - T) / (Tl - Ts);
*pho = 7250.0f;
*lamda = fs * 25.0f + (1.0f - fs) * 50.0f;
*ce = 540.0f + L / (Tl - Ts);
}
if (T > Tl)
{
fs = 1;
*pho = 7250.0f;
*lamda = 28.0f;
*ce = 540.0f;
}
}
__device__ float setBoundaryCondition(int tstep, float tau, float Vcast, float *hPop, int Section, float *ccml)
{
float zposition = tstep * tau * fabs(Vcast);//ËٶȳËÒÔʱ¼ä(ʱ¼äÍø¸ñ*Íø¸ñÊý£©,¸÷¸öÀäÈ´¶Î³¤¶È
float h = 0; //±íÃæ´«ÈÈϵÊý
for (int i = 0; i < Section; i++)
{
if (zposition >= *(ccml + i) && zposition <= *(ccml + i + 1))//ÏÞ¶¨¸÷¸öÀäÈ´¶Î£¬Ã¿¸öÀäÈ´¶Î¶ÔÓ¦Ò»¸öh
{
h = *(hPop + blockIdx.x * Section + i);
}
}
return h;
}
__global__ void solvePDEKernel(float *hPop, float *T_Last, float *T_New, float *T_Surface, float Tw, float lamda, float pho, float ce, int ny, float dy, int nx, float dx, float tau, int tnpts, int tstep, float Vcast, int Section, float *ccml)
{
float ax, ay, T_Up, T_Down, T_Middle, T_Right, T_Left;
float h;
ax = tau * lamda / (pho * ce * dx * dx);
ay = tau * lamda / (pho * ce * dy * dy);
int i = threadIdx.x;
int j = threadIdx.y;
int tis = blockIdx.x * nx * ny + i * ny + j;
int L = ny;
setPhysicialParameters(T_Last[tis], &ce, &pho, &lamda);
h = setBoundaryCondition(tstep, tau, Vcast, hPop, Section, ccml);
if (i != 0 && i != (nx - 1) && j != 0 && j != (ny - 1))//Öмä
{
T_Right = T_Last[tis + L];
T_Left = T_Last[tis - L];
T_Middle = T_Last[tis];
T_Up = T_Last[tis + 1];
T_Down = T_Last[tis - 1];
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == 0 && j == 0)//µã1
{
T_Up = T_Last[tis + 1];
T_Middle = T_Last[tis];
T_Down = T_Last[tis + 1] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_Right = T_Last[tis + L];
T_Left = T_Last[tis + L] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == (nx - 1) && j == 0)//µã2
{
T_Up = T_Last[tis + 1];
T_Middle = T_Last[tis];
T_Down = T_Last[tis + 1] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_Left = T_Last[tis - L];
T_Right = T_Last[tis - L] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == 0 && j == (ny - 1))//µã3
{
T_Up = T_Last[tis - 1] - 2 * dx *h * (T_Last[tis] - Tw) / lamda;
T_Middle = T_Last[tis];
T_Down = T_Last[tis - 1];
T_Right = T_Last[tis + L];
T_Left = T_Last[tis + L] - 2 * dx *h * (T_Last[tis] - Tw) / lamda;
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == (nx - 1) && j == (ny - 1))//µã4
{
T_Up = T_Last[tis - 1] - 2 * dx *h * (T_Last[tis] - Tw) / lamda;
T_Middle = T_Last[tis];
T_Down = T_Last[tis - 1];
T_Right = T_Last[tis - L] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_Left = T_Last[tis - L];
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == 0 && j != 0 && j != (ny - 1))//±ß1
{
T_Up = T_Last[tis + 1];
T_Middle = T_Last[tis];
T_Down = T_Last[tis - 1];
T_Right = T_Last[tis + L];
T_Left = T_Last[tis + L] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i == (nx - 1) && j != 0 && j != (ny - 1))//±ß2
{
T_Up = T_Last[tis + 1];
T_Middle = T_Last[tis];
T_Down = T_Last[tis - 1];
T_Right = T_Last[tis - L] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_Left = T_Last[tis - L];
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i != 0 && i != (nx - 1) && j == 0)//±ß3
{
T_Up = T_Last[tis + 1];
T_Middle = T_Last[tis];
T_Down = T_Last[tis + 1] - 2 * dx * h* (T_Last[tis] - Tw) / lamda;
T_Right = T_Last[tis + L];
T_Left = T_Last[tis - L];
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
else if (i != 0 && i != (nx - 1) && j == (ny - 1))//±ß4
{
T_Up = T_Last[tis - 1] - 2 * dx * h * (T_Last[tis] - Tw) / lamda;
T_Middle = T_Last[tis];
T_Down = T_Last[tis - 1];
T_Right = T_Last[tis + L];
T_Left = T_Last[tis - L];
T_New[tis] = ax * T_Right - (2 * ax + 2 * ay - 1) * T_Middle + ax * T_Left + ay * T_Up + ay * T_Down;
}
if (i == 0 && j == int((ny - 1)/2))
T_Surface[blockIdx.x * tnpts + tstep] = T_New[tis];
T_Last[tis] = T_New[tis];
__syncthreads();
} |
8e2903e9c179d393f096773045545e42fd09eddf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
__global__ void NmDistanceKernel(int b,int n, const int dim,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*16];
assert( dim <= 16 );
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*dim;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*dim+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
const float* xyz1= &(xyz[(i*n+j)*dim]) ;
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[k*dim+di]-xyz1[di];
d += dif*dif;
}
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[(k+1)*dim+di]-xyz1[di];
d += dif*dif;
}
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[(k+2)*dim+di]-xyz1[di];
d += dif*dif;
}
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[(k+3)*dim+di]-xyz1[di];
d += dif*dif;
}
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[k*dim+di]-xyz1[di];
d += dif*dif;
}
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[(k+1)*dim+di]-xyz1[di];
d += dif*dif;
}
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[(k+2)*dim+di]-xyz1[di];
d += dif*dif;
}
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[(k+3)*dim+di]-xyz1[di];
d += dif*dif;
}
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[k*dim+di]-xyz1[di];
d += dif*dif;
}
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, hipStream_t stream){
int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
const auto dim = xyz1.size(2);
if( dim != xyz2.size(2) ){
printf("dim do not match in chamfer_cuda_forward\n");
return 0;
}
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, batch_size, n, dim, xyz1.data<float>(), m, xyz2.data<float>(), dist1.data<float>(), idx1.data<int>());
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, batch_size, m, dim, xyz2.data<float>(), n, xyz1.data<float>(), dist2.data<float>(), idx2.data<int>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__global__ void NmDistanceGradKernel(int b,int n,const int dim,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
int j2=idx1[i*n+j];
float g=grad_dist1[i*n+j]*2;
for(int di=0;di<dim;++di)
{
float x1=xyz1[(i*n+j)*dim+di];
float x2=xyz2[(i*m+j2)*dim+di];
atomicAdd(&(grad_xyz1[(i*n+j)*dim+di]),g*(x1-x2));
atomicAdd(&(grad_xyz2[(i*m+j2)*dim+di]),-(g*(x1-x2)));
}
}
}
}
// int chamfer_cuda_backward(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2, hipStream_t stream){
int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2){
// hipMemset(grad_xyz1,0,b*n*3*4);
// hipMemset(grad_xyz2,0,b*m*3*4);
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
const auto dim = xyz1.size(2);
if( dim != xyz2.size(2) ){
printf("dim do not match in chamfer_cuda_forward\n");
return 0;
}
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, batch_size,n,dim,xyz1.data<float>(),m,xyz2.data<float>(),graddist1.data<float>(),idx1.data<int>(),gradxyz1.data<float>(),gradxyz2.data<float>());
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, batch_size,m,dim,xyz2.data<float>(),n,xyz1.data<float>(),graddist2.data<float>(),idx2.data<int>(),gradxyz2.data<float>(),gradxyz1.data<float>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd get grad: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__device__ inline void swapf(float & a, float & b)
{
float tmp = a;
a = b;
b = tmp;
}
__device__ inline void swap(int & a, int & b)
{
int tmp = a;
a = b ;
b = tmp;
}
__global__ void KnnKernel(int b,const int n,const int dim,const float * xyz,const int k,float * result,int * result_i){
const int size = 4096;
__shared__ float dist[size];
__shared__ int idx[size];
assert( n <= size );
for ( int bi = blockIdx.x ; bi < b ; bi += gridDim.x )
{
for ( int i = blockIdx.y ; i < n ; i += gridDim.y )
{
for ( int j = threadIdx.x ; j < n ; j += blockDim.x )
{
if( i == j ){
dist[j] = 0;
idx[j] = j;
continue;
}
float d = 0.0;
for ( int di = 0 ; di < dim ; ++di )
{
float dif = xyz[(bi*n+i)*dim+di] - xyz[(bi*n+j)*dim+di];
d += dif*dif;
}
dist[j] = d;
idx[j] = j;
}
__syncthreads();
//odd-even sort
int pownum = int(log2(float(n)));
if ( n != pow(2, pownum) ){
for ( int cnt = 0 ; cnt < ( n + 1 ) / 2 ; ++cnt )
{
for ( int j = 2*threadIdx.x + 1 ; j < n ; j += 2*blockDim.x )
{
if ( dist[j] < dist[ j - 1 ] )
{
swapf(dist[j], dist[j-1]);
swap(idx[j], idx[j-1]);
}
}
__syncthreads();
for ( int j = 2*threadIdx.x + 2 ; j < n ; j += 2*blockDim.x )
{
if ( dist[j] < dist[ j - 1 ] )
{
swapf(dist[j], dist[j-1]);
swap(idx[j], idx[j-1]);
}
}
__syncthreads();
}
}else{
//Bitonic Sort
for (unsigned int t = 2; t <= n ; t *= 2)
{
// Bitonic merge:
for (unsigned int j = t / 2; j>0; j /= 2)
{
for (unsigned int tid = threadIdx.x ; tid < n ; tid += blockDim.x )
{
unsigned int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & t) == 0)
{
if (dist[tid] > dist[ixj])
{
swapf(dist[tid], dist[ixj]);
swap(idx[tid], idx[ixj]);
}
}
else
{
if (dist[tid] < dist[ixj])
{
swapf(dist[tid], dist[ixj]);
swap(idx[tid], idx[ixj]);
}
}
}
}
__syncthreads();
}
}
}
__syncthreads();
//copy result
for ( int j = threadIdx.x ; j < k ; j += blockDim.x )
{
result[(bi*n+i)*k+j] = dist[j+1];
result_i[ ((bi*n+i)*k+j)*2+0 ] = bi;
result_i[ ((bi*n+i)*k+j)*2+1 ] = idx[j+1];
}
}
}
}
int knn_cuda(at::Tensor xyz,at::Tensor k,at::Tensor dist,at::Tensor idx)
{
const auto bs = xyz.size(0);
const auto n = xyz.size(1); //num_points point cloud
const auto d = xyz.size(2);
int k_ = k.data<int>()[0];
hipLaunchKernelGGL(( KnnKernel), dim3(dim3(bs,16,1)),dim3(512), 0, 0, bs,n,d,xyz.data<float>(),k_,dist.data<float>(),idx.data<int>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd Knn: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
__global__ void interpKernel(const int b, const int p,const int L,const int H,const int W,const float* z,const float* prob,int* idx,float* w,float* p)
{
float stepy = 1.0 / float(H - 1);
float stepx = 1.0 / float(W - 1);
for ( int bi = blockIdx.x ; bi < b ; bi += gridDim.x )
for ( int pi = blockIdx.y; pi < p ; pi += gridDim.y )
for ( int li = threadIdx.x; li < L ; li += blockDim.x )
{
float zx = z[((bi*p+pi)*2+0)*L+li];
float zy = z[((bi*p+pi)*2+1)*L+li];
if( zx < 0.0 || zy < 0.0 || zx >= 1.0 || zy >= 1.0 )
{
p[(bi*p+pi)*L+Li] = 0.0;
for( int i = 0 ; i < 4 ; i ++)
{
idx[((bi*p+pi)*2+0)*4+i)*L+li] = -1;
idx[((bi*p+pi)*2+1)*4+i)*L+li] = -1;
w[((bi*p+pi)*4+i)*L+li] = 0.0;
}
continue;
}
int zxn = int(zx / stepx);
int zyn = int(zy / stepy);
//
idx[((bi*p+pi)*2+0)*4+0)*L+li] = zxn;
idx[((bi*p+pi)*2+1)*4+0)*L+li] = zyn;
idx[((bi*p+pi)*2+0)*4+1)*L+li] = zxn;
idx[((bi*p+pi)*2+1)*4+1)*L+li] = zyn+1;
idx[((bi*p+pi)*2+0)*4+2)*L+li] = zxn+1;
idx[((bi*p+pi)*2+1)*4+2)*L+li] = zyn;
idx[((bi*p+pi)*2+0)*4+3)*L+li] = zxn+1;
idx[((bi*p+pi)*2+1)*4+3)*L+li] = zyn+1;
//
float x1w = zx - zxn*stepx;
float x2w = (zxn+1)*stepx - zx;
float y1w = zy - zyn*stepy;
float y2w = (zyn+1)*stepy - zy;
//
float w1 = y2w*x2w/((y1w+y2w)*(x1w+x2w));
w[((bi*p+pi)*4+0)*L+li] = w1;
float w2 = y1w*x2w/((y1w+y2w)*(x1w+x2w));
w[((bi*p+pi)*4+1)*L+li] = w2;
float w3 = y2w*x1w/((y1w+y2w)*(x1w+x2w));
w[((bi*p+pi)*4+2)*L+li] = w3
float w4 = y1w*x1w/((y1w+y2w)*(x1w+x2w));
w[((bi*p+pi)*4+3)*L+li] = w4;
//
float p1 = prob[((bi*p+pi)*H+zyn)*W+zxn];
float p2 = prob[((bi*p+pi)*H+zyn+1)*W+zxn];
float p3 = prob[((bi*p+pi)*H+zyn)*W+zxn+1];
float p4 = prob[((bi*p+pi)*H+zyn+1)*W+zxn+1];
//
p[(bi*p+pi)*L+Li] = p1*w1+p2*w2+p3*w3+p4*w4;
}
}
int interp_cuda_forward(at::Tensor z,at::Tensor prob,at::Tensor idx,at::Tensor w,at::Tensor p)
{
const auto b = z.size(0);
const auto p = z.size(1);
const auto L = z.size(3);
const auto H = prob.size(3);
const auto W = prob.size(4);
hipLaunchKernelGGL(( interpKernel), dim3(dim3(b,25,1)),dim3(512), 0, 0, b,p,L,H,W,z.data<float>(),prob.data<float>(),idx.data<int>(),w.data<float>(),p.data<float>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd Knn: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
__global__ void interpGradKernel(const int b, const int p,const int L,const int H,const int W,const float* grad,const int* idx,const float* w,float* gradp)
{
float stepy = 1.0 / float(H - 1);
float stepx = 1.0 / float(W - 1);
for ( int bi = blockIdx.x ; bi < b ; bi += gridDim.x )
for ( int pi = blockIdx.y; pi < p ; pi += gridDim.y )
for ( int li = threadIdx.x; li < L ; li += blockDim.x )
{
float g = grad[((bi*p+pi)*L+li];
for(int i = 0 ; i < 4; i++)
{
float wv = w[((bi*p+pi)*4+i)*L+li];
const int x = idx[((bi*p+pi)*2+0)*4+i)*L+li];
const int y = idx[((bi*p+pi)*2+1)*4+i)*L+li];
if((x == -1) || (y == -1))break;
atomicAdd(&(prob[((bi*p+pi)*H+y)*W+x]),g*wv);
}
}
}
int interp_cuda_backward(at::Tensor grad,at::Tensor idx,at::Tensor w,at::Tensor gradp)
{
const auto b = grad.size(0);
const auto p = grad.size(1);
const auto L = grad.size(2);
const auto H = gradp.size(3);
const auto W = gradp.size(4);
hipLaunchKernelGGL(( interpGradKernel), dim3(dim3(b,25,1)),dim3(512), 0, 0, b,p,L,H,W,grad.data<float>(),idx.data<int>(),w.data<float>(),gradp.data<float>())
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd Knn: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
__global__ void selectKernel(const int b, const int p,const int dim,const int L,const int N,const float* in,const bool* select,int* idx,float* out)
{
for ( int bi = blockIdx.x ; bi < b ; bi += gridDim.x )
for ( int pi = blockIdx.y; pi < p ; pi += gridDim.y )
{
for ( int ni = threadIdx.x; ni < N ; ni += blockDim.x )
{
idx[(bi*p+pi)*N+ni] = -1;
}
__syncthreads();
for ( int li = threadIdx.x; li < L ; li += blockDim.x )
{
if( select[(bi*p+pi)*L+Li] )
{
for( int ni = 0 ; ni < N ; ni ++ )
{
int v = atomicExch(&(idx[(bi*p+pi)*N+ni]),li)
if( v == -1 )
{
for(int di=0;di<dim;++di)
{
out[((bi*p+pi)*dim+di)*N+ni] = in[((bi*p+pi)*dim+di)*L+li];
}
break;
}else{
atomicExch(&(idx[(bi*p+pi)*N+ni]),v);
}
}
}
}
}
}
int select_cuda_forward(at::Tensor in,at::Tensor select,at::Tensor idx,at::Tensor out)
{
const auto b = in.size(0);
const auto p = in.size(1);
const auto L = in.size(-1);
const auto d = in.dim();
const int dim = 1;
if(d == 3)
{
dim = 1;
}else if(d == 4){
dim = in.size(2);
}else{
printf("input tensor must be (B,P,C,L) or (B,P,L)");
return 0;
}
const auto N = out.size(-1);
hipLaunchKernelGGL(( selectKernel), dim3(dim3(b,25,1)),dim3(512), 0, 0, b,p,dim,L,N,in.data<float>(),select.data<bool>(),idx.data<int>(),out.data<float>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd Knn: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
__global__ void selectGradKernel(const int b, const int p,const int dim,const int L,const int N,const float* outgrad,const int* idx,float* ingrad)
{
for ( int bi = blockIdx.x ; bi < b ; bi += gridDim.x )
for ( int pi = blockIdx.y; pi < p ; pi += gridDim.y )
{
for ( int ni = threadIdx.x; ni < N ; ni += blockDim.x )
{
int li = idx[(bi*p+pi)*N+ni];
for(int di=0;di<dim;++di)
{
ingrad[((bi*p+pi)*dim+di)*L+li] = outgrad[((bi*p+pi)*dim+di)*N+ni];
}
}
}
}
int select_cuda_backward(at::Tensor outgrad,at::Tensor idx,at::Tensor ingrad)
{
const auto b = outgrad.size(0);
const auto p = outgrad.size(1);
const auto N = outgrad.size(-1);
const auto d = outgrad.dim();
const auto L = ingrad.size(-1);
const int dim = 1;
if(d == 3)
{
dim = 1;
}else if(d == 4){
dim = in.size(2);
}else{
printf("input tensor must be (B,P,C,L) or (B,P,L)");
return 0;
}
const auto N = out.size(-1);
hipLaunchKernelGGL(( selectGradKernel), dim3(dim3(b,25,1)),dim3(512), 0, 0, b,p,dim,L,N,outgrad.data<float>(),idx.data<int>(),ingrad.data<float>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd Knn: %s\n", hipGetErrorString(err));
return 0;
}
return 1;
}
| 8e2903e9c179d393f096773045545e42fd09eddf.cu | #include <stdio.h>
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
__global__ void NmDistanceKernel(int b,int n, const int dim,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*16];
assert( dim <= 16 );
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*dim;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*dim+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
const float* xyz1= &(xyz[(i*n+j)*dim]) ;
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&3);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[k*dim+di]-xyz1[di];
d += dif*dif;
}
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[(k+1)*dim+di]-xyz1[di];
d += dif*dif;
}
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[(k+2)*dim+di]-xyz1[di];
d += dif*dif;
}
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[(k+3)*dim+di]-xyz1[di];
d += dif*dif;
}
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[k*dim+di]-xyz1[di];
d += dif*dif;
}
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[(k+1)*dim+di]-xyz1[di];
d += dif*dif;
}
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[(k+2)*dim+di]-xyz1[di];
d += dif*dif;
}
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[(k+3)*dim+di]-xyz1[di];
d += dif*dif;
}
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float d = 0.0;
for(int di=0;di<dim;++di)
{
float dif=buf[k*dim+di]-xyz1[di];
d += dif*dif;
}
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, cudaStream_t stream){
int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
const auto dim = xyz1.size(2);
if( dim != xyz2.size(2) ){
printf("dim do not match in chamfer_cuda_forward\n");
return 0;
}
NmDistanceKernel<<<dim3(32,16,1),512>>>(batch_size, n, dim, xyz1.data<float>(), m, xyz2.data<float>(), dist1.data<float>(), idx1.data<int>());
NmDistanceKernel<<<dim3(32,16,1),512>>>(batch_size, m, dim, xyz2.data<float>(), n, xyz1.data<float>(), dist2.data<float>(), idx2.data<int>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__global__ void NmDistanceGradKernel(int b,int n,const int dim,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
int j2=idx1[i*n+j];
float g=grad_dist1[i*n+j]*2;
for(int di=0;di<dim;++di)
{
float x1=xyz1[(i*n+j)*dim+di];
float x2=xyz2[(i*m+j2)*dim+di];
atomicAdd(&(grad_xyz1[(i*n+j)*dim+di]),g*(x1-x2));
atomicAdd(&(grad_xyz2[(i*m+j2)*dim+di]),-(g*(x1-x2)));
}
}
}
}
// int chamfer_cuda_backward(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2, cudaStream_t stream){
int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2){
// cudaMemset(grad_xyz1,0,b*n*3*4);
// cudaMemset(grad_xyz2,0,b*m*3*4);
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
const auto dim = xyz1.size(2);
if( dim != xyz2.size(2) ){
printf("dim do not match in chamfer_cuda_forward\n");
return 0;
}
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(batch_size,n,dim,xyz1.data<float>(),m,xyz2.data<float>(),graddist1.data<float>(),idx1.data<int>(),gradxyz1.data<float>(),gradxyz2.data<float>());
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(batch_size,m,dim,xyz2.data<float>(),n,xyz1.data<float>(),graddist2.data<float>(),idx2.data<int>(),gradxyz2.data<float>(),gradxyz1.data<float>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd get grad: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__device__ inline void swapf(float & a, float & b)
{
float tmp = a;
a = b;
b = tmp;
}
__device__ inline void swap(int & a, int & b)
{
int tmp = a;
a = b ;
b = tmp;
}
__global__ void KnnKernel(int b,const int n,const int dim,const float * xyz,const int k,float * result,int * result_i){
const int size = 4096;
__shared__ float dist[size];
__shared__ int idx[size];
assert( n <= size );
for ( int bi = blockIdx.x ; bi < b ; bi += gridDim.x )
{
for ( int i = blockIdx.y ; i < n ; i += gridDim.y )
{
for ( int j = threadIdx.x ; j < n ; j += blockDim.x )
{
if( i == j ){
dist[j] = 0;
idx[j] = j;
continue;
}
float d = 0.0;
for ( int di = 0 ; di < dim ; ++di )
{
float dif = xyz[(bi*n+i)*dim+di] - xyz[(bi*n+j)*dim+di];
d += dif*dif;
}
dist[j] = d;
idx[j] = j;
}
__syncthreads();
//odd-even sort
int pownum = int(log2(float(n)));
if ( n != pow(2, pownum) ){
for ( int cnt = 0 ; cnt < ( n + 1 ) / 2 ; ++cnt )
{
for ( int j = 2*threadIdx.x + 1 ; j < n ; j += 2*blockDim.x )
{
if ( dist[j] < dist[ j - 1 ] )
{
swapf(dist[j], dist[j-1]);
swap(idx[j], idx[j-1]);
}
}
__syncthreads();
for ( int j = 2*threadIdx.x + 2 ; j < n ; j += 2*blockDim.x )
{
if ( dist[j] < dist[ j - 1 ] )
{
swapf(dist[j], dist[j-1]);
swap(idx[j], idx[j-1]);
}
}
__syncthreads();
}
}else{
//Bitonic Sort
for (unsigned int t = 2; t <= n ; t *= 2)
{
// Bitonic merge:
for (unsigned int j = t / 2; j>0; j /= 2)
{
for (unsigned int tid = threadIdx.x ; tid < n ; tid += blockDim.x )
{
unsigned int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & t) == 0)
{
if (dist[tid] > dist[ixj])
{
swapf(dist[tid], dist[ixj]);
swap(idx[tid], idx[ixj]);
}
}
else
{
if (dist[tid] < dist[ixj])
{
swapf(dist[tid], dist[ixj]);
swap(idx[tid], idx[ixj]);
}
}
}
}
__syncthreads();
}
}
}
__syncthreads();
//copy result
for ( int j = threadIdx.x ; j < k ; j += blockDim.x )
{
result[(bi*n+i)*k+j] = dist[j+1];
result_i[ ((bi*n+i)*k+j)*2+0 ] = bi;
result_i[ ((bi*n+i)*k+j)*2+1 ] = idx[j+1];
}
}
}
}
int knn_cuda(at::Tensor xyz,at::Tensor k,at::Tensor dist,at::Tensor idx)
{
const auto bs = xyz.size(0);
const auto n = xyz.size(1); //num_points point cloud
const auto d = xyz.size(2);
int k_ = k.data<int>()[0];
KnnKernel<<<dim3(bs,16,1),512>>>(bs,n,d,xyz.data<float>(),k_,dist.data<float>(),idx.data<int>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd Knn: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
__global__ void interpKernel(const int b, const int p,const int L,const int H,const int W,const float* z,const float* prob,int* idx,float* w,float* p)
{
float stepy = 1.0 / float(H - 1);
float stepx = 1.0 / float(W - 1);
for ( int bi = blockIdx.x ; bi < b ; bi += gridDim.x )
for ( int pi = blockIdx.y; pi < p ; pi += gridDim.y )
for ( int li = threadIdx.x; li < L ; li += blockDim.x )
{
float zx = z[((bi*p+pi)*2+0)*L+li];
float zy = z[((bi*p+pi)*2+1)*L+li];
if( zx < 0.0 || zy < 0.0 || zx >= 1.0 || zy >= 1.0 )
{
p[(bi*p+pi)*L+Li] = 0.0;
for( int i = 0 ; i < 4 ; i ++)
{
idx[((bi*p+pi)*2+0)*4+i)*L+li] = -1;
idx[((bi*p+pi)*2+1)*4+i)*L+li] = -1;
w[((bi*p+pi)*4+i)*L+li] = 0.0;
}
continue;
}
int zxn = int(zx / stepx);
int zyn = int(zy / stepy);
//
idx[((bi*p+pi)*2+0)*4+0)*L+li] = zxn;
idx[((bi*p+pi)*2+1)*4+0)*L+li] = zyn;
idx[((bi*p+pi)*2+0)*4+1)*L+li] = zxn;
idx[((bi*p+pi)*2+1)*4+1)*L+li] = zyn+1;
idx[((bi*p+pi)*2+0)*4+2)*L+li] = zxn+1;
idx[((bi*p+pi)*2+1)*4+2)*L+li] = zyn;
idx[((bi*p+pi)*2+0)*4+3)*L+li] = zxn+1;
idx[((bi*p+pi)*2+1)*4+3)*L+li] = zyn+1;
//
float x1w = zx - zxn*stepx;
float x2w = (zxn+1)*stepx - zx;
float y1w = zy - zyn*stepy;
float y2w = (zyn+1)*stepy - zy;
//
float w1 = y2w*x2w/((y1w+y2w)*(x1w+x2w));
w[((bi*p+pi)*4+0)*L+li] = w1;
float w2 = y1w*x2w/((y1w+y2w)*(x1w+x2w));
w[((bi*p+pi)*4+1)*L+li] = w2;
float w3 = y2w*x1w/((y1w+y2w)*(x1w+x2w));
w[((bi*p+pi)*4+2)*L+li] = w3
float w4 = y1w*x1w/((y1w+y2w)*(x1w+x2w));
w[((bi*p+pi)*4+3)*L+li] = w4;
//
float p1 = prob[((bi*p+pi)*H+zyn)*W+zxn];
float p2 = prob[((bi*p+pi)*H+zyn+1)*W+zxn];
float p3 = prob[((bi*p+pi)*H+zyn)*W+zxn+1];
float p4 = prob[((bi*p+pi)*H+zyn+1)*W+zxn+1];
//
p[(bi*p+pi)*L+Li] = p1*w1+p2*w2+p3*w3+p4*w4;
}
}
int interp_cuda_forward(at::Tensor z,at::Tensor prob,at::Tensor idx,at::Tensor w,at::Tensor p)
{
const auto b = z.size(0);
const auto p = z.size(1);
const auto L = z.size(3);
const auto H = prob.size(3);
const auto W = prob.size(4);
interpKernel<<<dim3(b,25,1),512>>>(b,p,L,H,W,z.data<float>(),prob.data<float>(),idx.data<int>(),w.data<float>(),p.data<float>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd Knn: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
__global__ void interpGradKernel(const int b, const int p,const int L,const int H,const int W,const float* grad,const int* idx,const float* w,float* gradp)
{
float stepy = 1.0 / float(H - 1);
float stepx = 1.0 / float(W - 1);
for ( int bi = blockIdx.x ; bi < b ; bi += gridDim.x )
for ( int pi = blockIdx.y; pi < p ; pi += gridDim.y )
for ( int li = threadIdx.x; li < L ; li += blockDim.x )
{
float g = grad[((bi*p+pi)*L+li];
for(int i = 0 ; i < 4; i++)
{
float wv = w[((bi*p+pi)*4+i)*L+li];
const int x = idx[((bi*p+pi)*2+0)*4+i)*L+li];
const int y = idx[((bi*p+pi)*2+1)*4+i)*L+li];
if((x == -1) || (y == -1))break;
atomicAdd(&(prob[((bi*p+pi)*H+y)*W+x]),g*wv);
}
}
}
int interp_cuda_backward(at::Tensor grad,at::Tensor idx,at::Tensor w,at::Tensor gradp)
{
const auto b = grad.size(0);
const auto p = grad.size(1);
const auto L = grad.size(2);
const auto H = gradp.size(3);
const auto W = gradp.size(4);
interpGradKernel<<<dim3(b,25,1),512>>>(b,p,L,H,W,grad.data<float>(),idx.data<int>(),w.data<float>(),gradp.data<float>())
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd Knn: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
__global__ void selectKernel(const int b, const int p,const int dim,const int L,const int N,const float* in,const bool* select,int* idx,float* out)
{
for ( int bi = blockIdx.x ; bi < b ; bi += gridDim.x )
for ( int pi = blockIdx.y; pi < p ; pi += gridDim.y )
{
for ( int ni = threadIdx.x; ni < N ; ni += blockDim.x )
{
idx[(bi*p+pi)*N+ni] = -1;
}
__syncthreads();
for ( int li = threadIdx.x; li < L ; li += blockDim.x )
{
if( select[(bi*p+pi)*L+Li] )
{
for( int ni = 0 ; ni < N ; ni ++ )
{
int v = atomicExch(&(idx[(bi*p+pi)*N+ni]),li)
if( v == -1 )
{
for(int di=0;di<dim;++di)
{
out[((bi*p+pi)*dim+di)*N+ni] = in[((bi*p+pi)*dim+di)*L+li];
}
break;
}else{
atomicExch(&(idx[(bi*p+pi)*N+ni]),v);
}
}
}
}
}
}
int select_cuda_forward(at::Tensor in,at::Tensor select,at::Tensor idx,at::Tensor out)
{
const auto b = in.size(0);
const auto p = in.size(1);
const auto L = in.size(-1);
const auto d = in.dim();
const int dim = 1;
if(d == 3)
{
dim = 1;
}else if(d == 4){
dim = in.size(2);
}else{
printf("input tensor must be (B,P,C,L) or (B,P,L)");
return 0;
}
const auto N = out.size(-1);
selectKernel<<<dim3(b,25,1),512>>>(b,p,dim,L,N,in.data<float>(),select.data<bool>(),idx.data<int>(),out.data<float>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd Knn: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
__global__ void selectGradKernel(const int b, const int p,const int dim,const int L,const int N,const float* outgrad,const int* idx,float* ingrad)
{
for ( int bi = blockIdx.x ; bi < b ; bi += gridDim.x )
for ( int pi = blockIdx.y; pi < p ; pi += gridDim.y )
{
for ( int ni = threadIdx.x; ni < N ; ni += blockDim.x )
{
int li = idx[(bi*p+pi)*N+ni];
for(int di=0;di<dim;++di)
{
ingrad[((bi*p+pi)*dim+di)*L+li] = outgrad[((bi*p+pi)*dim+di)*N+ni];
}
}
}
}
int select_cuda_backward(at::Tensor outgrad,at::Tensor idx,at::Tensor ingrad)
{
const auto b = outgrad.size(0);
const auto p = outgrad.size(1);
const auto N = outgrad.size(-1);
const auto d = outgrad.dim();
const auto L = ingrad.size(-1);
const int dim = 1;
if(d == 3)
{
dim = 1;
}else if(d == 4){
dim = in.size(2);
}else{
printf("input tensor must be (B,P,C,L) or (B,P,L)");
return 0;
}
const auto N = out.size(-1);
selectGradKernel<<<dim3(b,25,1),512>>>(b,p,dim,L,N,outgrad.data<float>(),idx.data<int>(),ingrad.data<float>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd Knn: %s\n", cudaGetErrorString(err));
return 0;
}
return 1;
}
|
2c0268057b0d1b48c8bc2b2df6d00be7d9dd2ea9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include <cmath>
#include "caffe/layers/label_specific_add_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void LabelSpecificAddForward(const int n, const int dim, const Dtype* label,
Dtype* top_data, Dtype bias) {
CUDA_KERNEL_LOOP(index, n) {
int gt = static_cast<int>(label[index]);
if (top_data[index * dim + gt] > -bias) top_data[index * dim + gt] += bias;
}
}
template <typename Dtype>
void LabelSpecificAddLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* label_data = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int num = bottom[0]->num();
int count = bottom[0]->count();
int dim = count / num;
if (top[0] != bottom[0]) caffe_copy(count, bottom_data, top_data);
if (!transform_test_ && this->phase_ == TEST) return;
if (anneal_bias_) {
bias_ = bias_base_ + pow(((Dtype)1. + bias_gamma_ * iteration_), bias_power_) - (Dtype)1.;
bias_ = ::max(bias_, bias_min_);
bias_ = ::min(bias_, bias_max_);
iteration_++;
}
if (top.size() == 2) {
top[1]->mutable_cpu_data()[0] = bias_;
}
// NOLINT_NEXT_LINE(whitespace/operators)
LabelSpecificAddForward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, label_data, top_data, bias_);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void LabelSpecificAddLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (top[0] != bottom[0] && propagate_down[0]) {
int count = bottom[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_copy(count, top_diff, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LabelSpecificAddLayer);
} // namespace caffe | 2c0268057b0d1b48c8bc2b2df6d00be7d9dd2ea9.cu | #include <algorithm>
#include <vector>
#include <cmath>
#include "caffe/layers/label_specific_add_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void LabelSpecificAddForward(const int n, const int dim, const Dtype* label,
Dtype* top_data, Dtype bias) {
CUDA_KERNEL_LOOP(index, n) {
int gt = static_cast<int>(label[index]);
if (top_data[index * dim + gt] > -bias) top_data[index * dim + gt] += bias;
}
}
template <typename Dtype>
void LabelSpecificAddLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* label_data = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int num = bottom[0]->num();
int count = bottom[0]->count();
int dim = count / num;
if (top[0] != bottom[0]) caffe_copy(count, bottom_data, top_data);
if (!transform_test_ && this->phase_ == TEST) return;
if (anneal_bias_) {
bias_ = bias_base_ + pow(((Dtype)1. + bias_gamma_ * iteration_), bias_power_) - (Dtype)1.;
bias_ = std::max(bias_, bias_min_);
bias_ = std::min(bias_, bias_max_);
iteration_++;
}
if (top.size() == 2) {
top[1]->mutable_cpu_data()[0] = bias_;
}
// NOLINT_NEXT_LINE(whitespace/operators)
LabelSpecificAddForward<Dtype> << <CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS >> > (
num, dim, label_data, top_data, bias_);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
void LabelSpecificAddLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (top[0] != bottom[0] && propagate_down[0]) {
int count = bottom[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_copy(count, top_diff, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LabelSpecificAddLayer);
} // namespace caffe |
524fd5867f9c3b420fae1c7c7d0d9b9d963d7550.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void add(int size, int* a, int* b, int* c)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < size)
{
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
extern "C"
void VectorAdd(int size, int* a, int* b,int *c)
{
int* dev_a, * dev_b, * dev_c;
hipMalloc((void**)&dev_a, size * sizeof(int));
hipMalloc((void**)&dev_b, size * sizeof(int));
hipMalloc((void**)&dev_c, size * sizeof(int));
hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
add << <128, 128 >> > (size, dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
} | 524fd5867f9c3b420fae1c7c7d0d9b9d963d7550.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void add(int size, int* a, int* b, int* c)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
while (tid < size)
{
c[tid] = a[tid] + b[tid];
tid += blockDim.x * gridDim.x;
}
}
extern "C"
void VectorAdd(int size, int* a, int* b,int *c)
{
int* dev_a, * dev_b, * dev_c;
cudaMalloc((void**)&dev_a, size * sizeof(int));
cudaMalloc((void**)&dev_b, size * sizeof(int));
cudaMalloc((void**)&dev_c, size * sizeof(int));
cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
add << <128, 128 >> > (size, dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
} |
55af6829037fea843c0739688fe317ba4daadc58.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Host Side Code for Cross-correlation in GPU
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
//#include "Cross_Data_type.h"
#include "corr2Mex.h"
#include "normXcorr_GPUKernel_Final.cu"
using namespace std;
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width,int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
bool CompareResults(float* A, float* B, int elements, float eps);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
//// Cuda Kernel Call //////
void CorrelationOnDevice(const Matrix Pre, const Matrix Post, params parameters,float *quality,float *dpX, float *dpY)
{
// Load Pre and Post to the device
Matrix Pred = AllocateDeviceMatrix(Pre);
CopyToDeviceMatrix(Pred, Pre);
Matrix Postd = AllocateDeviceMatrix(Post);
CopyToDeviceMatrix(Postd, Post);
// Allocate Space for Pre-Mean
float *preMean;
float *preVar;
hipMalloc((void **)&preMean,sizeof(float)*parameters.numX*parameters.numY);
hipMalloc((void **)&preVar,sizeof(float)*parameters.numX*parameters.numY);
//Allocate Space for Post-mean
float *postMean;
float *postVar;
hipMalloc((void **)&postMean,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY);
hipMalloc((void **)&postVar,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY);
// Device Memory Allocation for Cross-correlation Result
float *CorrD;
hipMalloc((void **)&CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY);
//Initialize Values for Displacement Results
float *qualityD;
hipMalloc((void **)&qualityD,sizeof(float)*parameters.numX*parameters.numY);
int *dpX_D;
hipMalloc((void **)&dpX_D,sizeof(int)*parameters.numX*parameters.numY);
int *dpY_D;
hipMalloc((void **)&dpY_D,sizeof(int)*parameters.numX*parameters.numY);
float *dpX_sD;
hipMalloc((void **)&dpX_sD,sizeof(float)*parameters.numX*parameters.numY);
float *dpY_sD;
hipMalloc((void **)&dpY_sD,sizeof(float)*parameters.numX*parameters.numY);
// Setup the execution configuration
dim3 dimBlock(parameters.searchX, parameters.searchY);
dim3 dimGrid(parameters.numX, parameters.numY);
// Launch the device computation threads!
hipLaunchKernelGGL(( normXcorr_GPU), dim3(dimGrid), dim3(dimBlock), parameters.kernelX*parameters.kernelY*sizeof(float), 0, Pred,Postd,CorrD,parameters,preMean,preVar,postMean,postVar);
int smemSize = 1024*sizeof(int)+1024*sizeof(float);
hipLaunchKernelGGL(( MaxElement), dim3(dimGrid),dim3(1024),smemSize, 0, CorrD,parameters,qualityD,dpX_D,dpY_D);
int numthreads = 512;
int numblocks = (parameters.numX*parameters.numY + numthreads -1)/numthreads;
hipLaunchKernelGGL(( subsample), dim3(numblocks),dim3(numthreads), 0, 0, CorrD,parameters,qualityD,dpX_D,dpY_D,dpX_sD,dpY_sD);
//hipMemcpy(CorrH,CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY,hipMemcpyDeviceToHost);
hipMemcpy(quality,qualityD,sizeof(float)*parameters.numX*parameters.numY,hipMemcpyDeviceToHost);
hipMemcpy(dpX,dpX_sD,sizeof(float)*parameters.numX*parameters.numY,hipMemcpyDeviceToHost);
hipMemcpy(dpY,dpY_sD,sizeof(float)*parameters.numX*parameters.numY,hipMemcpyDeviceToHost);
// Free device matrices
FreeDeviceMatrix(&Pred);
FreeDeviceMatrix(&Postd);
hipFree(CorrD);
hipFree(qualityD);
hipFree(dpY_D);
hipFree(dpX_D);
hipFree(dpY_sD);
hipFree(dpX_sD);
hipFree(preMean);
hipFree(preVar);
hipFree(postMean);
hipFree(postVar);
//FreeDeviceMatrix(&Corrd);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
Matrix AllocateMatrix(int height, int width,int init) // 1 is file read/ 0 is just allocation
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
FILE *fp;
fp = fopen("trialNumbers.inp","r");
// don't allocate memory on option 2
M.elements = (float*) malloc(size*sizeof(float));
if(init)
{
for(unsigned int i = 0; i < M.width * M.height; i++)
{
fscanf(fp,"%f",&M.elements[i]);
}
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size,
hipMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
| 55af6829037fea843c0739688fe317ba4daadc58.cu | // Host Side Code for Cross-correlation in GPU
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
//#include "Cross_Data_type.h"
#include "corr2Mex.h"
#include "normXcorr_GPUKernel_Final.cu"
using namespace std;
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width,int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
bool CompareResults(float* A, float* B, int elements, float eps);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
//// Cuda Kernel Call //////
void CorrelationOnDevice(const Matrix Pre, const Matrix Post, params parameters,float *quality,float *dpX, float *dpY)
{
// Load Pre and Post to the device
Matrix Pred = AllocateDeviceMatrix(Pre);
CopyToDeviceMatrix(Pred, Pre);
Matrix Postd = AllocateDeviceMatrix(Post);
CopyToDeviceMatrix(Postd, Post);
// Allocate Space for Pre-Mean
float *preMean;
float *preVar;
cudaMalloc((void **)&preMean,sizeof(float)*parameters.numX*parameters.numY);
cudaMalloc((void **)&preVar,sizeof(float)*parameters.numX*parameters.numY);
//Allocate Space for Post-mean
float *postMean;
float *postVar;
cudaMalloc((void **)&postMean,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY);
cudaMalloc((void **)&postVar,sizeof(float)*parameters.searchX*parameters.searchY*parameters.numX*parameters.numY);
// Device Memory Allocation for Cross-correlation Result
float *CorrD;
cudaMalloc((void **)&CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY);
//Initialize Values for Displacement Results
float *qualityD;
cudaMalloc((void **)&qualityD,sizeof(float)*parameters.numX*parameters.numY);
int *dpX_D;
cudaMalloc((void **)&dpX_D,sizeof(int)*parameters.numX*parameters.numY);
int *dpY_D;
cudaMalloc((void **)&dpY_D,sizeof(int)*parameters.numX*parameters.numY);
float *dpX_sD;
cudaMalloc((void **)&dpX_sD,sizeof(float)*parameters.numX*parameters.numY);
float *dpY_sD;
cudaMalloc((void **)&dpY_sD,sizeof(float)*parameters.numX*parameters.numY);
// Setup the execution configuration
dim3 dimBlock(parameters.searchX, parameters.searchY);
dim3 dimGrid(parameters.numX, parameters.numY);
// Launch the device computation threads!
normXcorr_GPU<<<dimGrid, dimBlock, parameters.kernelX*parameters.kernelY*sizeof(float)>>>(Pred,Postd,CorrD,parameters,preMean,preVar,postMean,postVar);
int smemSize = 1024*sizeof(int)+1024*sizeof(float);
MaxElement<<<dimGrid,1024,smemSize>>>(CorrD,parameters,qualityD,dpX_D,dpY_D);
int numthreads = 512;
int numblocks = (parameters.numX*parameters.numY + numthreads -1)/numthreads;
subsample<<<numblocks,numthreads>>>(CorrD,parameters,qualityD,dpX_D,dpY_D,dpX_sD,dpY_sD);
//cudaMemcpy(CorrH,CorrD,sizeof(float)*parameters.numX*parameters.numY*parameters.searchX*parameters.searchY,cudaMemcpyDeviceToHost);
cudaMemcpy(quality,qualityD,sizeof(float)*parameters.numX*parameters.numY,cudaMemcpyDeviceToHost);
cudaMemcpy(dpX,dpX_sD,sizeof(float)*parameters.numX*parameters.numY,cudaMemcpyDeviceToHost);
cudaMemcpy(dpY,dpY_sD,sizeof(float)*parameters.numX*parameters.numY,cudaMemcpyDeviceToHost);
// Free device matrices
FreeDeviceMatrix(&Pred);
FreeDeviceMatrix(&Postd);
cudaFree(CorrD);
cudaFree(qualityD);
cudaFree(dpY_D);
cudaFree(dpX_D);
cudaFree(dpY_sD);
cudaFree(dpX_sD);
cudaFree(preMean);
cudaFree(preVar);
cudaFree(postMean);
cudaFree(postVar);
//FreeDeviceMatrix(&Corrd);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
Matrix AllocateMatrix(int height, int width,int init) // 1 is file read/ 0 is just allocation
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
FILE *fp;
fp = fopen("trialNumbers.inp","r");
// don't allocate memory on option 2
M.elements = (float*) malloc(size*sizeof(float));
if(init)
{
for(unsigned int i = 0; i < M.width * M.height; i++)
{
fscanf(fp,"%f",&M.elements[i]);
}
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,
cudaMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
|
29335f74d6756b3369de1c69debc5d08b2095cf9.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/fill.h>
#include <thrust/device_ptr.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/tree_model.h"
#include "device_helpers_hip.cuh"
namespace xgboost {
// the handler to call instead of hipSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
void SetCudaSetDeviceHandler(void (*handler)(int)) {
cudaSetDeviceHandler = handler;
}
template <typename T>
class HostDeviceVectorImpl {
public:
HostDeviceVectorImpl(size_t size, T v, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_->resize(size, v);
} else {
data_h_.resize(size, v);
}
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
LazyResizeDevice(init.size());
Copy(init);
} else {
data_h_ = init;
}
}
HostDeviceVectorImpl(HostDeviceVectorImpl<T>&& that) :
device_{that.device_},
data_h_{std::move(that.data_h_)},
data_d_{std::move(that.data_d_)},
gpu_access_{that.gpu_access_} {}
~HostDeviceVectorImpl() {
if (device_ >= 0) {
SetDevice();
}
}
size_t Size() const {
return HostCanRead() ? data_h_.size() : data_d_ ? data_d_->size() : 0;
}
int DeviceIdx() const { return device_; }
T* DevicePointer() {
LazySyncDevice(GPUAccess::kWrite);
return data_d_->data().get();
}
const T* ConstDevicePointer() {
LazySyncDevice(GPUAccess::kRead);
return data_d_->data().get();
}
common::Span<T> DeviceSpan() {
LazySyncDevice(GPUAccess::kWrite);
return {data_d_->data().get(), Size()};
}
common::Span<const T> ConstDeviceSpan() {
LazySyncDevice(GPUAccess::kRead);
return {data_d_->data().get(), Size()};
}
void Fill(T v) { // NOLINT
if (HostCanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
thrust::fill(data_d_->begin(), data_d_->end(), v);
}
}
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
SetDevice(other->device_);
// Data is on host.
if (HostCanWrite() && other->HostCanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
SetDevice();
CopyToDevice(other);
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.data());
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.begin());
}
}
void Extend(HostDeviceVectorImpl* other) {
auto ori_size = this->Size();
this->Resize(ori_size + other->Size(), T());
if (HostCanWrite() && other->HostCanRead()) {
auto& h_vec = this->HostVector();
auto& other_vec = other->HostVector();
CHECK_EQ(h_vec.size(), ori_size + other->Size());
std::copy(other_vec.cbegin(), other_vec.cend(), h_vec.begin() + ori_size);
} else {
auto ptr = other->ConstDevicePointer();
SetDevice();
CHECK_EQ(this->DeviceIdx(), other->DeviceIdx());
dh::safe_cuda(hipMemcpyAsync(this->DevicePointer() + ori_size,
ptr,
other->Size() * sizeof(T),
hipMemcpyDeviceToDevice));
}
}
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kNone);
return data_h_;
}
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
void SetDevice(int device) {
if (device_ == device) { return; }
if (device_ >= 0) {
LazySyncHost(GPUAccess::kNone);
}
device_ = device;
if (device_ >= 0) {
LazyResizeDevice(data_h_.size());
}
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if ((Size() == 0 && device_ >= 0) || (DeviceCanWrite() && device_ >= 0)) {
// fast on-device resize
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_->resize(new_size, v);
} else {
// resize on host
LazySyncHost(GPUAccess::kNone);
data_h_.resize(new_size, v);
}
}
void LazySyncHost(GPUAccess access) {
if (HostCanAccess(access)) { return; }
if (HostCanRead()) {
// data is present, just need to deny access to the device
gpu_access_ = access;
return;
}
gpu_access_ = access;
if (data_h_.size() != data_d_->size()) { data_h_.resize(data_d_->size()); }
SetDevice();
dh::safe_cuda(hipMemcpy(data_h_.data(),
data_d_->data().get(),
data_d_->size() * sizeof(T),
hipMemcpyDeviceToHost));
}
void LazySyncDevice(GPUAccess access) {
if (DeviceCanAccess(access)) { return; }
if (DeviceCanRead()) {
// deny read to the host
gpu_access_ = access;
return;
}
// data is on the host
LazyResizeDevice(data_h_.size());
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_d_->data().get(),
data_h_.data(),
data_d_->size() * sizeof(T),
hipMemcpyHostToDevice));
gpu_access_ = access;
}
bool HostCanAccess(GPUAccess access) const { return gpu_access_ <= access; }
bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); }
bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); }
bool DeviceCanAccess(GPUAccess access) const { return gpu_access_ >= access; }
bool DeviceCanRead() const { return DeviceCanAccess(GPUAccess::kRead); }
bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); }
GPUAccess Access() const { return gpu_access_; }
private:
int device_{-1};
std::vector<T> data_h_{};
std::unique_ptr<dh::device_vector<T>> data_d_{};
GPUAccess gpu_access_{GPUAccess::kNone};
void CopyToDevice(HostDeviceVectorImpl* other) {
if (other->HostCanWrite()) {
CopyToDevice(other->data_h_.data());
} else {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_d_->data().get(), other->data_d_->data().get(),
data_d_->size() * sizeof(T), hipMemcpyDefault));
}
}
void CopyToDevice(const T* begin) {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_d_->data().get(), begin,
data_d_->size() * sizeof(T), hipMemcpyDefault));
}
void LazyResizeDevice(size_t new_size) {
if (data_d_ && new_size == data_d_->size()) { return; }
SetDevice();
data_d_->resize(new_size);
}
void SetDevice() {
CHECK_GE(device_, 0);
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(hipSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
if (!data_d_) {
data_d_.reset(new dh::device_vector<T>);
}
}
};
template<typename T>
HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device)
: impl_(new HostDeviceVectorImpl<T>(size, v, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(HostDeviceVector<T>&& other)
: impl_(new HostDeviceVectorImpl<T>(std::move(*other.impl_))) {}
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=(HostDeviceVector<T>&& other) {
if (this == &other) { return *this; }
std::unique_ptr<HostDeviceVectorImpl<T>> new_impl(
new HostDeviceVectorImpl<T>(std::move(*other.impl_)));
delete impl_;
impl_ = new_impl.release();
return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
delete impl_;
impl_ = nullptr;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); }
template <typename T>
T* HostDeviceVector<T>::DevicePointer() {
return impl_->DevicePointer();
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer() const {
return impl_->ConstDevicePointer();
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan() {
return impl_->DeviceSpan();
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan() const {
return impl_->ConstDeviceSpan();
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Extend(HostDeviceVector const& other) {
impl_->Extend(other.impl_);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanRead() const {
return impl_->HostCanRead();
}
template <typename T>
bool HostDeviceVector<T>::HostCanWrite() const {
return impl_->HostCanWrite();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanRead() const {
return impl_->DeviceCanRead();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanWrite() const {
return impl_->DeviceCanWrite();
}
template <typename T>
GPUAccess HostDeviceVector<T>::DeviceAccess() const {
return impl_->Access();
}
template <typename T>
void HostDeviceVector<T>::SetDevice(int device) const {
impl_->SetDevice(device);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int32_t>; // bst_node_t
template class HostDeviceVector<uint8_t>;
template class HostDeviceVector<FeatureType>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<uint64_t>; // bst_row_t
template class HostDeviceVector<uint32_t>; // bst_feature_t
template class HostDeviceVector<RegTree::Node>;
#if defined(__APPLE__)
/*
* On OSX:
*
* typedef unsigned int uint32_t;
* typedef unsigned long long uint64_t;
* typedef unsigned long __darwin_size_t;
*/
template class HostDeviceVector<std::size_t>;
#endif // defined(__APPLE__)
} // namespace xgboost
| 29335f74d6756b3369de1c69debc5d08b2095cf9.cu | /*!
* Copyright 2017 XGBoost contributors
*/
#include <thrust/fill.h>
#include <thrust/device_ptr.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/tree_model.h"
#include "device_helpers.cuh"
namespace xgboost {
// the handler to call instead of cudaSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
void SetCudaSetDeviceHandler(void (*handler)(int)) {
cudaSetDeviceHandler = handler;
}
template <typename T>
class HostDeviceVectorImpl {
public:
HostDeviceVectorImpl(size_t size, T v, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_->resize(size, v);
} else {
data_h_.resize(size, v);
}
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
LazyResizeDevice(init.size());
Copy(init);
} else {
data_h_ = init;
}
}
HostDeviceVectorImpl(HostDeviceVectorImpl<T>&& that) :
device_{that.device_},
data_h_{std::move(that.data_h_)},
data_d_{std::move(that.data_d_)},
gpu_access_{that.gpu_access_} {}
~HostDeviceVectorImpl() {
if (device_ >= 0) {
SetDevice();
}
}
size_t Size() const {
return HostCanRead() ? data_h_.size() : data_d_ ? data_d_->size() : 0;
}
int DeviceIdx() const { return device_; }
T* DevicePointer() {
LazySyncDevice(GPUAccess::kWrite);
return data_d_->data().get();
}
const T* ConstDevicePointer() {
LazySyncDevice(GPUAccess::kRead);
return data_d_->data().get();
}
common::Span<T> DeviceSpan() {
LazySyncDevice(GPUAccess::kWrite);
return {data_d_->data().get(), Size()};
}
common::Span<const T> ConstDeviceSpan() {
LazySyncDevice(GPUAccess::kRead);
return {data_d_->data().get(), Size()};
}
void Fill(T v) { // NOLINT
if (HostCanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
thrust::fill(data_d_->begin(), data_d_->end(), v);
}
}
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
SetDevice(other->device_);
// Data is on host.
if (HostCanWrite() && other->HostCanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
SetDevice();
CopyToDevice(other);
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.data());
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.begin());
}
}
void Extend(HostDeviceVectorImpl* other) {
auto ori_size = this->Size();
this->Resize(ori_size + other->Size(), T());
if (HostCanWrite() && other->HostCanRead()) {
auto& h_vec = this->HostVector();
auto& other_vec = other->HostVector();
CHECK_EQ(h_vec.size(), ori_size + other->Size());
std::copy(other_vec.cbegin(), other_vec.cend(), h_vec.begin() + ori_size);
} else {
auto ptr = other->ConstDevicePointer();
SetDevice();
CHECK_EQ(this->DeviceIdx(), other->DeviceIdx());
dh::safe_cuda(cudaMemcpyAsync(this->DevicePointer() + ori_size,
ptr,
other->Size() * sizeof(T),
cudaMemcpyDeviceToDevice));
}
}
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kNone);
return data_h_;
}
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
void SetDevice(int device) {
if (device_ == device) { return; }
if (device_ >= 0) {
LazySyncHost(GPUAccess::kNone);
}
device_ = device;
if (device_ >= 0) {
LazyResizeDevice(data_h_.size());
}
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if ((Size() == 0 && device_ >= 0) || (DeviceCanWrite() && device_ >= 0)) {
// fast on-device resize
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_->resize(new_size, v);
} else {
// resize on host
LazySyncHost(GPUAccess::kNone);
data_h_.resize(new_size, v);
}
}
void LazySyncHost(GPUAccess access) {
if (HostCanAccess(access)) { return; }
if (HostCanRead()) {
// data is present, just need to deny access to the device
gpu_access_ = access;
return;
}
gpu_access_ = access;
if (data_h_.size() != data_d_->size()) { data_h_.resize(data_d_->size()); }
SetDevice();
dh::safe_cuda(cudaMemcpy(data_h_.data(),
data_d_->data().get(),
data_d_->size() * sizeof(T),
cudaMemcpyDeviceToHost));
}
void LazySyncDevice(GPUAccess access) {
if (DeviceCanAccess(access)) { return; }
if (DeviceCanRead()) {
// deny read to the host
gpu_access_ = access;
return;
}
// data is on the host
LazyResizeDevice(data_h_.size());
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_d_->data().get(),
data_h_.data(),
data_d_->size() * sizeof(T),
cudaMemcpyHostToDevice));
gpu_access_ = access;
}
bool HostCanAccess(GPUAccess access) const { return gpu_access_ <= access; }
bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); }
bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); }
bool DeviceCanAccess(GPUAccess access) const { return gpu_access_ >= access; }
bool DeviceCanRead() const { return DeviceCanAccess(GPUAccess::kRead); }
bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); }
GPUAccess Access() const { return gpu_access_; }
private:
int device_{-1};
std::vector<T> data_h_{};
std::unique_ptr<dh::device_vector<T>> data_d_{};
GPUAccess gpu_access_{GPUAccess::kNone};
void CopyToDevice(HostDeviceVectorImpl* other) {
if (other->HostCanWrite()) {
CopyToDevice(other->data_h_.data());
} else {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_d_->data().get(), other->data_d_->data().get(),
data_d_->size() * sizeof(T), cudaMemcpyDefault));
}
}
void CopyToDevice(const T* begin) {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_d_->data().get(), begin,
data_d_->size() * sizeof(T), cudaMemcpyDefault));
}
void LazyResizeDevice(size_t new_size) {
if (data_d_ && new_size == data_d_->size()) { return; }
SetDevice();
data_d_->resize(new_size);
}
void SetDevice() {
CHECK_GE(device_, 0);
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(cudaSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
if (!data_d_) {
data_d_.reset(new dh::device_vector<T>);
}
}
};
template<typename T>
HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device)
: impl_(new HostDeviceVectorImpl<T>(size, v, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(HostDeviceVector<T>&& other)
: impl_(new HostDeviceVectorImpl<T>(std::move(*other.impl_))) {}
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=(HostDeviceVector<T>&& other) {
if (this == &other) { return *this; }
std::unique_ptr<HostDeviceVectorImpl<T>> new_impl(
new HostDeviceVectorImpl<T>(std::move(*other.impl_)));
delete impl_;
impl_ = new_impl.release();
return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
delete impl_;
impl_ = nullptr;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); }
template <typename T>
T* HostDeviceVector<T>::DevicePointer() {
return impl_->DevicePointer();
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer() const {
return impl_->ConstDevicePointer();
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan() {
return impl_->DeviceSpan();
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan() const {
return impl_->ConstDeviceSpan();
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Extend(HostDeviceVector const& other) {
impl_->Extend(other.impl_);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanRead() const {
return impl_->HostCanRead();
}
template <typename T>
bool HostDeviceVector<T>::HostCanWrite() const {
return impl_->HostCanWrite();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanRead() const {
return impl_->DeviceCanRead();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanWrite() const {
return impl_->DeviceCanWrite();
}
template <typename T>
GPUAccess HostDeviceVector<T>::DeviceAccess() const {
return impl_->Access();
}
template <typename T>
void HostDeviceVector<T>::SetDevice(int device) const {
impl_->SetDevice(device);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<int32_t>; // bst_node_t
template class HostDeviceVector<uint8_t>;
template class HostDeviceVector<FeatureType>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<uint64_t>; // bst_row_t
template class HostDeviceVector<uint32_t>; // bst_feature_t
template class HostDeviceVector<RegTree::Node>;
#if defined(__APPLE__)
/*
* On OSX:
*
* typedef unsigned int uint32_t;
* typedef unsigned long long uint64_t;
* typedef unsigned long __darwin_size_t;
*/
template class HostDeviceVector<std::size_t>;
#endif // defined(__APPLE__)
} // namespace xgboost
|
1d32e7f68c6dccb4f503a5754fd6d2e2656a4ac7.hip | // !!! This is a file automatically generated by hipify!!!
/** @file vl_imreadjpeg.cu
** @brief Load and transform images asynchronously
** @author Andrea Vedaldi
**/
/*
Copyright (C) 2014-16 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "bits/impl/tinythread.h"
#include "bits/impl/blashelper.hpp"
#include "bits/imread.hpp"
#include "bits/impl/imread_helpers.hpp"
#include <assert.h>
#include <vector>
#include <string>
#include <algorithm>
#include <iostream>
#include <sstream>
#include <cstdlib>
#include "bits/datamex.hpp"
#include "bits/mexutils.h"
#ifdef _MSC_VER
#undef max
#undef min
#endif
static int verbosity = 0 ;
/* option codes */
enum {
opt_num_threads = 0,
opt_prefetch,
opt_resize,
opt_pack,
opt_gpu,
opt_verbose,
opt_subtract_average,
opt_crop_size,
opt_crop_location,
opt_crop_anisotropy,
opt_flip,
opt_contrast,
opt_saturation,
opt_brightness,
opt_interpolation,
} ;
/* options */
VLMXOption options [] = {
{"NumThreads", 1, opt_num_threads },
{"Prefetch", 0, opt_prefetch },
{"Verbose", 0, opt_verbose },
{"Resize", 1, opt_resize },
{"Pack", 0, opt_pack },
{"GPU", 0, opt_gpu },
{"SubtractAverage", 1, opt_subtract_average },
{"CropAnisotropy", 1, opt_crop_anisotropy },
{"CropSize", 1, opt_crop_size },
{"CropLocation", 1, opt_crop_location },
{"Flip", 0, opt_flip },
{"Brightness", 1, opt_brightness },
{"Contrast", 1, opt_contrast },
{"Saturation", 1, opt_saturation },
{"Interpolation", 1, opt_interpolation },
{0, 0, 0 }
} ;
enum {
IN_FILENAMES = 0, IN_END
} ;
enum {
OUT_IMAGES = 0, OUT_END
} ;
/* ---------------------------------------------------------------- */
/* Logger */
/* ---------------------------------------------------------------- */
namespace vl {
class Logger
{
public:
Logger() ;
~Logger() ;
std::ostringstream & getStream() ;
protected:
std::ostringstream stringStream ;
private:
// Disable
Logger(const Logger&) ;
Logger& operator= (const Logger&) ;
} ;
}
vl::Logger::Logger()
{ }
vl::Logger::~Logger()
{
printf("%s\n", stringStream.str().c_str()) ;
//fflush(stdout) ;
}
std::ostringstream &
vl::Logger::getStream()
{
return stringStream ;
}
#define LOGERROR \
vl::Logger().getStream() \
<<"[info] "<<__func__<<"::"
#define LOG(level) \
if (verbosity < level) { } \
else vl::Logger().getStream() \
<<"[info] "<<__func__<<"::"
/* ---------------------------------------------------------------- */
/* Batch */
/* ---------------------------------------------------------------- */
class Batch
{
public:
struct Item
{
enum State {
prefetch,
fetch,
ready
} state ;
Batch const & batch ;
std::string name ;
vl::ImageShape shape ;
mxArray * array ;
vl::ErrorCode error ;
char errorMessage [512] ;
bool borrowed ;
vl::MexTensor cpuArray ;
vl::MexTensor gpuArray ;
int index ;
size_t outputWidth ;
size_t outputHeight ;
size_t outputNumChannels ;
size_t cropWidth ;
size_t cropHeight ;
size_t cropOffsetX ;
size_t cropOffsetY ;
bool flip ;
vl::impl::ImageResizeFilter::FilterType filterType ;
float brightnessShift [3] ;
float contrastShift ;
float saturationShift ;
Item(Batch const & batch) ;
mxArray * relinquishArray() ;
} ;
enum ResizeMethod {
noResize,
resizeShortestSide,
fixedSize
} ;
enum PackingMethod {
individualArrays,
singleArray
};
enum CropLocation {
cropCenter,
cropRandom
} ;
Batch(vl::MexContext & context) ;
~Batch() ;
vl::ErrorCode init() ;
void finalize() ;
vl::ErrorCode registerItem(std::string const & name) ;
size_t getNumberOfItems() const ;
Item * getItem(int index) ;
void clear() ;
void sync() const ;
vl::ErrorCode prefetch() ;
mxArray * relinquishArray() ;
void setGpuMode(bool gpu) ;
void setPackingMethod(PackingMethod method) ;
void setResizeMethod(ResizeMethod method, int height, int width) ;
void setAverage(double average []) ;
void setAverageImage(float const * image) ;
void setColorDeviation(double brightness [], double contrast, double saturation) ;
void setFlipMode(bool x) ;
void setCropAnisotropy(double minAnisotropy, double maxAnisotropy) ;
void setCropSize(double minSize, double maxSize) ;
void setCropLocation(CropLocation location) ;
void setFilterType(vl::impl::ImageResizeFilter::FilterType type) ;
PackingMethod getPackingMethod() const ;
Item * borrowNextItem() ;
void returnItem(Item * item) ;
private:
vl::MexContext & context ;
tthread::mutex mutable mutex ;
tthread::condition_variable mutable waitNextItemToBorrow ;
tthread::condition_variable mutable waitCompletion ;
bool quit ;
typedef std::vector<Item*> items_t ;
items_t items ;
int nextItem ;
int numReturnedItems ;
enum PackingMethod packingMethod ;
enum ResizeMethod resizeMethod ;
int resizeHeight ;
int resizeWidth ;
bool gpuMode ;
double average [3] ;
float * averageImage ;
double contrastDeviation ;
double saturationDeviation ;
double brightnessDeviation [9] ;
double minCropAnisotropy ;
double maxCropAnisotropy ;
double minCropSize ;
double maxCropSize ;
CropLocation cropLocation ;
bool flipMode ;
vl::impl::ImageResizeFilter::FilterType filterType ;
vl::MexTensor cpuPack ;
vl::MexTensor gpuPack ;
friend class ReaderTask ;
int gpuDevice ;
#if ENABLE_GPU
bool cudaStreamInitialized ;
hipStream_t cudaStream ;
float * cpuPinnedPack ;
size_t cpuPinnedPackSize ;
#endif
} ;
Batch::Item::Item(Batch const & batch)
: batch(batch),
cpuArray(batch.context),
gpuArray(batch.context),
borrowed(false),
error(vl::VLE_Success),
state(ready),
flip(false)
{
memset(errorMessage,sizeof(errorMessage),0) ;
}
mxArray * Batch::Item::relinquishArray()
{
if (batch.gpuMode) {
return gpuArray.relinquish() ;
} else {
return cpuArray.relinquish() ;
}
}
mxArray * Batch::relinquishArray()
{
if (gpuMode) {
return gpuPack.relinquish() ;
} else {
return cpuPack.relinquish() ;
}
}
Batch::Batch(vl::MexContext & context)
: context(context),
cpuPack(context),
gpuPack(context),
quit(true),
resizeMethod(noResize),
packingMethod(individualArrays),
gpuMode(false),
numReturnedItems(0),
averageImage(NULL)
#if ENABLE_GPU
, cpuPinnedPack(NULL),
cpuPinnedPackSize(0)
#endif
{ }
Batch::~Batch()
{
finalize() ;
}
size_t Batch::getNumberOfItems() const
{
return items.size() ;
}
Batch::Item * Batch::getItem(int index)
{
return items[index] ;
}
vl::ErrorCode Batch::init()
{
finalize() ;
LOG(2)<<"beginning batch" ;
quit = false ;
nextItem = 0 ;
numReturnedItems = 0 ;
// Restore defaults
memset(brightnessDeviation, 0, sizeof(brightnessDeviation)) ;
contrastDeviation = 0. ;
saturationDeviation = 0. ;
memset(average, 0, sizeof(average)) ;
averageImage = NULL ;
cropLocation = cropCenter ;
minCropSize = 1. ;
maxCropSize = 1. ;
minCropAnisotropy = 1. ;
maxCropAnisotropy = 1. ;
flipMode = false ;
filterType = vl::impl::ImageResizeFilter::kBilinear ;
packingMethod = individualArrays ;
resizeMethod = noResize ;
gpuMode = false ;
gpuDevice = -1 ;
#if ENABLE_GPU
if (cudaStreamInitialized) {
hipStreamDestroy(cudaStream) ;
cudaStreamInitialized = false ;
}
#endif
return vl::VLE_Success ;
}
void Batch::finalize()
{
LOG(2)<<"finalizing batch" ;
// Clear current batch
clear() ;
// Release memory
#if ENABLE_GPU
if (cpuPinnedPack) {
hipHostFree(cpuPinnedPack) ;
cpuPinnedPack = 0 ;
cpuPinnedPackSize = 0 ;
}
#endif
// Signal waiting threads that we are quitting
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
quit = true ;
waitNextItemToBorrow.notify_all() ;
}
}
Batch::Item * Batch::borrowNextItem()
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
while (true) {
if (quit) { return NULL ; }
if (nextItem < items.size()) {
Item * item = items[nextItem] ;
if (item->state != Item::ready) {
item->borrowed = true ;
nextItem ++ ;
return item ;
}
}
waitNextItemToBorrow.wait(mutex) ;
}
}
void Batch::returnItem(Batch::Item * item)
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
numReturnedItems ++ ;
if (item->state == Item::fetch &&
numReturnedItems == items.size() &&
packingMethod == singleArray &&
gpuMode) {
#if ENABLE_GPU
LOG(2) << "push to GPU the pack" ;
hipError_t cerror ;
cerror = hipMemcpyAsync (gpuPack.getMemory(),
cpuPinnedPack,
gpuPack.getNumElements() * sizeof(float),
hipMemcpyHostToDevice,
cudaStream) ;
if (cerror != hipSuccess) {
item->error = vl::VLE_Cuda ;
snprintf(item->errorMessage, sizeof(item->errorMessage),
"cudaMemcpyAsnyc : '%s'", hipGetErrorString(cerror)) ;
}
#endif
}
item->borrowed = false ;
item->state = Batch::Item::ready ;
waitCompletion.notify_all() ;
}
void Batch::setAverageImage(float const * image)
{
if (image == NULL) {
if (averageImage) {
free(averageImage) ;
averageImage = NULL ;
}
return ;
}
assert (resizeMethod == fixedSize) ;
averageImage = (float*)malloc(sizeof(float) * resizeHeight * resizeWidth * 3) ;
memcpy(averageImage, image, sizeof(float) * resizeHeight * resizeWidth * 3) ;
}
void Batch::clear()
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
// Stop threads from getting more tasks. After this any call to borrowItem() by a worker will
// stop in a waiting state. Thus, we simply wait for all of them to return their items.
nextItem = (int)items.size() ;
// Wait for all thread to return their items
for (int i = 0 ; i < items.size() ; ++i) {
while (items[i]->borrowed) {
waitCompletion.wait(mutex) ;
}
}
for (int i = 0 ; i < items.size() ; ++i) {
delete items[i] ;
}
items.clear() ;
// Clear average image
setAverageImage(NULL) ;
// At the end of the current (empty) list
nextItem = 0 ;
numReturnedItems = 0 ;
}
void Batch::sync() const
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
// Wait for threads to complete work for all items.
// Note that it is not enough to check that threads are all in a
// "done" state as this does not mean that all work has been done yet.
// Instead, we look at the number of items returned.
while (numReturnedItems < items.size()) {
waitCompletion.wait(mutex) ;
}
if (gpuMode) {
#if ENABLE_GPU
hipError_t cerror ;
cerror = hipStreamSynchronize(cudaStream) ;
if (cerror != hipSuccess) {
LOGERROR << "CUDA error while synchronizing a stream: '" << hipGetErrorString(cerror) << '\'' ;
}
#endif
}
}
vl::ErrorCode Batch::registerItem(std::string const & name)
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
Item * item = new Item(*this) ;
item->index = (int)items.size() ;
item->name = name ;
item->state = Item::prefetch ;
items.push_back(item) ;
return vl::VLE_Success ;
}
void Batch::setGpuMode(bool gpu)
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
#if ENABLE_GPU
if (gpu) {
hipGetDevice(&gpuDevice) ;
if (!cudaStreamInitialized) {
hipError_t cerror ;
cerror = hipStreamCreateWithFlags(&cudaStream, hipStreamNonBlocking) ;
if (cerror != hipSuccess) {
LOGERROR
<< "CUDA error while creating a stream '"
<< hipGetErrorString(cerror) << '\"' ;
} else {
cudaStreamInitialized = true ;
}
}
}
#endif
gpuMode = gpu ;
}
void Batch::setResizeMethod(Batch::ResizeMethod method, int height, int width)
{
resizeMethod = method ;
resizeHeight = height ;
resizeWidth = width ;
}
void Batch::setPackingMethod(Batch::PackingMethod method)
{
assert(method == individualArrays || method == singleArray) ;
packingMethod = method ;
}
Batch::PackingMethod Batch::getPackingMethod() const
{
return packingMethod ;
}
void Batch::setAverage(double average [])
{
::memcpy(this->average, average, sizeof(this->average)) ;
}
void Batch::setColorDeviation(double brightness [], double contrast, double saturation)
{
::memcpy(brightnessDeviation, brightness, sizeof(brightnessDeviation)) ;
contrastDeviation = contrast ;
saturationDeviation = saturation ;
}
void Batch::setFilterType(vl::impl::ImageResizeFilter::FilterType type)
{
filterType = type ;
}
void Batch::setFlipMode(bool x)
{
flipMode = x ;
}
void Batch::setCropAnisotropy(double minAnisotropy, double maxAnisotropy)
{
assert(minAnisotropy <= maxAnisotropy) ;
assert(0.0 <= minAnisotropy && minAnisotropy <= 1.0) ;
minCropAnisotropy = minAnisotropy ;
maxCropAnisotropy = maxAnisotropy ;
}
void Batch::setCropSize(double minSize, double maxSize)
{
assert(minSize <= maxSize) ;
assert(0.0 <= minSize && minSize <= 1.0) ;
assert(0.0 <= maxSize && maxSize <= 1.0) ;
minCropSize = minSize ;
maxCropSize = maxSize ;
}
void Batch::setCropLocation(CropLocation location)
{
assert(location == cropCenter || location == cropRandom) ;
cropLocation = location ;
}
//void Batch::getItemTransformation(Item * item)
//{
//
//}
vl::ErrorCode Batch::prefetch()
{
// Prod and then wait for reader threads to initialize the shape of the images
// and then perform the requried allocations.
waitNextItemToBorrow.notify_all() ;
sync() ;
// In packing mode, preallocate all memory here.
if (packingMethod == singleArray) {
assert(resizeMethod == fixedSize) ;
vl::TensorShape shape(resizeHeight, resizeWidth, 3, getNumberOfItems()) ;
if (gpuMode) {
#if ENABLE_GPU
gpuPack.init(vl::VLDT_GPU, vl::VLDT_Float, shape) ;
gpuPack.makePersistent() ;
size_t memSize = shape.getNumElements() * sizeof(float) ;
if (cpuPinnedPackSize < memSize) {
if (cpuPinnedPack) {
hipHostFree(cpuPinnedPack) ;
}
hipHostMalloc(&cpuPinnedPack, memSize) ;
cpuPinnedPackSize = memSize ;
}
#endif
} else {
cpuPack.init(vl::VLDT_CPU, vl::VLDT_Float, shape) ;
cpuPack.makePersistent() ;
}
}
// Get ready to reprocess all items.
nextItem = 0 ;
numReturnedItems = 0 ;
for (int i = 0 ; i < getNumberOfItems() ; ++ i) {
Batch::Item * item = getItem(i) ;
if (item->error == vl::VLE_Success) {
if (verbosity >= 2) {
mexPrintf("%20s: %d x %d x %d\n", item->name.c_str(), item->shape.width, item->shape.height, item->shape.depth) ;
}
} else {
mexPrintf("%20s: error '%s'\n", item->name.c_str(), item->errorMessage) ;
}
// Determine the shape of (height and width) of the output image. This is either
// the same as the input image, or with a fixed size for the shortest side,
// or a fixed size for both sides.
int outputHeight ;
int outputWidth ;
double cropHeight ;
double cropWidth ;
int dx ;
int dy ;
switch (resizeMethod) {
case noResize:
outputHeight = (int)item->shape.height ;
outputWidth = (int)item->shape.width ;
break ;
case resizeShortestSide: {
double scale1 = (double)resizeHeight / item->shape.width ;
double scale2 = (double)resizeHeight / item->shape.height ;
double scale = ::max(scale1, scale2) ;
outputHeight = ::max(1.0, round(scale * item->shape.height)) ;
outputWidth = ::max(1.0, round(scale * item->shape.width)) ;
break ;
}
case fixedSize:
outputHeight = resizeHeight ;
outputWidth = resizeWidth ;
break ;
}
// Determine the aspect ratio of the crop in the input image.
{
double anisotropyRatio = 1.0 ;
if (minCropAnisotropy == 0 || maxCropAnisotropy == 0) {
// Stretch crop to have the same shape as the input.
double inputAspect = (double)item->shape.width / item->shape.height ;
double outputAspect = (double)outputWidth / outputHeight ;
anisotropyRatio = inputAspect / outputAspect ;
} else {
double z = (double)rand() / RAND_MAX ;
double a = log(maxCropAnisotropy) ;
double b = log(minCropAnisotropy) ;
anisotropyRatio = exp(z * (b - a) + a) ;
}
cropWidth = outputWidth * sqrt(anisotropyRatio) ;
cropHeight = outputHeight / sqrt(anisotropyRatio) ;
}
// Determine the crop size.
{
double scale = ::min(item->shape.width / cropWidth,
item->shape.height / cropHeight) ;
double z = (double)rand() / RAND_MAX ;
#if 1
double a = maxCropSize * maxCropSize ;
double b = minCropSize * minCropSize ;
double size = sqrt(z * (b - a) + a) ;
#else
double a = maxCropSize ;
double b = minCropSize ;
double size = z * (b - a) + a ;
#endif
cropWidth *= scale * size ;
cropHeight *= scale * size ;
}
cropWidth = ::min(round(cropWidth), (double)item->shape.width) ;
cropHeight = ::min(round(cropHeight), (double)item->shape.height) ;
// Determine the crop location.
{
dx = item->shape.width - cropWidth ;
dy = item->shape.height - cropHeight ;
switch (cropLocation) {
case cropCenter:
dx /= 2 ;
dy /= 2 ;
break ;
case cropRandom:
dx = rand() % (dx + 1) ;
dy = rand() % (dy + 1) ;
break ;
default:
LOGERROR << "cropLocation not set" ;
}
}
// Save.
item->outputWidth = outputWidth ;
item->outputHeight = outputHeight ;
item->outputNumChannels = (packingMethod == individualArrays) ? item->shape.depth : 3 ;
item->cropWidth = cropWidth ;
item->cropHeight = cropHeight ;
item->cropOffsetX = dx ;
item->cropOffsetY = dy ;
item->flip = flipMode && (rand() > RAND_MAX/2) ;
item->filterType = filterType ;
// Color processing.
item->saturationShift = 1. + saturationDeviation * (2.*(double)rand()/RAND_MAX - 1.) ;
item->contrastShift = 1. + contrastDeviation * (2.*(double)rand()/RAND_MAX - 1.) ;
{
int numChannels = item->outputNumChannels ;
double w [3] ;
for (int i = 0 ; i < numChannels ; ++i) { w[i] = vl::randn() ; }
for (int i = 0 ; i < numChannels ; ++i) {
item->brightnessShift[i] = 0. ;
for (int j = 0 ; j < numChannels ; ++j) {
item->brightnessShift[i] += brightnessDeviation[i + 3*j] * w[i] ;
}
}
}
LOG(2)
<< "input (" << item->shape.width << " x " << item->shape.height << " x " << item->shape.depth << ") "
<< "output (" << item->outputWidth << " x " << item->outputHeight << " x " << item->outputNumChannels << ") "
<< "crop (" << item->cropWidth << " x " << item->cropHeight << ") "
<< "offset (" << item->cropOffsetX << ", " << item->cropOffsetY << ")" ;
if (packingMethod == individualArrays) {
vl::TensorShape shape(outputHeight, outputWidth, item->outputNumChannels, 1) ;
item->cpuArray.init(vl::VLDT_CPU, vl::VLDT_Float, shape) ;
item->cpuArray.makePersistent() ;
if (gpuMode) {
item->gpuArray.init(vl::VLDT_GPU, vl::VLDT_Float, shape) ;
item->gpuArray.makePersistent() ;
}
}
// Ready to fetch
item->state = Item::fetch ;
}
// Notify that we are ready to fetch
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
waitNextItemToBorrow.notify_all() ;
}
return vl::VLE_Success ;
}
/* ---------------------------------------------------------------- */
/* ReaderTask */
/* ---------------------------------------------------------------- */
class ReaderTask
{
public:
ReaderTask() ;
~ReaderTask() { finalize() ; }
vl::ErrorCode init(Batch * batch, int index) ;
void finalize() ;
private:
int index ;
Batch * batch ;
tthread::thread * thread ;
vl::ImageReader * reader ;
static void threadEntryPoint(void * thing) ;
void entryPoint() ;
void * getBuffer(int index, size_t size) ;
int gpuDevice ;
private:
ReaderTask(ReaderTask const &) ;
ReaderTask & operator= (ReaderTask const &) ;
struct Buffer {
void * memory ;
size_t size ;
} buffers [2] ;
} ;
void ReaderTask::threadEntryPoint(void * thing)
{
((ReaderTask*)thing)->entryPoint() ;
}
ReaderTask::ReaderTask()
: batch(NULL), thread(NULL), reader(NULL)
{
memset(buffers, 0, sizeof(buffers)) ;
}
void * ReaderTask::getBuffer(int index, size_t size)
{
if (buffers[index].size < size) {
if (buffers[index].memory) {
free(buffers[index].memory) ;
}
buffers[index].memory = malloc(size) ;
buffers[index].size = size ;
}
return buffers[index].memory ;
}
void ReaderTask::entryPoint()
{
LOG(2) << "reader " << index << " task staring" ;
while (true) {
#if ENABLE_GPU
if (batch->gpuMode && batch->gpuDevice != gpuDevice) {
LOG(2) << "reader " << index << " setting GPU device" ;
hipSetDevice(batch->gpuDevice) ;
hipGetDevice(&gpuDevice) ;
}
#endif
Batch::Item * item = batch->borrowNextItem() ;
LOG(3) << "borrowed " << item ;
if (item == NULL) { break ; }
if (item->error != vl::VLE_Success) {
batch->returnItem(item) ;
continue ;
}
switch (item->state) {
case Batch::Item::prefetch: {
item->error = reader->readShape(item->shape, item->name.c_str()) ;
if (item->error != vl::VLE_Success) {
snprintf(item->errorMessage, sizeof(item->errorMessage), "%s", reader->getLastErrorMessage()) ;
}
break ;
}
case Batch::Item::fetch: {
// Get the CPU buffer that will hold the pixels.
float * outputPixels;
if (batch->getPackingMethod() == Batch::individualArrays) {
outputPixels = (float*)item->cpuArray.getMemory() ;
} else {
if (batch->gpuMode) {
#if ENABLE_GPU
outputPixels = batch->cpuPinnedPack ;
#else
snprintf(item->errorMessage, sizeof(item->errorMessage), "GPU support not compiled.") ;
break;
#endif
} else {
outputPixels = (float*)batch->cpuPack.getMemory() ;
}
outputPixels += item->outputHeight*item->outputWidth*3*item->index ;
}
// Read full image.
float * inputPixels = (float*)getBuffer(0,
item->shape.height *
item->shape.width *
item->shape.depth * sizeof(float)) ;
item->error = reader->readPixels(inputPixels, item->name.c_str()) ;
if (item->error != vl::VLE_Success) {
snprintf(item->errorMessage, sizeof(item->errorMessage), "%s", reader->getLastErrorMessage()) ;
break ;
}
// Crop.
float * temp = (float*)getBuffer(1,
item->outputHeight *
item->shape.width *
item->shape.depth * sizeof(float)) ;
vl::impl::imageResizeVertical(temp, inputPixels,
item->outputHeight,
item->shape.height,
item->shape.width,
item->shape.depth,
item->cropHeight,
item->cropOffsetY,
false, // flip
item->filterType) ;
vl::impl::imageResizeVertical(outputPixels, temp,
item->outputWidth,
item->shape.width,
item->outputHeight,
item->shape.depth,
item->cropWidth,
item->cropOffsetX,
item->flip,
item->filterType) ;
// Postprocess colors.
{
size_t inputNumChannels = item->shape.depth ;
size_t K = item->outputNumChannels ;
size_t n = item->outputHeight*item->outputWidth ;
if (batch->averageImage) {
// If there is an average image, then subtract it now.
// Grayscale images are expanded here to color if needed.
// Withouth an average image,
// they are expanded later.
for (int k = inputNumChannels ; k < K ; ++k) {
::memcpy(outputPixels + n*k, outputPixels, sizeof(float) * n) ;
}
vl::impl::blas<vl::VLDT_CPU,vl::VLDT_Float>::axpy
(batch->context,
n * item->outputNumChannels,
-1.0f,
batch->averageImage, 1,
outputPixels, 1) ;
inputNumChannels = K ;
}
float dv [3] ;
float * channels [3] ;
for (int k = 0 ; k < K ; ++k) {
channels[k] = outputPixels + n * k ;
}
for (int k = 0 ; k < inputNumChannels ; ++k) {
dv[k] = (1. - 2. * item->contrastShift) *
(batch->average[k] + item->brightnessShift[k]);
if (item->contrastShift != 1.) {
float mu = 0.f ;
float const * pixel = channels[k] ;
float const * end = channels[k] + n ;
while (pixel != end) { mu += *pixel++ ; }
mu /= n ;
dv[k] -= (1.0 - item->contrastShift) * mu ;
}
}
{
float const * end = channels[0] + n ;
float v [3] ;
if (K == 3 && inputNumChannels == 3) {
float const a = item->contrastShift * item->saturationShift ;
float const b = item->contrastShift * (1. - item->saturationShift) / K ;
while (channels[0] != end) {
float mu = 0.f ;
v[0] = *channels[0] + dv[0] ; mu += v[0] ;
v[1] = *channels[1] + dv[1] ; mu += v[1] ;
v[2] = *channels[2] + dv[2] ; mu += v[2] ;
*channels[0]++ = a * v[0] + b * mu ;
*channels[1]++ = a * v[1] + b * mu ;
*channels[2]++ = a * v[2] + b * mu ;
}
} else if (K == 3 && inputNumChannels == 1) {
float const a = item->contrastShift * item->saturationShift ;
float const b = item->contrastShift * (1. - item->saturationShift) / K ;
while (channels[0] != end) {
float mu = 0.f ;
v[0] = *channels[0] + dv[0] ; mu += v[0] ;
v[1] = *channels[0] + dv[1] ; mu += v[1] ;
v[2] = *channels[0] + dv[2] ; mu += v[2] ;
*channels[0]++ = a * v[0] + b * mu ;
*channels[1]++ = a * v[1] + b * mu ;
*channels[2]++ = a * v[2] + b * mu ;
}
} else {
float const a = item->contrastShift ;
while (channels[0] != end) {
float v = *channels[0] + dv[0] ;
*channels[0]++ = a * v ;
}
}
}
}
// Copy to GPU.
if (batch->getPackingMethod() == Batch::individualArrays && batch->gpuMode) {
#if ENABLE_GPU
hipError_t cerror ;
cerror = hipMemcpyAsync (item->gpuArray.getMemory(),
outputPixels,
item->gpuArray.getNumElements() * sizeof(float),
hipMemcpyHostToDevice,
batch->cudaStream) ;
if (cerror != hipSuccess) {
item->error = vl::VLE_Cuda ;
snprintf(item->errorMessage, sizeof(item->errorMessage),
"CUDA error while copying memory from host to device: '%s'", hipGetErrorString(cerror)) ;
break ;
}
#endif
}
break ;
}
case Batch::Item::ready:
break ;
}
batch->returnItem(item) ;
}
LOG(2) << "reader " << index << " task quitting" ;
}
void ReaderTask::finalize()
{
LOG(2)<<"finalizing reader " << index ;
if (thread) {
if (thread->joinable()) {
thread->join() ;
}
delete thread ;
thread = NULL ;
}
for (int i = 0 ; i < sizeof(buffers)/sizeof(Buffer) ; ++i) {
if (buffers[i].memory) {
free(buffers[i].memory) ;
buffers[i].memory = NULL ;
buffers[i].size = 0 ;
}
}
if (reader) {
delete reader ;
reader = NULL ;
}
index = -1 ;
batch = NULL ;
}
vl::ErrorCode ReaderTask::init(Batch * batch, int index)
{
finalize() ;
this->batch = batch ;
this->index = index ;
thread = new tthread::thread(threadEntryPoint, this) ;
reader = new vl::ImageReader() ;
return vl::VLE_Success ;
}
/* ---------------------------------------------------------------- */
/* Cache */
/* ---------------------------------------------------------------- */
vl::MexContext context ;
Batch batch(context) ;
bool batchIsInitialized = false ;
typedef std::vector<ReaderTask*> readers_t ;
readers_t readers ;
void atExit()
{
if (batchIsInitialized) {
batch.finalize() ;
batchIsInitialized = false ;
}
for (int r = 0 ; r < readers.size() ; ++r) {
readers[r]->finalize() ;
delete readers[r] ;
}
readers.clear() ;
}
/* ---------------------------------------------------------------- */
/* Cache */
/* ---------------------------------------------------------------- */
void mexFunction(int nout, mxArray *out[],
int nin, mxArray const *in[])
{
bool prefetch = false ;
bool gpuMode = false ;
int requestedNumThreads = readers.size() ;
int opt ;
int next = IN_END ;
mxArray const *optarg ;
Batch::PackingMethod packingMethod = Batch::individualArrays ;
Batch::ResizeMethod resizeMethod = Batch::noResize ;
int resizeWidth = -1 ;
int resizeHeight = -1 ;
vl::ErrorCode error ;
double average [3] = {0.} ;
vl::MexTensor averageImage(context) ;
double brightnessDeviation [9] = {0.} ;
double saturationDeviation = 0. ;
double contrastDeviation = 0. ;
bool flipMode = false ;
Batch::CropLocation cropLocation = Batch::cropCenter ;
double minCropSize = 1.0, maxCropSize = 1.0 ;
double minCropAnisotropy = 1.0, maxCropAnisotropy = 1.0 ;
vl::impl::ImageResizeFilter::FilterType filterType = vl::impl::ImageResizeFilter::kBilinear ;
verbosity = 0 ;
/* -------------------------------------------------------------- */
/* Check the arguments */
/* -------------------------------------------------------------- */
mexAtExit(atExit) ;
if (nin < 1) {
vlmxError(VLMXE_IllegalArgument, "There is less than one argument.") ;
}
while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) {
switch (opt) {
case opt_verbose :
++ verbosity ;
break ;
case opt_prefetch :
prefetch = true ;
break ;
case opt_pack :
packingMethod = Batch::singleArray ;
break ;
case opt_gpu :
#ifndef ENABLE_GPU
vlmxError(VLMXE_IllegalArgument, "Not compiled with GPU support.") ;
#endif
gpuMode = true ;
break ;
case opt_num_threads :
requestedNumThreads = (int)mxGetScalar(optarg) ;
break ;
case opt_resize :
if (!vlmxIsPlainVector(optarg, -1)) {
vlmxError(VLMXE_IllegalArgument, "RESIZE is not a plain vector.") ;
}
switch (mxGetNumberOfElements(optarg)) {
case 1 :
resizeMethod = Batch::resizeShortestSide ;
resizeHeight = (int)mxGetPr(optarg)[0] ;
resizeWidth = (int)mxGetPr(optarg)[0] ;
break ;
case 2 :
resizeMethod = Batch::fixedSize ;
resizeHeight = (int)mxGetPr(optarg)[0] ;
resizeWidth = (int)mxGetPr(optarg)[1] ;
break;
default:
vlmxError(VLMXE_IllegalArgument, "RESIZE does not have one or two dimensions.") ;
break ;
}
if (resizeHeight < 1 || resizeWidth < 1) {
vlmxError(VLMXE_IllegalArgument, "An element of RESIZE is smaller than one.") ;
}
break ;
case opt_brightness: {
if (!vlmxIsPlainMatrix(optarg, -1, -1)) {
vlmxError(VLMXE_IllegalArgument, "BRIGHTNESS is not a plain matrix.") ;
}
size_t n = mxGetNumberOfElements(optarg) ;
memset(brightnessDeviation, 0, sizeof(brightnessDeviation)) ;
if (n == 1) {
double x = mxGetPr(optarg)[0] ;
brightnessDeviation[0] = x;
brightnessDeviation[3] = x;
brightnessDeviation[8] = x;
} else if (n == 3) {
double const* x = mxGetPr(optarg) ;
brightnessDeviation[0] = x[0];
brightnessDeviation[3] = x[1];
brightnessDeviation[8] = x[2];
} else if (n == 9) {
memcpy(brightnessDeviation, mxGetPr(optarg), sizeof(brightnessDeviation)) ;
} else {
vlmxError(VLMXE_IllegalArgument, "BRIGHTNESS does not have 1, 3, or 9 elements.") ;
}
break ;
}
case opt_saturation: {
if (!vlmxIsPlainScalar(optarg)) {
vlmxError(VLMXE_IllegalArgument, "SATURATION is not a plain scalar.") ;
}
double x = mxGetPr(optarg)[0] ;
if (x < 0 || x > 1.0) {
vlmxError(VLMXE_IllegalArgument, "SATURATION is not in the [0,1] range..") ;
}
saturationDeviation = x ;
break ;
}
case opt_contrast: {
if (!vlmxIsPlainScalar(optarg)) {
vlmxError(VLMXE_IllegalArgument, "CONTRAST is not a plain scalar.") ;
}
double x = mxGetPr(optarg)[0] ;
if (x < 0 || x > 1.0) {
vlmxError(VLMXE_IllegalArgument, "CONTRAST is not in the [0,1] range..") ;
}
contrastDeviation = x ;
break ;
}
case opt_crop_anisotropy: {
if (!vlmxIsPlainScalar(optarg) && !vlmxIsPlainVector(optarg, 2)) {
vlmxError(VLMXE_IllegalArgument, "CROPANISOTROPY is not a plain scalar or vector with two components.") ;
}
minCropAnisotropy = mxGetPr(optarg)[0] ;
maxCropAnisotropy = mxGetPr(optarg)[::min((mwSize)1, mxGetNumberOfElements(optarg)-1)] ;
if (minCropAnisotropy < 0.0 || minCropAnisotropy > maxCropAnisotropy) {
vlmxError(VLMXE_IllegalArgument, "CROPANISOTROPY values are not in the legal range.") ;
}
break ;
}
case opt_crop_size: {
if (!vlmxIsPlainScalar(optarg) && !vlmxIsPlainVector(optarg, 2)) {
vlmxError(VLMXE_IllegalArgument, "CROPSIZE is not a plain scalar or vector with two components.") ;
}
minCropSize = mxGetPr(optarg)[0] ;
maxCropSize = mxGetPr(optarg)[::min((mwSize)1, mxGetNumberOfElements(optarg)-1)] ;
if (minCropSize < 0.0 || minCropSize > maxCropSize || maxCropSize > 1.0) {
vlmxError(VLMXE_IllegalArgument, "CROPSIZE values are not in the legal range.") ;
}
break ;
}
case opt_crop_location: {
if (!vlmxIsString(optarg, -1)) {
vlmxError(VLMXE_IllegalArgument, "CROPLOCATION is not a string") ;
}
if (vlmxCompareToStringI(optarg, "random") == 0) {
cropLocation = Batch::cropRandom ;
} else if (vlmxCompareToStringI(optarg, "center") == 0) {
cropLocation = Batch::cropCenter ;
} else {
vlmxError(VLMXE_IllegalArgument, "CROPLOCATION value unknown.") ;
}
break ;
}
case opt_subtract_average: {
if (vlmxIsVector(optarg,1) || vlmxIsVector(optarg, 3)) {
size_t n = mxGetNumberOfElements(optarg) ;
switch (mxGetClassID(optarg)) {
case mxSINGLE_CLASS: {
float * x = (float*)mxGetData(optarg) ;
average[0] = x[::min((size_t)0,n-1)] ;
average[1] = x[::min((size_t)1,n-1)] ;
average[2] = x[::min((size_t)2,n-1)] ;
break ;
}
case mxDOUBLE_CLASS: {
double * x = mxGetPr(optarg) ;
average[0] = (float)x[::min((size_t)0,n-1)] ;
average[1] = (float)x[::min((size_t)1,n-1)] ;
average[2] = (float)x[::min((size_t)2,n-1)] ;
break ;
}
default:
vlmxError(VLMXE_IllegalArgument, "SUBTRACTAVERAGE is not SINGLE or DOUBLE vector.") ;
}
} else {
if (mxGetClassID(optarg) != mxSINGLE_CLASS ||
mxGetNumberOfDimensions(optarg) > 3) {
vlmxError(VLMXE_IllegalArgument, "SUBTRACTAVERAGE is not a SINGLE image of a compatible shape.") ;
}
averageImage.init(optarg) ;
}
break ;
}
case opt_flip: {
flipMode = true ;
break ;
}
case opt_interpolation: {
if (!vlmxIsString(optarg,-1)) {
vlmxError(VLMXE_IllegalArgument, "INTERPOLATION is not a string.") ;
}
if (vlmxIsEqualToStringI(optarg, "box")) {
filterType = vl::impl::ImageResizeFilter::kBox ;
} else if (vlmxIsEqualToStringI(optarg, "bilinear")) {
filterType = vl::impl::ImageResizeFilter::kBilinear ;
} else if (vlmxIsEqualToStringI(optarg, "bicubic")) {
filterType = vl::impl::ImageResizeFilter::kBicubic ;
} else if (vlmxIsEqualToStringI(optarg, "lanczos2")) {
filterType = vl::impl::ImageResizeFilter::kLanczos2 ;
} else if (vlmxIsEqualToStringI(optarg, "lanczos3")) {
filterType = vl::impl::ImageResizeFilter::kLanczos3 ;
} else {
vlmxError(VLMXE_IllegalArgument, "INTERPOLATION is not a supported method.") ;
}
break;
break ;
}
}
}
if (averageImage) {
if (resizeMethod != Batch::fixedSize) {
vlmxError(VLMXE_IllegalArgument, "Cannot subtract an average image unless RESIZE is used to set the size of the output.") ;
}
if (averageImage.getNumDimensions() != 3 ||
averageImage.getHeight() != resizeHeight ||
averageImage.getWidth() != resizeWidth ||
averageImage.getDepth() !=3) {
vlmxError(VLMXE_IllegalArgument, "The average image is not a RESIZEHEIGHT x RESIZEWIDTH x 3 array.") ;
}
}
/* -------------------------------------------------------------- */
/* Do the work */
/* -------------------------------------------------------------- */
if (!mxIsCell(in[IN_FILENAMES])) {
vlmxError(VLMXE_IllegalArgument, "FILENAMES is not a cell array of strings.") ;
}
// If the requested number of threads changes, finalize everything
requestedNumThreads = ::max(requestedNumThreads, 1) ;
if (readers.size() != requestedNumThreads) {
atExit() ; // Delete threads and current batch
}
// Prepare batch.
if (!batchIsInitialized) {
error = batch.init() ;
if (error != vl::VLE_Success) {
vlmxError(VLMXE_Execution, "Could not initialize a batch structure") ;
}
batchIsInitialized = true ;
}
// Prepare reader tasks.
for (int r = readers.size() ; r < requestedNumThreads ; ++r) {
readers.push_back(new ReaderTask()) ;
vl::ErrorCode error = readers[r]->init(&batch, r) ;
if (error != vl::VLE_Success) {
vlmxError(VLMXE_Execution, "Could not create the requested number of threads") ;
}
}
// Extract filenames as strings.
bool sameAsPrefeteched = true ;
std::vector<std::string> filenames ;
for (int i = 0 ; i < (int)mxGetNumberOfElements(in[IN_FILENAMES]) ; ++i) {
mxArray* filenameArray = mxGetCell(in[IN_FILENAMES], i) ;
if (!vlmxIsString(filenameArray,-1)) {
vlmxError(VLMXE_IllegalArgument, "FILENAMES contains an entry that is not a string.") ;
}
char filename [512] ;
mxGetString (filenameArray, filename, sizeof(filename)/sizeof(char)) ;
filenames.push_back(std::string(filename)) ;
sameAsPrefeteched &= (i < batch.getNumberOfItems() && batch.getItem(i)->name == filenames[i]) ;
}
// If the list of names is not the same as the prefetched ones,
// start a new cycle.
if (!sameAsPrefeteched) {
batch.clear() ;
// Check compatibility of options
if (packingMethod == Batch::singleArray && resizeMethod != Batch::fixedSize) {
vlmxError(VLMXE_IllegalArgument, "PACK must be used in combination with resizing to a fixed size.") ;
}
if (verbosity >= 2) {
mexPrintf("vl_imreadjpeg: gpu mode: %s\n", gpuMode?"yes":"no") ;
mexPrintf("vl_imreadjpeg: crop anisotropy: [%.1g, %.1g]\n",
minCropAnisotropy, maxCropAnisotropy) ;
mexPrintf("vl_imreadjpeg: crop size: [%.1g, %.1g]\n",
minCropSize, maxCropSize) ;
}
batch.setResizeMethod(resizeMethod, resizeHeight, resizeWidth) ;
batch.setPackingMethod(packingMethod) ;
batch.setGpuMode(gpuMode) ;
batch.setFlipMode(flipMode) ;
batch.setCropLocation(cropLocation) ;
batch.setCropAnisotropy(minCropAnisotropy, maxCropAnisotropy) ;
batch.setCropSize(minCropSize, maxCropSize) ;
batch.setColorDeviation(brightnessDeviation,
contrastDeviation,
saturationDeviation) ;
batch.setAverage(average) ;
if (averageImage) {
batch.setAverageImage((float const*)averageImage.getMemory()) ;
}
batch.setFilterType(filterType) ;
for (int i = 0 ; i < filenames.size() ; ++ i) {
batch.registerItem(filenames[i]) ;
}
batch.prefetch() ;
}
// Done if prefetching only.
if (prefetch) { return ; }
// Return result.
batch.sync() ;
switch (batch.getPackingMethod()) {
case Batch::singleArray: {
mwSize dims [] = {1,1} ;
out[OUT_IMAGES] = mxCreateCellArray(2, dims) ;
mxSetCell(out[OUT_IMAGES], 0, batch.relinquishArray()) ;
break ;
}
case Batch::individualArrays:
out[OUT_IMAGES] = mxCreateCellArray(mxGetNumberOfDimensions(in[IN_FILENAMES]),
mxGetDimensions(in[IN_FILENAMES])) ;
for (int i = 0 ; i < batch.getNumberOfItems() ; ++i) {
Batch::Item * item = batch.getItem(i) ;
if (item->error != vl::VLE_Success) {
vlmxWarning(VLMXE_Execution, "could not read image '%s' because '%s'",
item->name.c_str(),
item->errorMessage) ;
} else {
mxSetCell(out[OUT_IMAGES], i, item->relinquishArray()) ;
}
}
break ;
}
// Finalize.
batch.clear() ;
}
| 1d32e7f68c6dccb4f503a5754fd6d2e2656a4ac7.cu | /** @file vl_imreadjpeg.cu
** @brief Load and transform images asynchronously
** @author Andrea Vedaldi
**/
/*
Copyright (C) 2014-16 Andrea Vedaldi.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "bits/impl/tinythread.h"
#include "bits/impl/blashelper.hpp"
#include "bits/imread.hpp"
#include "bits/impl/imread_helpers.hpp"
#include <assert.h>
#include <vector>
#include <string>
#include <algorithm>
#include <iostream>
#include <sstream>
#include <cstdlib>
#include "bits/datamex.hpp"
#include "bits/mexutils.h"
#ifdef _MSC_VER
#undef max
#undef min
#endif
static int verbosity = 0 ;
/* option codes */
enum {
opt_num_threads = 0,
opt_prefetch,
opt_resize,
opt_pack,
opt_gpu,
opt_verbose,
opt_subtract_average,
opt_crop_size,
opt_crop_location,
opt_crop_anisotropy,
opt_flip,
opt_contrast,
opt_saturation,
opt_brightness,
opt_interpolation,
} ;
/* options */
VLMXOption options [] = {
{"NumThreads", 1, opt_num_threads },
{"Prefetch", 0, opt_prefetch },
{"Verbose", 0, opt_verbose },
{"Resize", 1, opt_resize },
{"Pack", 0, opt_pack },
{"GPU", 0, opt_gpu },
{"SubtractAverage", 1, opt_subtract_average },
{"CropAnisotropy", 1, opt_crop_anisotropy },
{"CropSize", 1, opt_crop_size },
{"CropLocation", 1, opt_crop_location },
{"Flip", 0, opt_flip },
{"Brightness", 1, opt_brightness },
{"Contrast", 1, opt_contrast },
{"Saturation", 1, opt_saturation },
{"Interpolation", 1, opt_interpolation },
{0, 0, 0 }
} ;
enum {
IN_FILENAMES = 0, IN_END
} ;
enum {
OUT_IMAGES = 0, OUT_END
} ;
/* ---------------------------------------------------------------- */
/* Logger */
/* ---------------------------------------------------------------- */
namespace vl {
class Logger
{
public:
Logger() ;
~Logger() ;
std::ostringstream & getStream() ;
protected:
std::ostringstream stringStream ;
private:
// Disable
Logger(const Logger&) ;
Logger& operator= (const Logger&) ;
} ;
}
vl::Logger::Logger()
{ }
vl::Logger::~Logger()
{
printf("%s\n", stringStream.str().c_str()) ;
//fflush(stdout) ;
}
std::ostringstream &
vl::Logger::getStream()
{
return stringStream ;
}
#define LOGERROR \
vl::Logger().getStream() \
<<"[info] "<<__func__<<"::"
#define LOG(level) \
if (verbosity < level) { } \
else vl::Logger().getStream() \
<<"[info] "<<__func__<<"::"
/* ---------------------------------------------------------------- */
/* Batch */
/* ---------------------------------------------------------------- */
class Batch
{
public:
struct Item
{
enum State {
prefetch,
fetch,
ready
} state ;
Batch const & batch ;
std::string name ;
vl::ImageShape shape ;
mxArray * array ;
vl::ErrorCode error ;
char errorMessage [512] ;
bool borrowed ;
vl::MexTensor cpuArray ;
vl::MexTensor gpuArray ;
int index ;
size_t outputWidth ;
size_t outputHeight ;
size_t outputNumChannels ;
size_t cropWidth ;
size_t cropHeight ;
size_t cropOffsetX ;
size_t cropOffsetY ;
bool flip ;
vl::impl::ImageResizeFilter::FilterType filterType ;
float brightnessShift [3] ;
float contrastShift ;
float saturationShift ;
Item(Batch const & batch) ;
mxArray * relinquishArray() ;
} ;
enum ResizeMethod {
noResize,
resizeShortestSide,
fixedSize
} ;
enum PackingMethod {
individualArrays,
singleArray
};
enum CropLocation {
cropCenter,
cropRandom
} ;
Batch(vl::MexContext & context) ;
~Batch() ;
vl::ErrorCode init() ;
void finalize() ;
vl::ErrorCode registerItem(std::string const & name) ;
size_t getNumberOfItems() const ;
Item * getItem(int index) ;
void clear() ;
void sync() const ;
vl::ErrorCode prefetch() ;
mxArray * relinquishArray() ;
void setGpuMode(bool gpu) ;
void setPackingMethod(PackingMethod method) ;
void setResizeMethod(ResizeMethod method, int height, int width) ;
void setAverage(double average []) ;
void setAverageImage(float const * image) ;
void setColorDeviation(double brightness [], double contrast, double saturation) ;
void setFlipMode(bool x) ;
void setCropAnisotropy(double minAnisotropy, double maxAnisotropy) ;
void setCropSize(double minSize, double maxSize) ;
void setCropLocation(CropLocation location) ;
void setFilterType(vl::impl::ImageResizeFilter::FilterType type) ;
PackingMethod getPackingMethod() const ;
Item * borrowNextItem() ;
void returnItem(Item * item) ;
private:
vl::MexContext & context ;
tthread::mutex mutable mutex ;
tthread::condition_variable mutable waitNextItemToBorrow ;
tthread::condition_variable mutable waitCompletion ;
bool quit ;
typedef std::vector<Item*> items_t ;
items_t items ;
int nextItem ;
int numReturnedItems ;
enum PackingMethod packingMethod ;
enum ResizeMethod resizeMethod ;
int resizeHeight ;
int resizeWidth ;
bool gpuMode ;
double average [3] ;
float * averageImage ;
double contrastDeviation ;
double saturationDeviation ;
double brightnessDeviation [9] ;
double minCropAnisotropy ;
double maxCropAnisotropy ;
double minCropSize ;
double maxCropSize ;
CropLocation cropLocation ;
bool flipMode ;
vl::impl::ImageResizeFilter::FilterType filterType ;
vl::MexTensor cpuPack ;
vl::MexTensor gpuPack ;
friend class ReaderTask ;
int gpuDevice ;
#if ENABLE_GPU
bool cudaStreamInitialized ;
cudaStream_t cudaStream ;
float * cpuPinnedPack ;
size_t cpuPinnedPackSize ;
#endif
} ;
Batch::Item::Item(Batch const & batch)
: batch(batch),
cpuArray(batch.context),
gpuArray(batch.context),
borrowed(false),
error(vl::VLE_Success),
state(ready),
flip(false)
{
memset(errorMessage,sizeof(errorMessage),0) ;
}
mxArray * Batch::Item::relinquishArray()
{
if (batch.gpuMode) {
return gpuArray.relinquish() ;
} else {
return cpuArray.relinquish() ;
}
}
mxArray * Batch::relinquishArray()
{
if (gpuMode) {
return gpuPack.relinquish() ;
} else {
return cpuPack.relinquish() ;
}
}
Batch::Batch(vl::MexContext & context)
: context(context),
cpuPack(context),
gpuPack(context),
quit(true),
resizeMethod(noResize),
packingMethod(individualArrays),
gpuMode(false),
numReturnedItems(0),
averageImage(NULL)
#if ENABLE_GPU
, cpuPinnedPack(NULL),
cpuPinnedPackSize(0)
#endif
{ }
Batch::~Batch()
{
finalize() ;
}
size_t Batch::getNumberOfItems() const
{
return items.size() ;
}
Batch::Item * Batch::getItem(int index)
{
return items[index] ;
}
vl::ErrorCode Batch::init()
{
finalize() ;
LOG(2)<<"beginning batch" ;
quit = false ;
nextItem = 0 ;
numReturnedItems = 0 ;
// Restore defaults
memset(brightnessDeviation, 0, sizeof(brightnessDeviation)) ;
contrastDeviation = 0. ;
saturationDeviation = 0. ;
memset(average, 0, sizeof(average)) ;
averageImage = NULL ;
cropLocation = cropCenter ;
minCropSize = 1. ;
maxCropSize = 1. ;
minCropAnisotropy = 1. ;
maxCropAnisotropy = 1. ;
flipMode = false ;
filterType = vl::impl::ImageResizeFilter::kBilinear ;
packingMethod = individualArrays ;
resizeMethod = noResize ;
gpuMode = false ;
gpuDevice = -1 ;
#if ENABLE_GPU
if (cudaStreamInitialized) {
cudaStreamDestroy(cudaStream) ;
cudaStreamInitialized = false ;
}
#endif
return vl::VLE_Success ;
}
void Batch::finalize()
{
LOG(2)<<"finalizing batch" ;
// Clear current batch
clear() ;
// Release memory
#if ENABLE_GPU
if (cpuPinnedPack) {
cudaFreeHost(cpuPinnedPack) ;
cpuPinnedPack = 0 ;
cpuPinnedPackSize = 0 ;
}
#endif
// Signal waiting threads that we are quitting
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
quit = true ;
waitNextItemToBorrow.notify_all() ;
}
}
Batch::Item * Batch::borrowNextItem()
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
while (true) {
if (quit) { return NULL ; }
if (nextItem < items.size()) {
Item * item = items[nextItem] ;
if (item->state != Item::ready) {
item->borrowed = true ;
nextItem ++ ;
return item ;
}
}
waitNextItemToBorrow.wait(mutex) ;
}
}
void Batch::returnItem(Batch::Item * item)
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
numReturnedItems ++ ;
if (item->state == Item::fetch &&
numReturnedItems == items.size() &&
packingMethod == singleArray &&
gpuMode) {
#if ENABLE_GPU
LOG(2) << "push to GPU the pack" ;
cudaError_t cerror ;
cerror = cudaMemcpyAsync (gpuPack.getMemory(),
cpuPinnedPack,
gpuPack.getNumElements() * sizeof(float),
cudaMemcpyHostToDevice,
cudaStream) ;
if (cerror != cudaSuccess) {
item->error = vl::VLE_Cuda ;
snprintf(item->errorMessage, sizeof(item->errorMessage),
"cudaMemcpyAsnyc : '%s'", cudaGetErrorString(cerror)) ;
}
#endif
}
item->borrowed = false ;
item->state = Batch::Item::ready ;
waitCompletion.notify_all() ;
}
void Batch::setAverageImage(float const * image)
{
if (image == NULL) {
if (averageImage) {
free(averageImage) ;
averageImage = NULL ;
}
return ;
}
assert (resizeMethod == fixedSize) ;
averageImage = (float*)malloc(sizeof(float) * resizeHeight * resizeWidth * 3) ;
memcpy(averageImage, image, sizeof(float) * resizeHeight * resizeWidth * 3) ;
}
void Batch::clear()
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
// Stop threads from getting more tasks. After this any call to borrowItem() by a worker will
// stop in a waiting state. Thus, we simply wait for all of them to return their items.
nextItem = (int)items.size() ;
// Wait for all thread to return their items
for (int i = 0 ; i < items.size() ; ++i) {
while (items[i]->borrowed) {
waitCompletion.wait(mutex) ;
}
}
for (int i = 0 ; i < items.size() ; ++i) {
delete items[i] ;
}
items.clear() ;
// Clear average image
setAverageImage(NULL) ;
// At the end of the current (empty) list
nextItem = 0 ;
numReturnedItems = 0 ;
}
void Batch::sync() const
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
// Wait for threads to complete work for all items.
// Note that it is not enough to check that threads are all in a
// "done" state as this does not mean that all work has been done yet.
// Instead, we look at the number of items returned.
while (numReturnedItems < items.size()) {
waitCompletion.wait(mutex) ;
}
if (gpuMode) {
#if ENABLE_GPU
cudaError_t cerror ;
cerror = cudaStreamSynchronize(cudaStream) ;
if (cerror != cudaSuccess) {
LOGERROR << "CUDA error while synchronizing a stream: '" << cudaGetErrorString(cerror) << '\'' ;
}
#endif
}
}
vl::ErrorCode Batch::registerItem(std::string const & name)
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
Item * item = new Item(*this) ;
item->index = (int)items.size() ;
item->name = name ;
item->state = Item::prefetch ;
items.push_back(item) ;
return vl::VLE_Success ;
}
void Batch::setGpuMode(bool gpu)
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
#if ENABLE_GPU
if (gpu) {
cudaGetDevice(&gpuDevice) ;
if (!cudaStreamInitialized) {
cudaError_t cerror ;
cerror = cudaStreamCreateWithFlags(&cudaStream, cudaStreamNonBlocking) ;
if (cerror != cudaSuccess) {
LOGERROR
<< "CUDA error while creating a stream '"
<< cudaGetErrorString(cerror) << '\"' ;
} else {
cudaStreamInitialized = true ;
}
}
}
#endif
gpuMode = gpu ;
}
void Batch::setResizeMethod(Batch::ResizeMethod method, int height, int width)
{
resizeMethod = method ;
resizeHeight = height ;
resizeWidth = width ;
}
void Batch::setPackingMethod(Batch::PackingMethod method)
{
assert(method == individualArrays || method == singleArray) ;
packingMethod = method ;
}
Batch::PackingMethod Batch::getPackingMethod() const
{
return packingMethod ;
}
void Batch::setAverage(double average [])
{
::memcpy(this->average, average, sizeof(this->average)) ;
}
void Batch::setColorDeviation(double brightness [], double contrast, double saturation)
{
::memcpy(brightnessDeviation, brightness, sizeof(brightnessDeviation)) ;
contrastDeviation = contrast ;
saturationDeviation = saturation ;
}
void Batch::setFilterType(vl::impl::ImageResizeFilter::FilterType type)
{
filterType = type ;
}
void Batch::setFlipMode(bool x)
{
flipMode = x ;
}
void Batch::setCropAnisotropy(double minAnisotropy, double maxAnisotropy)
{
assert(minAnisotropy <= maxAnisotropy) ;
assert(0.0 <= minAnisotropy && minAnisotropy <= 1.0) ;
minCropAnisotropy = minAnisotropy ;
maxCropAnisotropy = maxAnisotropy ;
}
void Batch::setCropSize(double minSize, double maxSize)
{
assert(minSize <= maxSize) ;
assert(0.0 <= minSize && minSize <= 1.0) ;
assert(0.0 <= maxSize && maxSize <= 1.0) ;
minCropSize = minSize ;
maxCropSize = maxSize ;
}
void Batch::setCropLocation(CropLocation location)
{
assert(location == cropCenter || location == cropRandom) ;
cropLocation = location ;
}
//void Batch::getItemTransformation(Item * item)
//{
//
//}
vl::ErrorCode Batch::prefetch()
{
// Prod and then wait for reader threads to initialize the shape of the images
// and then perform the requried allocations.
waitNextItemToBorrow.notify_all() ;
sync() ;
// In packing mode, preallocate all memory here.
if (packingMethod == singleArray) {
assert(resizeMethod == fixedSize) ;
vl::TensorShape shape(resizeHeight, resizeWidth, 3, getNumberOfItems()) ;
if (gpuMode) {
#if ENABLE_GPU
gpuPack.init(vl::VLDT_GPU, vl::VLDT_Float, shape) ;
gpuPack.makePersistent() ;
size_t memSize = shape.getNumElements() * sizeof(float) ;
if (cpuPinnedPackSize < memSize) {
if (cpuPinnedPack) {
cudaFreeHost(cpuPinnedPack) ;
}
cudaMallocHost(&cpuPinnedPack, memSize) ;
cpuPinnedPackSize = memSize ;
}
#endif
} else {
cpuPack.init(vl::VLDT_CPU, vl::VLDT_Float, shape) ;
cpuPack.makePersistent() ;
}
}
// Get ready to reprocess all items.
nextItem = 0 ;
numReturnedItems = 0 ;
for (int i = 0 ; i < getNumberOfItems() ; ++ i) {
Batch::Item * item = getItem(i) ;
if (item->error == vl::VLE_Success) {
if (verbosity >= 2) {
mexPrintf("%20s: %d x %d x %d\n", item->name.c_str(), item->shape.width, item->shape.height, item->shape.depth) ;
}
} else {
mexPrintf("%20s: error '%s'\n", item->name.c_str(), item->errorMessage) ;
}
// Determine the shape of (height and width) of the output image. This is either
// the same as the input image, or with a fixed size for the shortest side,
// or a fixed size for both sides.
int outputHeight ;
int outputWidth ;
double cropHeight ;
double cropWidth ;
int dx ;
int dy ;
switch (resizeMethod) {
case noResize:
outputHeight = (int)item->shape.height ;
outputWidth = (int)item->shape.width ;
break ;
case resizeShortestSide: {
double scale1 = (double)resizeHeight / item->shape.width ;
double scale2 = (double)resizeHeight / item->shape.height ;
double scale = std::max(scale1, scale2) ;
outputHeight = std::max(1.0, round(scale * item->shape.height)) ;
outputWidth = std::max(1.0, round(scale * item->shape.width)) ;
break ;
}
case fixedSize:
outputHeight = resizeHeight ;
outputWidth = resizeWidth ;
break ;
}
// Determine the aspect ratio of the crop in the input image.
{
double anisotropyRatio = 1.0 ;
if (minCropAnisotropy == 0 || maxCropAnisotropy == 0) {
// Stretch crop to have the same shape as the input.
double inputAspect = (double)item->shape.width / item->shape.height ;
double outputAspect = (double)outputWidth / outputHeight ;
anisotropyRatio = inputAspect / outputAspect ;
} else {
double z = (double)rand() / RAND_MAX ;
double a = log(maxCropAnisotropy) ;
double b = log(minCropAnisotropy) ;
anisotropyRatio = exp(z * (b - a) + a) ;
}
cropWidth = outputWidth * sqrt(anisotropyRatio) ;
cropHeight = outputHeight / sqrt(anisotropyRatio) ;
}
// Determine the crop size.
{
double scale = std::min(item->shape.width / cropWidth,
item->shape.height / cropHeight) ;
double z = (double)rand() / RAND_MAX ;
#if 1
double a = maxCropSize * maxCropSize ;
double b = minCropSize * minCropSize ;
double size = sqrt(z * (b - a) + a) ;
#else
double a = maxCropSize ;
double b = minCropSize ;
double size = z * (b - a) + a ;
#endif
cropWidth *= scale * size ;
cropHeight *= scale * size ;
}
cropWidth = std::min(round(cropWidth), (double)item->shape.width) ;
cropHeight = std::min(round(cropHeight), (double)item->shape.height) ;
// Determine the crop location.
{
dx = item->shape.width - cropWidth ;
dy = item->shape.height - cropHeight ;
switch (cropLocation) {
case cropCenter:
dx /= 2 ;
dy /= 2 ;
break ;
case cropRandom:
dx = rand() % (dx + 1) ;
dy = rand() % (dy + 1) ;
break ;
default:
LOGERROR << "cropLocation not set" ;
}
}
// Save.
item->outputWidth = outputWidth ;
item->outputHeight = outputHeight ;
item->outputNumChannels = (packingMethod == individualArrays) ? item->shape.depth : 3 ;
item->cropWidth = cropWidth ;
item->cropHeight = cropHeight ;
item->cropOffsetX = dx ;
item->cropOffsetY = dy ;
item->flip = flipMode && (rand() > RAND_MAX/2) ;
item->filterType = filterType ;
// Color processing.
item->saturationShift = 1. + saturationDeviation * (2.*(double)rand()/RAND_MAX - 1.) ;
item->contrastShift = 1. + contrastDeviation * (2.*(double)rand()/RAND_MAX - 1.) ;
{
int numChannels = item->outputNumChannels ;
double w [3] ;
for (int i = 0 ; i < numChannels ; ++i) { w[i] = vl::randn() ; }
for (int i = 0 ; i < numChannels ; ++i) {
item->brightnessShift[i] = 0. ;
for (int j = 0 ; j < numChannels ; ++j) {
item->brightnessShift[i] += brightnessDeviation[i + 3*j] * w[i] ;
}
}
}
LOG(2)
<< "input (" << item->shape.width << " x " << item->shape.height << " x " << item->shape.depth << ") "
<< "output (" << item->outputWidth << " x " << item->outputHeight << " x " << item->outputNumChannels << ") "
<< "crop (" << item->cropWidth << " x " << item->cropHeight << ") "
<< "offset (" << item->cropOffsetX << ", " << item->cropOffsetY << ")" ;
if (packingMethod == individualArrays) {
vl::TensorShape shape(outputHeight, outputWidth, item->outputNumChannels, 1) ;
item->cpuArray.init(vl::VLDT_CPU, vl::VLDT_Float, shape) ;
item->cpuArray.makePersistent() ;
if (gpuMode) {
item->gpuArray.init(vl::VLDT_GPU, vl::VLDT_Float, shape) ;
item->gpuArray.makePersistent() ;
}
}
// Ready to fetch
item->state = Item::fetch ;
}
// Notify that we are ready to fetch
{
tthread::lock_guard<tthread::mutex> lock(mutex) ;
waitNextItemToBorrow.notify_all() ;
}
return vl::VLE_Success ;
}
/* ---------------------------------------------------------------- */
/* ReaderTask */
/* ---------------------------------------------------------------- */
class ReaderTask
{
public:
ReaderTask() ;
~ReaderTask() { finalize() ; }
vl::ErrorCode init(Batch * batch, int index) ;
void finalize() ;
private:
int index ;
Batch * batch ;
tthread::thread * thread ;
vl::ImageReader * reader ;
static void threadEntryPoint(void * thing) ;
void entryPoint() ;
void * getBuffer(int index, size_t size) ;
int gpuDevice ;
private:
ReaderTask(ReaderTask const &) ;
ReaderTask & operator= (ReaderTask const &) ;
struct Buffer {
void * memory ;
size_t size ;
} buffers [2] ;
} ;
void ReaderTask::threadEntryPoint(void * thing)
{
((ReaderTask*)thing)->entryPoint() ;
}
ReaderTask::ReaderTask()
: batch(NULL), thread(NULL), reader(NULL)
{
memset(buffers, 0, sizeof(buffers)) ;
}
void * ReaderTask::getBuffer(int index, size_t size)
{
if (buffers[index].size < size) {
if (buffers[index].memory) {
free(buffers[index].memory) ;
}
buffers[index].memory = malloc(size) ;
buffers[index].size = size ;
}
return buffers[index].memory ;
}
void ReaderTask::entryPoint()
{
LOG(2) << "reader " << index << " task staring" ;
while (true) {
#if ENABLE_GPU
if (batch->gpuMode && batch->gpuDevice != gpuDevice) {
LOG(2) << "reader " << index << " setting GPU device" ;
cudaSetDevice(batch->gpuDevice) ;
cudaGetDevice(&gpuDevice) ;
}
#endif
Batch::Item * item = batch->borrowNextItem() ;
LOG(3) << "borrowed " << item ;
if (item == NULL) { break ; }
if (item->error != vl::VLE_Success) {
batch->returnItem(item) ;
continue ;
}
switch (item->state) {
case Batch::Item::prefetch: {
item->error = reader->readShape(item->shape, item->name.c_str()) ;
if (item->error != vl::VLE_Success) {
snprintf(item->errorMessage, sizeof(item->errorMessage), "%s", reader->getLastErrorMessage()) ;
}
break ;
}
case Batch::Item::fetch: {
// Get the CPU buffer that will hold the pixels.
float * outputPixels;
if (batch->getPackingMethod() == Batch::individualArrays) {
outputPixels = (float*)item->cpuArray.getMemory() ;
} else {
if (batch->gpuMode) {
#if ENABLE_GPU
outputPixels = batch->cpuPinnedPack ;
#else
snprintf(item->errorMessage, sizeof(item->errorMessage), "GPU support not compiled.") ;
break;
#endif
} else {
outputPixels = (float*)batch->cpuPack.getMemory() ;
}
outputPixels += item->outputHeight*item->outputWidth*3*item->index ;
}
// Read full image.
float * inputPixels = (float*)getBuffer(0,
item->shape.height *
item->shape.width *
item->shape.depth * sizeof(float)) ;
item->error = reader->readPixels(inputPixels, item->name.c_str()) ;
if (item->error != vl::VLE_Success) {
snprintf(item->errorMessage, sizeof(item->errorMessage), "%s", reader->getLastErrorMessage()) ;
break ;
}
// Crop.
float * temp = (float*)getBuffer(1,
item->outputHeight *
item->shape.width *
item->shape.depth * sizeof(float)) ;
vl::impl::imageResizeVertical(temp, inputPixels,
item->outputHeight,
item->shape.height,
item->shape.width,
item->shape.depth,
item->cropHeight,
item->cropOffsetY,
false, // flip
item->filterType) ;
vl::impl::imageResizeVertical(outputPixels, temp,
item->outputWidth,
item->shape.width,
item->outputHeight,
item->shape.depth,
item->cropWidth,
item->cropOffsetX,
item->flip,
item->filterType) ;
// Postprocess colors.
{
size_t inputNumChannels = item->shape.depth ;
size_t K = item->outputNumChannels ;
size_t n = item->outputHeight*item->outputWidth ;
if (batch->averageImage) {
// If there is an average image, then subtract it now.
// Grayscale images are expanded here to color if needed.
// Withouth an average image,
// they are expanded later.
for (int k = inputNumChannels ; k < K ; ++k) {
::memcpy(outputPixels + n*k, outputPixels, sizeof(float) * n) ;
}
vl::impl::blas<vl::VLDT_CPU,vl::VLDT_Float>::axpy
(batch->context,
n * item->outputNumChannels,
-1.0f,
batch->averageImage, 1,
outputPixels, 1) ;
inputNumChannels = K ;
}
float dv [3] ;
float * channels [3] ;
for (int k = 0 ; k < K ; ++k) {
channels[k] = outputPixels + n * k ;
}
for (int k = 0 ; k < inputNumChannels ; ++k) {
dv[k] = (1. - 2. * item->contrastShift) *
(batch->average[k] + item->brightnessShift[k]);
if (item->contrastShift != 1.) {
float mu = 0.f ;
float const * pixel = channels[k] ;
float const * end = channels[k] + n ;
while (pixel != end) { mu += *pixel++ ; }
mu /= n ;
dv[k] -= (1.0 - item->contrastShift) * mu ;
}
}
{
float const * end = channels[0] + n ;
float v [3] ;
if (K == 3 && inputNumChannels == 3) {
float const a = item->contrastShift * item->saturationShift ;
float const b = item->contrastShift * (1. - item->saturationShift) / K ;
while (channels[0] != end) {
float mu = 0.f ;
v[0] = *channels[0] + dv[0] ; mu += v[0] ;
v[1] = *channels[1] + dv[1] ; mu += v[1] ;
v[2] = *channels[2] + dv[2] ; mu += v[2] ;
*channels[0]++ = a * v[0] + b * mu ;
*channels[1]++ = a * v[1] + b * mu ;
*channels[2]++ = a * v[2] + b * mu ;
}
} else if (K == 3 && inputNumChannels == 1) {
float const a = item->contrastShift * item->saturationShift ;
float const b = item->contrastShift * (1. - item->saturationShift) / K ;
while (channels[0] != end) {
float mu = 0.f ;
v[0] = *channels[0] + dv[0] ; mu += v[0] ;
v[1] = *channels[0] + dv[1] ; mu += v[1] ;
v[2] = *channels[0] + dv[2] ; mu += v[2] ;
*channels[0]++ = a * v[0] + b * mu ;
*channels[1]++ = a * v[1] + b * mu ;
*channels[2]++ = a * v[2] + b * mu ;
}
} else {
float const a = item->contrastShift ;
while (channels[0] != end) {
float v = *channels[0] + dv[0] ;
*channels[0]++ = a * v ;
}
}
}
}
// Copy to GPU.
if (batch->getPackingMethod() == Batch::individualArrays && batch->gpuMode) {
#if ENABLE_GPU
cudaError_t cerror ;
cerror = cudaMemcpyAsync (item->gpuArray.getMemory(),
outputPixels,
item->gpuArray.getNumElements() * sizeof(float),
cudaMemcpyHostToDevice,
batch->cudaStream) ;
if (cerror != cudaSuccess) {
item->error = vl::VLE_Cuda ;
snprintf(item->errorMessage, sizeof(item->errorMessage),
"CUDA error while copying memory from host to device: '%s'", cudaGetErrorString(cerror)) ;
break ;
}
#endif
}
break ;
}
case Batch::Item::ready:
break ;
}
batch->returnItem(item) ;
}
LOG(2) << "reader " << index << " task quitting" ;
}
void ReaderTask::finalize()
{
LOG(2)<<"finalizing reader " << index ;
if (thread) {
if (thread->joinable()) {
thread->join() ;
}
delete thread ;
thread = NULL ;
}
for (int i = 0 ; i < sizeof(buffers)/sizeof(Buffer) ; ++i) {
if (buffers[i].memory) {
free(buffers[i].memory) ;
buffers[i].memory = NULL ;
buffers[i].size = 0 ;
}
}
if (reader) {
delete reader ;
reader = NULL ;
}
index = -1 ;
batch = NULL ;
}
vl::ErrorCode ReaderTask::init(Batch * batch, int index)
{
finalize() ;
this->batch = batch ;
this->index = index ;
thread = new tthread::thread(threadEntryPoint, this) ;
reader = new vl::ImageReader() ;
return vl::VLE_Success ;
}
/* ---------------------------------------------------------------- */
/* Cache */
/* ---------------------------------------------------------------- */
vl::MexContext context ;
Batch batch(context) ;
bool batchIsInitialized = false ;
typedef std::vector<ReaderTask*> readers_t ;
readers_t readers ;
void atExit()
{
if (batchIsInitialized) {
batch.finalize() ;
batchIsInitialized = false ;
}
for (int r = 0 ; r < readers.size() ; ++r) {
readers[r]->finalize() ;
delete readers[r] ;
}
readers.clear() ;
}
/* ---------------------------------------------------------------- */
/* Cache */
/* ---------------------------------------------------------------- */
void mexFunction(int nout, mxArray *out[],
int nin, mxArray const *in[])
{
bool prefetch = false ;
bool gpuMode = false ;
int requestedNumThreads = readers.size() ;
int opt ;
int next = IN_END ;
mxArray const *optarg ;
Batch::PackingMethod packingMethod = Batch::individualArrays ;
Batch::ResizeMethod resizeMethod = Batch::noResize ;
int resizeWidth = -1 ;
int resizeHeight = -1 ;
vl::ErrorCode error ;
double average [3] = {0.} ;
vl::MexTensor averageImage(context) ;
double brightnessDeviation [9] = {0.} ;
double saturationDeviation = 0. ;
double contrastDeviation = 0. ;
bool flipMode = false ;
Batch::CropLocation cropLocation = Batch::cropCenter ;
double minCropSize = 1.0, maxCropSize = 1.0 ;
double minCropAnisotropy = 1.0, maxCropAnisotropy = 1.0 ;
vl::impl::ImageResizeFilter::FilterType filterType = vl::impl::ImageResizeFilter::kBilinear ;
verbosity = 0 ;
/* -------------------------------------------------------------- */
/* Check the arguments */
/* -------------------------------------------------------------- */
mexAtExit(atExit) ;
if (nin < 1) {
vlmxError(VLMXE_IllegalArgument, "There is less than one argument.") ;
}
while ((opt = vlmxNextOption (in, nin, options, &next, &optarg)) >= 0) {
switch (opt) {
case opt_verbose :
++ verbosity ;
break ;
case opt_prefetch :
prefetch = true ;
break ;
case opt_pack :
packingMethod = Batch::singleArray ;
break ;
case opt_gpu :
#ifndef ENABLE_GPU
vlmxError(VLMXE_IllegalArgument, "Not compiled with GPU support.") ;
#endif
gpuMode = true ;
break ;
case opt_num_threads :
requestedNumThreads = (int)mxGetScalar(optarg) ;
break ;
case opt_resize :
if (!vlmxIsPlainVector(optarg, -1)) {
vlmxError(VLMXE_IllegalArgument, "RESIZE is not a plain vector.") ;
}
switch (mxGetNumberOfElements(optarg)) {
case 1 :
resizeMethod = Batch::resizeShortestSide ;
resizeHeight = (int)mxGetPr(optarg)[0] ;
resizeWidth = (int)mxGetPr(optarg)[0] ;
break ;
case 2 :
resizeMethod = Batch::fixedSize ;
resizeHeight = (int)mxGetPr(optarg)[0] ;
resizeWidth = (int)mxGetPr(optarg)[1] ;
break;
default:
vlmxError(VLMXE_IllegalArgument, "RESIZE does not have one or two dimensions.") ;
break ;
}
if (resizeHeight < 1 || resizeWidth < 1) {
vlmxError(VLMXE_IllegalArgument, "An element of RESIZE is smaller than one.") ;
}
break ;
case opt_brightness: {
if (!vlmxIsPlainMatrix(optarg, -1, -1)) {
vlmxError(VLMXE_IllegalArgument, "BRIGHTNESS is not a plain matrix.") ;
}
size_t n = mxGetNumberOfElements(optarg) ;
memset(brightnessDeviation, 0, sizeof(brightnessDeviation)) ;
if (n == 1) {
double x = mxGetPr(optarg)[0] ;
brightnessDeviation[0] = x;
brightnessDeviation[3] = x;
brightnessDeviation[8] = x;
} else if (n == 3) {
double const* x = mxGetPr(optarg) ;
brightnessDeviation[0] = x[0];
brightnessDeviation[3] = x[1];
brightnessDeviation[8] = x[2];
} else if (n == 9) {
memcpy(brightnessDeviation, mxGetPr(optarg), sizeof(brightnessDeviation)) ;
} else {
vlmxError(VLMXE_IllegalArgument, "BRIGHTNESS does not have 1, 3, or 9 elements.") ;
}
break ;
}
case opt_saturation: {
if (!vlmxIsPlainScalar(optarg)) {
vlmxError(VLMXE_IllegalArgument, "SATURATION is not a plain scalar.") ;
}
double x = mxGetPr(optarg)[0] ;
if (x < 0 || x > 1.0) {
vlmxError(VLMXE_IllegalArgument, "SATURATION is not in the [0,1] range..") ;
}
saturationDeviation = x ;
break ;
}
case opt_contrast: {
if (!vlmxIsPlainScalar(optarg)) {
vlmxError(VLMXE_IllegalArgument, "CONTRAST is not a plain scalar.") ;
}
double x = mxGetPr(optarg)[0] ;
if (x < 0 || x > 1.0) {
vlmxError(VLMXE_IllegalArgument, "CONTRAST is not in the [0,1] range..") ;
}
contrastDeviation = x ;
break ;
}
case opt_crop_anisotropy: {
if (!vlmxIsPlainScalar(optarg) && !vlmxIsPlainVector(optarg, 2)) {
vlmxError(VLMXE_IllegalArgument, "CROPANISOTROPY is not a plain scalar or vector with two components.") ;
}
minCropAnisotropy = mxGetPr(optarg)[0] ;
maxCropAnisotropy = mxGetPr(optarg)[std::min((mwSize)1, mxGetNumberOfElements(optarg)-1)] ;
if (minCropAnisotropy < 0.0 || minCropAnisotropy > maxCropAnisotropy) {
vlmxError(VLMXE_IllegalArgument, "CROPANISOTROPY values are not in the legal range.") ;
}
break ;
}
case opt_crop_size: {
if (!vlmxIsPlainScalar(optarg) && !vlmxIsPlainVector(optarg, 2)) {
vlmxError(VLMXE_IllegalArgument, "CROPSIZE is not a plain scalar or vector with two components.") ;
}
minCropSize = mxGetPr(optarg)[0] ;
maxCropSize = mxGetPr(optarg)[std::min((mwSize)1, mxGetNumberOfElements(optarg)-1)] ;
if (minCropSize < 0.0 || minCropSize > maxCropSize || maxCropSize > 1.0) {
vlmxError(VLMXE_IllegalArgument, "CROPSIZE values are not in the legal range.") ;
}
break ;
}
case opt_crop_location: {
if (!vlmxIsString(optarg, -1)) {
vlmxError(VLMXE_IllegalArgument, "CROPLOCATION is not a string") ;
}
if (vlmxCompareToStringI(optarg, "random") == 0) {
cropLocation = Batch::cropRandom ;
} else if (vlmxCompareToStringI(optarg, "center") == 0) {
cropLocation = Batch::cropCenter ;
} else {
vlmxError(VLMXE_IllegalArgument, "CROPLOCATION value unknown.") ;
}
break ;
}
case opt_subtract_average: {
if (vlmxIsVector(optarg,1) || vlmxIsVector(optarg, 3)) {
size_t n = mxGetNumberOfElements(optarg) ;
switch (mxGetClassID(optarg)) {
case mxSINGLE_CLASS: {
float * x = (float*)mxGetData(optarg) ;
average[0] = x[std::min((size_t)0,n-1)] ;
average[1] = x[std::min((size_t)1,n-1)] ;
average[2] = x[std::min((size_t)2,n-1)] ;
break ;
}
case mxDOUBLE_CLASS: {
double * x = mxGetPr(optarg) ;
average[0] = (float)x[std::min((size_t)0,n-1)] ;
average[1] = (float)x[std::min((size_t)1,n-1)] ;
average[2] = (float)x[std::min((size_t)2,n-1)] ;
break ;
}
default:
vlmxError(VLMXE_IllegalArgument, "SUBTRACTAVERAGE is not SINGLE or DOUBLE vector.") ;
}
} else {
if (mxGetClassID(optarg) != mxSINGLE_CLASS ||
mxGetNumberOfDimensions(optarg) > 3) {
vlmxError(VLMXE_IllegalArgument, "SUBTRACTAVERAGE is not a SINGLE image of a compatible shape.") ;
}
averageImage.init(optarg) ;
}
break ;
}
case opt_flip: {
flipMode = true ;
break ;
}
case opt_interpolation: {
if (!vlmxIsString(optarg,-1)) {
vlmxError(VLMXE_IllegalArgument, "INTERPOLATION is not a string.") ;
}
if (vlmxIsEqualToStringI(optarg, "box")) {
filterType = vl::impl::ImageResizeFilter::kBox ;
} else if (vlmxIsEqualToStringI(optarg, "bilinear")) {
filterType = vl::impl::ImageResizeFilter::kBilinear ;
} else if (vlmxIsEqualToStringI(optarg, "bicubic")) {
filterType = vl::impl::ImageResizeFilter::kBicubic ;
} else if (vlmxIsEqualToStringI(optarg, "lanczos2")) {
filterType = vl::impl::ImageResizeFilter::kLanczos2 ;
} else if (vlmxIsEqualToStringI(optarg, "lanczos3")) {
filterType = vl::impl::ImageResizeFilter::kLanczos3 ;
} else {
vlmxError(VLMXE_IllegalArgument, "INTERPOLATION is not a supported method.") ;
}
break;
break ;
}
}
}
if (averageImage) {
if (resizeMethod != Batch::fixedSize) {
vlmxError(VLMXE_IllegalArgument, "Cannot subtract an average image unless RESIZE is used to set the size of the output.") ;
}
if (averageImage.getNumDimensions() != 3 ||
averageImage.getHeight() != resizeHeight ||
averageImage.getWidth() != resizeWidth ||
averageImage.getDepth() !=3) {
vlmxError(VLMXE_IllegalArgument, "The average image is not a RESIZEHEIGHT x RESIZEWIDTH x 3 array.") ;
}
}
/* -------------------------------------------------------------- */
/* Do the work */
/* -------------------------------------------------------------- */
if (!mxIsCell(in[IN_FILENAMES])) {
vlmxError(VLMXE_IllegalArgument, "FILENAMES is not a cell array of strings.") ;
}
// If the requested number of threads changes, finalize everything
requestedNumThreads = std::max(requestedNumThreads, 1) ;
if (readers.size() != requestedNumThreads) {
atExit() ; // Delete threads and current batch
}
// Prepare batch.
if (!batchIsInitialized) {
error = batch.init() ;
if (error != vl::VLE_Success) {
vlmxError(VLMXE_Execution, "Could not initialize a batch structure") ;
}
batchIsInitialized = true ;
}
// Prepare reader tasks.
for (int r = readers.size() ; r < requestedNumThreads ; ++r) {
readers.push_back(new ReaderTask()) ;
vl::ErrorCode error = readers[r]->init(&batch, r) ;
if (error != vl::VLE_Success) {
vlmxError(VLMXE_Execution, "Could not create the requested number of threads") ;
}
}
// Extract filenames as strings.
bool sameAsPrefeteched = true ;
std::vector<std::string> filenames ;
for (int i = 0 ; i < (int)mxGetNumberOfElements(in[IN_FILENAMES]) ; ++i) {
mxArray* filenameArray = mxGetCell(in[IN_FILENAMES], i) ;
if (!vlmxIsString(filenameArray,-1)) {
vlmxError(VLMXE_IllegalArgument, "FILENAMES contains an entry that is not a string.") ;
}
char filename [512] ;
mxGetString (filenameArray, filename, sizeof(filename)/sizeof(char)) ;
filenames.push_back(std::string(filename)) ;
sameAsPrefeteched &= (i < batch.getNumberOfItems() && batch.getItem(i)->name == filenames[i]) ;
}
// If the list of names is not the same as the prefetched ones,
// start a new cycle.
if (!sameAsPrefeteched) {
batch.clear() ;
// Check compatibility of options
if (packingMethod == Batch::singleArray && resizeMethod != Batch::fixedSize) {
vlmxError(VLMXE_IllegalArgument, "PACK must be used in combination with resizing to a fixed size.") ;
}
if (verbosity >= 2) {
mexPrintf("vl_imreadjpeg: gpu mode: %s\n", gpuMode?"yes":"no") ;
mexPrintf("vl_imreadjpeg: crop anisotropy: [%.1g, %.1g]\n",
minCropAnisotropy, maxCropAnisotropy) ;
mexPrintf("vl_imreadjpeg: crop size: [%.1g, %.1g]\n",
minCropSize, maxCropSize) ;
}
batch.setResizeMethod(resizeMethod, resizeHeight, resizeWidth) ;
batch.setPackingMethod(packingMethod) ;
batch.setGpuMode(gpuMode) ;
batch.setFlipMode(flipMode) ;
batch.setCropLocation(cropLocation) ;
batch.setCropAnisotropy(minCropAnisotropy, maxCropAnisotropy) ;
batch.setCropSize(minCropSize, maxCropSize) ;
batch.setColorDeviation(brightnessDeviation,
contrastDeviation,
saturationDeviation) ;
batch.setAverage(average) ;
if (averageImage) {
batch.setAverageImage((float const*)averageImage.getMemory()) ;
}
batch.setFilterType(filterType) ;
for (int i = 0 ; i < filenames.size() ; ++ i) {
batch.registerItem(filenames[i]) ;
}
batch.prefetch() ;
}
// Done if prefetching only.
if (prefetch) { return ; }
// Return result.
batch.sync() ;
switch (batch.getPackingMethod()) {
case Batch::singleArray: {
mwSize dims [] = {1,1} ;
out[OUT_IMAGES] = mxCreateCellArray(2, dims) ;
mxSetCell(out[OUT_IMAGES], 0, batch.relinquishArray()) ;
break ;
}
case Batch::individualArrays:
out[OUT_IMAGES] = mxCreateCellArray(mxGetNumberOfDimensions(in[IN_FILENAMES]),
mxGetDimensions(in[IN_FILENAMES])) ;
for (int i = 0 ; i < batch.getNumberOfItems() ; ++i) {
Batch::Item * item = batch.getItem(i) ;
if (item->error != vl::VLE_Success) {
vlmxWarning(VLMXE_Execution, "could not read image '%s' because '%s'",
item->name.c_str(),
item->errorMessage) ;
} else {
mxSetCell(out[OUT_IMAGES], i, item->relinquishArray()) ;
}
}
break ;
}
// Finalize.
batch.clear() ;
}
|
fa4be7533e5b395fb0a94275c602b1f82a47c74a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "organized_point_cloud.h"
#include <iostream>
#include <stdio.h>
#include <util/helper_math.h>
namespace dart {
// -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=-
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float2 pp,
const float2 fl,
const float2 range) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = depthIn[index];
if (depth >= range.x && depth <= range.y) {
vertOut[index] = make_float4( (u - pp.x)*(depth/fl.x),
(v - pp.y)*(depth/fl.y),
depth,
1.0f);
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float2 pp,
const float2 fl,
const float2 range,
const float scale) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = scale*depthIn[index];
if (depth >= range.x && depth <= range.y) {
vertOut[index] = make_float4( (u - pp.x)*(depth/fl.x),
(v - pp.y)*(depth/fl.y),
depth,
1.0f);
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float4 * Kinv,
const float2 range) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = depthIn[index];
if (depth >= range.x && depth <= range.y) {
const float4 p = make_float4( u, v, depth, 1);
float4 vert = make_float4( dot(Kinv[0],p),
dot(Kinv[1],p),
dot(Kinv[2],p),
dot(Kinv[3],p));
vert /= vert.w;
vert.w = 1;
vert.z = -vert.z;
vertOut[index] = vert;
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float4 * Kinv,
const float2 range,
const float scale) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = scale*depthIn[index];
if (depth >= range.x && depth <= range.y) {
const float4 p = make_float4( u, v, depth, 1);
float4 vert = make_float4( dot(Kinv[0],p),
dot(Kinv[1],p),
dot(Kinv[2],p),
dot(Kinv[3],p));
vert /= vert.w;
vert.w = 1;
vert.z = -vert.z;
vertOut[index] = vert;
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType, int iters>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float * cameraParams,
const float2 range) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = depthIn[index];
if (depth >= range.x && depth <= range.y) {
// http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
const float &fx = cameraParams[0];
const float &fy = cameraParams[1];
const float &cx = cameraParams[2];
const float &cy = cameraParams[3];
const float &k1 = cameraParams[4];
const float &k2 = cameraParams[5];
const float &p1 = cameraParams[6];
const float &p2 = cameraParams[7];
const float &k3 = cameraParams[8];
float xp, yp, xpp, ypp;
xpp = xp = (u - cx) / fx;
ypp = yp = (v - cy) / fy;
#pragma unroll
for (int i=0; i<iters; ++i) {
float r2 = xp*xp + yp*yp;
float r4 = r2*r2;
float r6 = r4*r2;
float denom = 1 + k1*r2 + k2*r4 + k3*r6;
float dxp = 2*p1*xp*yp + p2*(r2 + 2*xp*xp);
float dyp = p1*(r2 + 2*yp*yp) + 2*p2*xp*yp;
xp = (xpp - dxp)/denom;
yp = (ypp - dyp)/denom;
}
vertOut[index] = make_float4(xp*depth,yp*depth,depth,1.0f);
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType, int iters>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float * cameraParams,
const float2 range,
const float scale) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = scale*depthIn[index];
if (depth >= range.x && depth <= range.y) {
// http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
const float& fx = cameraParams[0];
const float& fy = cameraParams[1];
const float& cx = cameraParams[2];
const float& cy = cameraParams[3];
const float& k1 = cameraParams[4];
const float& k2 = cameraParams[5];
const float& p1 = cameraParams[6];
const float& p2 = cameraParams[7];
const float& k3 = cameraParams[8];
float xp, yp, xpp, ypp;
xpp = xp = (u - cx) / fx;
ypp = yp = (v - cy) / fy;
#pragma unroll
for (int i=0; i<iters; ++i) {
float r2 = xp*xp + yp*yp;
float r4 = r2*r2;
float r6 = r4*r2;
float denom = 1 + k1*r2 + k2*r4 + k3*r6;
float dxp = 2*p1*xp*yp + p2*(r2 + 2*xp*xp);
float dyp = p1*(r2 + 2*yp*yp) + 2*p2*xp*yp;
xp = (xpp - dxp)/denom;
yp = (ypp - dyp)/denom;
}
vertOut[index] = make_float4(xp*depth,yp*depth,depth,1.0f);
}
else {
vertOut[index].w = 0;
}
}
__global__ void gpu_verticesToNormals(const float4 * vertIn,
float4 * normOut,
const int width,
const int height) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
const float4 & v = vertIn[index];
// // don't process invalid vertices
if ( v.w == 0) {
normOut[index] = make_float4(0);
return;
}
const float4 & vLeft = vertIn[ x == 0 ? index : index-1];
const float4 & vRight = vertIn[ x == width-1 ? index : index+1];
const float4 & vUp = vertIn[ y == 0 ? index : index-width];
const float4 & vDown = vertIn[ y == height-1 ? index : index+width];
const float3 vX = make_float3( (vRight.w == 0 ? v : vRight) - (vLeft.w == 0 ? v : vLeft) );
const float3 vY = make_float3( (vDown.w == 0 ? v : vDown) - (vUp.w == 0 ? v : vUp) );
const float3 n = cross(vY,vX);
const float len2 = dot(n,n);
if (len2 > 0) {
const float invLen = 1.0f / (float)sqrtf(len2);
normOut[index] = make_float4(n.x*invLen,n.y*invLen,n.z*invLen,1);
}
else {
normOut[index] = make_float4(0);
}
}
__global__ void gpu_eliminatePlane(float4 * verts, const float4 * norms, const int width, const int height, const float3 planeNormal, const float planeD, const float epsDist, const float epsNorm) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
// check vertex validity
float4& v = verts[index];
if ( v.w == 0) {
return;
}
// check normal threshold
const float4& n = norms[index];
if (dot(make_float3(n),planeNormal) < epsNorm) {
return;
}
// check distance threshold
if (abs(dot(make_float3(v),planeNormal) - planeD) < epsDist ) {
v.w = -1;
}
}
__global__ void gpu_cropBox(float4 * verts, const int width, const int height, const float3 boxMin, const float3 boxMax) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
// check vertex validity
float4& v = verts[index];
if ( v.w == 0) {
return;
}
if (v.x < boxMin.x || v.x > boxMax.x ||
v.y < boxMin.y || v.y > boxMax.y ||
v.z < boxMin.z || v.z > boxMax.z) {
v.w = -1;
}
}
__global__ void gpu_maskPointCloud(float4* verts, const int width, const int height, const int* mask) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
int m = mask[index];
if (m == 0) {
verts[index].w = -1;
}
}
// -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=-
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_depthToVertices), dim3(grid),dim3(block), 0, 0, depthIn, vertOut, width, height, pp, fl, range);
}
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range, const float scale) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_depthToVertices), dim3(grid),dim3(block), 0, 0, depthIn, vertOut, width, height, pp, fl, range, scale);
}
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationParams, const float2 range) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_depthToVertices<DepthType,5>), dim3(grid),dim3(block), 0, 0, depthIn, vertOut, width, height, calibrationParams, range);
}
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationParams, const float2 range, const float scale) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_depthToVertices<DepthType,5>), dim3(grid),dim3(block), 0, 0, depthIn, vertOut, width, height, calibrationParams, range, scale);
}
void verticesToNormals(const float4 * vertIn, float4 * normOut, const int width, const int height) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_verticesToNormals), dim3(grid),dim3(block), 0, 0, vertIn,normOut,width,height);
}
void eliminatePlane(float4 * verts, const float4 * norms, const int width, const int height, const float3 planeNormal, const float planeD, const float epsDist, const float epsNorm) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_eliminatePlane), dim3(grid),dim3(block), 0, 0, verts,norms,width,height,planeNormal,planeD,epsDist,epsNorm);
}
void cropBox(float4 * verts, const int width, const int height, const float3 & boxMin, const float3 & boxMax) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_cropBox), dim3(grid),dim3(block), 0, 0, verts,width,height,boxMin,boxMax);
}
void maskPointCloud(float4 * verts, const int width, const int height, const int * mask) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_maskPointCloud), dim3(grid),dim3(block), 0, 0, verts,width,height,mask);
}
#define COMPILE_DEPTH_TYPE(type) \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range); \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range, const float scale); \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationparams, const float2 range); \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationparams, const float2 range, const float scale);
COMPILE_DEPTH_TYPE(float)
COMPILE_DEPTH_TYPE(ushort)
}
| fa4be7533e5b395fb0a94275c602b1f82a47c74a.cu | #include "organized_point_cloud.h"
#include <iostream>
#include <stdio.h>
#include <util/helper_math.h>
namespace dart {
// -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=-
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float2 pp,
const float2 fl,
const float2 range) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = depthIn[index];
if (depth >= range.x && depth <= range.y) {
vertOut[index] = make_float4( (u - pp.x)*(depth/fl.x),
(v - pp.y)*(depth/fl.y),
depth,
1.0f);
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float2 pp,
const float2 fl,
const float2 range,
const float scale) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = scale*depthIn[index];
if (depth >= range.x && depth <= range.y) {
vertOut[index] = make_float4( (u - pp.x)*(depth/fl.x),
(v - pp.y)*(depth/fl.y),
depth,
1.0f);
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float4 * Kinv,
const float2 range) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = depthIn[index];
if (depth >= range.x && depth <= range.y) {
const float4 p = make_float4( u, v, depth, 1);
float4 vert = make_float4( dot(Kinv[0],p),
dot(Kinv[1],p),
dot(Kinv[2],p),
dot(Kinv[3],p));
vert /= vert.w;
vert.w = 1;
vert.z = -vert.z;
vertOut[index] = vert;
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float4 * Kinv,
const float2 range,
const float scale) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = scale*depthIn[index];
if (depth >= range.x && depth <= range.y) {
const float4 p = make_float4( u, v, depth, 1);
float4 vert = make_float4( dot(Kinv[0],p),
dot(Kinv[1],p),
dot(Kinv[2],p),
dot(Kinv[3],p));
vert /= vert.w;
vert.w = 1;
vert.z = -vert.z;
vertOut[index] = vert;
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType, int iters>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float * cameraParams,
const float2 range) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = depthIn[index];
if (depth >= range.x && depth <= range.y) {
// http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
const float &fx = cameraParams[0];
const float &fy = cameraParams[1];
const float &cx = cameraParams[2];
const float &cy = cameraParams[3];
const float &k1 = cameraParams[4];
const float &k2 = cameraParams[5];
const float &p1 = cameraParams[6];
const float &p2 = cameraParams[7];
const float &k3 = cameraParams[8];
float xp, yp, xpp, ypp;
xpp = xp = (u - cx) / fx;
ypp = yp = (v - cy) / fy;
#pragma unroll
for (int i=0; i<iters; ++i) {
float r2 = xp*xp + yp*yp;
float r4 = r2*r2;
float r6 = r4*r2;
float denom = 1 + k1*r2 + k2*r4 + k3*r6;
float dxp = 2*p1*xp*yp + p2*(r2 + 2*xp*xp);
float dyp = p1*(r2 + 2*yp*yp) + 2*p2*xp*yp;
xp = (xpp - dxp)/denom;
yp = (ypp - dyp)/denom;
}
vertOut[index] = make_float4(xp*depth,yp*depth,depth,1.0f);
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType, int iters>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float * cameraParams,
const float2 range,
const float scale) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = scale*depthIn[index];
if (depth >= range.x && depth <= range.y) {
// http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
const float& fx = cameraParams[0];
const float& fy = cameraParams[1];
const float& cx = cameraParams[2];
const float& cy = cameraParams[3];
const float& k1 = cameraParams[4];
const float& k2 = cameraParams[5];
const float& p1 = cameraParams[6];
const float& p2 = cameraParams[7];
const float& k3 = cameraParams[8];
float xp, yp, xpp, ypp;
xpp = xp = (u - cx) / fx;
ypp = yp = (v - cy) / fy;
#pragma unroll
for (int i=0; i<iters; ++i) {
float r2 = xp*xp + yp*yp;
float r4 = r2*r2;
float r6 = r4*r2;
float denom = 1 + k1*r2 + k2*r4 + k3*r6;
float dxp = 2*p1*xp*yp + p2*(r2 + 2*xp*xp);
float dyp = p1*(r2 + 2*yp*yp) + 2*p2*xp*yp;
xp = (xpp - dxp)/denom;
yp = (ypp - dyp)/denom;
}
vertOut[index] = make_float4(xp*depth,yp*depth,depth,1.0f);
}
else {
vertOut[index].w = 0;
}
}
__global__ void gpu_verticesToNormals(const float4 * vertIn,
float4 * normOut,
const int width,
const int height) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
const float4 & v = vertIn[index];
// // don't process invalid vertices
if ( v.w == 0) {
normOut[index] = make_float4(0);
return;
}
const float4 & vLeft = vertIn[ x == 0 ? index : index-1];
const float4 & vRight = vertIn[ x == width-1 ? index : index+1];
const float4 & vUp = vertIn[ y == 0 ? index : index-width];
const float4 & vDown = vertIn[ y == height-1 ? index : index+width];
const float3 vX = make_float3( (vRight.w == 0 ? v : vRight) - (vLeft.w == 0 ? v : vLeft) );
const float3 vY = make_float3( (vDown.w == 0 ? v : vDown) - (vUp.w == 0 ? v : vUp) );
const float3 n = cross(vY,vX);
const float len2 = dot(n,n);
if (len2 > 0) {
const float invLen = 1.0f / (float)sqrtf(len2);
normOut[index] = make_float4(n.x*invLen,n.y*invLen,n.z*invLen,1);
}
else {
normOut[index] = make_float4(0);
}
}
__global__ void gpu_eliminatePlane(float4 * verts, const float4 * norms, const int width, const int height, const float3 planeNormal, const float planeD, const float epsDist, const float epsNorm) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
// check vertex validity
float4& v = verts[index];
if ( v.w == 0) {
return;
}
// check normal threshold
const float4& n = norms[index];
if (dot(make_float3(n),planeNormal) < epsNorm) {
return;
}
// check distance threshold
if (abs(dot(make_float3(v),planeNormal) - planeD) < epsDist ) {
v.w = -1;
}
}
__global__ void gpu_cropBox(float4 * verts, const int width, const int height, const float3 boxMin, const float3 boxMax) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
// check vertex validity
float4& v = verts[index];
if ( v.w == 0) {
return;
}
if (v.x < boxMin.x || v.x > boxMax.x ||
v.y < boxMin.y || v.y > boxMax.y ||
v.z < boxMin.z || v.z > boxMax.z) {
v.w = -1;
}
}
__global__ void gpu_maskPointCloud(float4* verts, const int width, const int height, const int* mask) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
int m = mask[index];
if (m == 0) {
verts[index].w = -1;
}
}
// -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=-
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_depthToVertices<<<grid,block>>>(depthIn, vertOut, width, height, pp, fl, range);
}
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range, const float scale) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_depthToVertices<<<grid,block>>>(depthIn, vertOut, width, height, pp, fl, range, scale);
}
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationParams, const float2 range) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_depthToVertices<DepthType,5><<<grid,block>>>(depthIn, vertOut, width, height, calibrationParams, range);
}
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationParams, const float2 range, const float scale) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_depthToVertices<DepthType,5><<<grid,block>>>(depthIn, vertOut, width, height, calibrationParams, range, scale);
}
void verticesToNormals(const float4 * vertIn, float4 * normOut, const int width, const int height) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_verticesToNormals<<<grid,block>>>(vertIn,normOut,width,height);
}
void eliminatePlane(float4 * verts, const float4 * norms, const int width, const int height, const float3 planeNormal, const float planeD, const float epsDist, const float epsNorm) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_eliminatePlane<<<grid,block>>>(verts,norms,width,height,planeNormal,planeD,epsDist,epsNorm);
}
void cropBox(float4 * verts, const int width, const int height, const float3 & boxMin, const float3 & boxMax) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_cropBox<<<grid,block>>>(verts,width,height,boxMin,boxMax);
}
void maskPointCloud(float4 * verts, const int width, const int height, const int * mask) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_maskPointCloud<<<grid,block>>>(verts,width,height,mask);
}
#define COMPILE_DEPTH_TYPE(type) \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range); \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range, const float scale); \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationparams, const float2 range); \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationparams, const float2 range, const float scale);
COMPILE_DEPTH_TYPE(float)
COMPILE_DEPTH_TYPE(ushort)
}
|
c3debfa3b38a0ef578e311e841002517816cef01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> c d s
*/
#include "common_magma.h"
#include "common_magmasparse.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
zgecsrreimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
magmaDoubleComplex * A,
magmaDoubleComplex * ReA,
magmaDoubleComplex * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=rowidx[row]; j<rowidx[row+1]; j++ ){
ReA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
This routine takes an input matrix A in CSR format and located on the GPU
and splits it into two matrixes ReA and ImA containing the real and the
imaginary contributions of A.
The output matrices are allocated within the routine.
Arguments
---------
@param[in]
A magma_z_matrix
input matrix A.
@param[out]
ReA magma_z_matrix*
output matrix contaning real contributions.
@param[out]
ImA magma_z_matrix*
output matrix contaning complex contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C"
magma_int_t
magma_zgecsrreimsplit(
magma_z_matrix A,
magma_z_matrix *ReA,
magma_z_matrix *ImA,
magma_queue_t queue )
{
magma_zmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_zmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zgecsrreimsplit_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
| c3debfa3b38a0ef578e311e841002517816cef01.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> c d s
*/
#include "common_magma.h"
#include "common_magmasparse.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
zgecsrreimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
magmaDoubleComplex * A,
magmaDoubleComplex * ReA,
magmaDoubleComplex * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=rowidx[row]; j<rowidx[row+1]; j++ ){
ReA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
This routine takes an input matrix A in CSR format and located on the GPU
and splits it into two matrixes ReA and ImA containing the real and the
imaginary contributions of A.
The output matrices are allocated within the routine.
Arguments
---------
@param[in]
A magma_z_matrix
input matrix A.
@param[out]
ReA magma_z_matrix*
output matrix contaning real contributions.
@param[out]
ImA magma_z_matrix*
output matrix contaning complex contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C"
magma_int_t
magma_zgecsrreimsplit(
magma_z_matrix A,
magma_z_matrix *ReA,
magma_z_matrix *ImA,
magma_queue_t queue )
{
magma_zmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_zmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
zgecsrreimsplit_kernel<<< grid, threads, 0, queue >>>
( m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
|
63f90677761f92f56b18397c09984cfd7830c2f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_right;
int xdim0_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_right;
int ydim0_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_right;
int xdim1_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_right;
int ydim1_update_halo_kernel5_plus_4_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_4_right*(y)+xdim0_update_halo_kernel5_plus_4_right*ydim0_update_halo_kernel5_plus_4_right*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_4_right*(y)+xdim1_update_halo_kernel5_plus_4_right*ydim1_update_halo_kernel5_plus_4_right*(z))
//user function
__device__
inline void update_halo_kernel5_plus_4_right_gpu(double *vol_flux_z, double *mass_flux_z, const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = (vol_flux_z[OPS_ACC0(-4,0,0)]);
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = (mass_flux_z[OPS_ACC1(-4,0,0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_4_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel5_plus_4_right + idx_z * 1*1 * xdim0_update_halo_kernel5_plus_4_right * ydim0_update_halo_kernel5_plus_4_right;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel5_plus_4_right + idx_z * 1*1 * xdim1_update_halo_kernel5_plus_4_right * ydim1_update_halo_kernel5_plus_4_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_4_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,89)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(89,"update_halo_kernel5_plus_4_right");
OPS_kernels[89].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_right_h || ydim0 != ydim0_update_halo_kernel5_plus_4_right_h || xdim1 != xdim1_update_halo_kernel5_plus_4_right_h || ydim1 != ydim1_update_halo_kernel5_plus_4_right_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel5_plus_4_right, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_plus_4_right_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel5_plus_4_right, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_plus_4_right_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel5_plus_4_right, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_plus_4_right_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel5_plus_4_right, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_plus_4_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[89].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_4_right), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[89].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[89].mpi_time += t2-t1;
OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 89;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 89;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_4_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(89,"update_halo_kernel5_plus_4_right");
}
ops_enqueue_kernel(desc);
}
#endif
| 63f90677761f92f56b18397c09984cfd7830c2f6.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_plus_4_right;
int xdim0_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int ydim0_update_halo_kernel5_plus_4_right;
int ydim0_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int xdim1_update_halo_kernel5_plus_4_right;
int xdim1_update_halo_kernel5_plus_4_right_h = -1;
__constant__ int ydim1_update_halo_kernel5_plus_4_right;
int ydim1_update_halo_kernel5_plus_4_right_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_4_right*(y)+xdim0_update_halo_kernel5_plus_4_right*ydim0_update_halo_kernel5_plus_4_right*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_4_right*(y)+xdim1_update_halo_kernel5_plus_4_right*ydim1_update_halo_kernel5_plus_4_right*(z))
//user function
__device__
inline void update_halo_kernel5_plus_4_right_gpu(double *vol_flux_z, double *mass_flux_z, const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = (vol_flux_z[OPS_ACC0(-4,0,0)]);
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = (mass_flux_z[OPS_ACC1(-4,0,0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_plus_4_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel5_plus_4_right + idx_z * 1*1 * xdim0_update_halo_kernel5_plus_4_right * ydim0_update_halo_kernel5_plus_4_right;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel5_plus_4_right + idx_z * 1*1 * xdim1_update_halo_kernel5_plus_4_right * ydim1_update_halo_kernel5_plus_4_right;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_plus_4_right_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_4_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,89)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(89,"update_halo_kernel5_plus_4_right");
OPS_kernels[89].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_plus_4_right_h || ydim0 != ydim0_update_halo_kernel5_plus_4_right_h || xdim1 != xdim1_update_halo_kernel5_plus_4_right_h || ydim1 != ydim1_update_halo_kernel5_plus_4_right_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel5_plus_4_right, &xdim0, sizeof(int) );
xdim0_update_halo_kernel5_plus_4_right_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel5_plus_4_right, &ydim0, sizeof(int) );
ydim0_update_halo_kernel5_plus_4_right_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel5_plus_4_right, &xdim1, sizeof(int) );
xdim1_update_halo_kernel5_plus_4_right_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel5_plus_4_right, &ydim1, sizeof(int) );
ydim1_update_halo_kernel5_plus_4_right_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[89].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel5_plus_4_right<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[89].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[89].mpi_time += t2-t1;
OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 89;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 89;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_4_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(89,"update_halo_kernel5_plus_4_right");
}
ops_enqueue_kernel(desc);
}
#endif
|
bf8ee277a1b8e41e813d2fcf0b51b032cb023436.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MASW.cuh"
// Written by Joseph Kump (josek97@vt.edu). Last modified 12/20/2019
#define matrices(i,j,k) (matrices[i][j*size + k])
/* Generates the stiffness matrices and fills in their entries, then uses Gaussian
elimination to make finding their determinants easy. Mostly just allocates memory on
the GPU, then gets GPU kernels to do the heavy lifting.
Inputs:
curve the dispersion curve struct
d_matrices the stiffness matrices allocated on the GPU as a 2D array (each sub-array
corresponding to a matrix). These were already allocated.
Output:
void, but fills the stiffness matrices with Gaussian elimination in the d_matrices
parameter
*/
void MASWA_stiffness_matrix_CUDA(curve_t *curve, hipDoubleComplex **d_matrices){
// The matrix axes are based on the number of layers. Then the model parameters, test
// velocities, and dispersion curve wavelengths are allocated and transferred from
// the CPU:
int size = 2*(curve->n+1);
dfloat *d_c_test, *d_lambda, *d_h, *d_alpha, *d_beta, *d_rho;
hipMalloc(&d_c_test, curve->velocities_length*sizeof(dfloat));
hipMalloc(&d_lambda, curve->curve_length*sizeof(dfloat));
hipMalloc(&d_h, curve->n*sizeof(dfloat));
hipMalloc(&d_alpha, (curve->n+1)*sizeof(dfloat));
hipMalloc(&d_beta, (curve->n+1)*sizeof(dfloat));
hipMalloc(&d_rho, (curve->n+1)*sizeof(dfloat));
hipMemcpy(d_c_test, curve->c_test, curve->velocities_length*sizeof(dfloat), hipMemcpyHostToDevice);
hipMemcpy(d_lambda, curve->lambda_curve0, curve->curve_length*sizeof(dfloat), hipMemcpyHostToDevice);
hipMemcpy(d_h, curve->h, curve->n*sizeof(dfloat), hipMemcpyHostToDevice);
hipMemcpy(d_alpha, curve->alpha, (curve->n+1)*sizeof(dfloat), hipMemcpyHostToDevice);
hipMemcpy(d_beta, curve->beta, (curve->n+1)*sizeof(dfloat), hipMemcpyHostToDevice);
hipMemcpy(d_rho, curve->rho, (curve->n+1)*sizeof(dfloat), hipMemcpyHostToDevice);
// If the shared memory requirements for kernel_generate_stiffness_matrices are too large, then this can be reduced to 128:
int blockSize = 256;
int blocks = (curve->curve_length*curve->velocities_length / blockSize)+1;
// Check none of the test velocities are too close to the model velocities:
hipLaunchKernelGGL(( kernel_too_close), dim3(1),dim3(blockSize), 0, 0, curve->velocities_length, curve->n+1, d_c_test, d_alpha, d_beta, 0.0001);
// Fill in the stiffness matrices here:
hipLaunchKernelGGL(( kernel_generate_stiffness_matrices), dim3(blocks), dim3(blockSize), 6*blockSize*sizeof(hipDoubleComplex), 0, d_c_test, d_lambda, d_h, d_alpha, d_beta, d_rho, curve->n, curve->velocities_length, curve->curve_length, d_matrices);
// Gaussian Elimination here:
hipLaunchKernelGGL(( kernel_hepta_determinant_CUDA), dim3(blocks), dim3(blockSize), 4*blockSize*sizeof(hipDoubleComplex), 0, curve->curve_length, curve->velocities_length, size, d_matrices);
// Free all of the model parameters, since they are no longer necessary:
hipFree(d_c_test);
hipFree(d_lambda);
hipFree(d_h);
hipFree(d_alpha);
hipFree(d_beta);
hipFree(d_rho);
}
/* Fills in a simple identity matrix for some test cases. Not used as part of MASW (though
the parameters are modelled after it).
Inputs:
velocities_length determines the number of matrices
curve_length also determines the number of matrices
n determines the size of the matrices
matrices the array storing the matrix entries
Outputs:
void, but makes every matrix in matrices an identity matrix
*/
__global__ void kernel_matrix_fill_in_serial(int velocities_length, int curve_length, int n, hipDoubleComplex **matrices){
int size = 2*(n+1);
for (int i=0; i<curve_length*velocities_length; ++i){
for (int j=0; j<size; ++j){
matrices(i,j,j) = make_cuDoubleComplex(1, 0);
}
}
}
/* Assigns matrix pointers to the contiguous chunk of memory where they are stored. This is necessary for the implementation
utilizing cuBLAS (there may be a method that does not require a kernel, but other things I tried generated a seg fault).
Since cuBLAS is no longer used, the code may be rewritten to omit this. (Matrices can just be stored as a 1D array without
pointers, as opposed to a 2D array).
Inputs:
matrices the pointers to every matrix in the gpu data
data the 1D array containing all of the matrix entries
curve_length the length of the dispersion curve
velocities_length the number of test velocities. It and curve_length determine the
number of stiffness matrices
n the number of finite thickness layers, determines the size of
each stiffness matrix
Output:
assignes each stiffness matrix as a pointer in matrices
*/
__global__ void kernel_assign_matrices_to_data(hipDoubleComplex **matrices, hipDoubleComplex *data, int curve_length, int velocities_length, int n){
// Currently this kernel is serial, but it's effect on the runtime is negligible.
// Could be parallelized, or the code could be written so the GPU matrices are just all
// on a big 1D array:
for (int i=0; i<curve_length*velocities_length; ++i){
matrices[i] = (hipDoubleComplex*) ((char*) data+i*((size_t)(4*n*n + 8*n + 4))*sizeof(hipDoubleComplex));
}
}
/* Performs Gaussian elimination on the stiffness matrices by taking advantage of their
banded structure, unlike in the cuBLAS function. This version puts the current row
used for elimination into shared memory, improving the speed of accessing it.
Inputs:
curve_length the length of the dispersion curve
velocities_length the number of test velocities
size the length of the axis for each stiffness matrix
matrices the 2D array holding the matrices' entries
Outputs:
void, but performs gaussian elimination on each stiffness matrix so their determines
can easily be computed
*/
__global__ void kernel_hepta_determinant_CUDA(int curve_length, int velocities_length, int size, hipDoubleComplex **matrices){
// The block and thread indices are used to allocate stiffness matrices to each thread:
int blockSize = blockDim.x;
int threadIndex = threadIdx.x;
int index = blockSize * blockIdx.x + threadIndex;
int stride = blockSize * gridDim.x;
// Each row used for reduction has only 4 nonzero entries, so we only need 4 entries
// in shared memory for each matrix:
int sharedIndex = 4*threadIndex;
extern __shared__ hipDoubleComplex row[];
for (int x=index; x<curve_length*velocities_length; x+=stride){
dfloat modifier = 1.0;
for (int i=0; i<size; ++i){
int end = i + 4;
if (end > size){
end = size;
}
for (int k=i; k<end; ++k){
row[sharedIndex + k - i] = matrices(x,i,k);
}
// Row switching
if (cuCreal(matrices(x,i,i)) == 0.0 && cuCimag(matrices(x,i,i)) == 0.0){
for (int s=i+1; s<end; ++s){
if(cuCreal(matrices(x,s,i)) != 0.0 || cuCimag(matrices(x,s,i)) != 0.0){
hipDoubleComplex temp;
for (int w=i; w<end; ++w){
temp = matrices(x,i,w);
matrices(x,i,w) = matrices(x,s,w);
matrices(x,s,w) = temp;
modifier *= -1;
break;
}
}
}
}
//Gaussian elimination for the three rows (or fewer) below this one:
for (int j=i+1; j<end; ++j){
hipDoubleComplex coeff = cuCdiv(matrices(x,j,i), row[sharedIndex]);
for (int k=i+1; k<end; ++k){
matrices(x,j,k) = cuCsub(matrices(x,j,k), cuCmul(coeff, row[sharedIndex + k - i]));
}
}
}
// account for determinant changing from row switching
matrices(x,0,0) = cuCmul(matrices(x,0,0), make_cuDoubleComplex(modifier, 0.0));
}
}
| bf8ee277a1b8e41e813d2fcf0b51b032cb023436.cu | #include "MASW.cuh"
// Written by Joseph Kump (josek97@vt.edu). Last modified 12/20/2019
#define matrices(i,j,k) (matrices[i][j*size + k])
/* Generates the stiffness matrices and fills in their entries, then uses Gaussian
elimination to make finding their determinants easy. Mostly just allocates memory on
the GPU, then gets GPU kernels to do the heavy lifting.
Inputs:
curve the dispersion curve struct
d_matrices the stiffness matrices allocated on the GPU as a 2D array (each sub-array
corresponding to a matrix). These were already allocated.
Output:
void, but fills the stiffness matrices with Gaussian elimination in the d_matrices
parameter
*/
void MASWA_stiffness_matrix_CUDA(curve_t *curve, cuDoubleComplex **d_matrices){
// The matrix axes are based on the number of layers. Then the model parameters, test
// velocities, and dispersion curve wavelengths are allocated and transferred from
// the CPU:
int size = 2*(curve->n+1);
dfloat *d_c_test, *d_lambda, *d_h, *d_alpha, *d_beta, *d_rho;
cudaMalloc(&d_c_test, curve->velocities_length*sizeof(dfloat));
cudaMalloc(&d_lambda, curve->curve_length*sizeof(dfloat));
cudaMalloc(&d_h, curve->n*sizeof(dfloat));
cudaMalloc(&d_alpha, (curve->n+1)*sizeof(dfloat));
cudaMalloc(&d_beta, (curve->n+1)*sizeof(dfloat));
cudaMalloc(&d_rho, (curve->n+1)*sizeof(dfloat));
cudaMemcpy(d_c_test, curve->c_test, curve->velocities_length*sizeof(dfloat), cudaMemcpyHostToDevice);
cudaMemcpy(d_lambda, curve->lambda_curve0, curve->curve_length*sizeof(dfloat), cudaMemcpyHostToDevice);
cudaMemcpy(d_h, curve->h, curve->n*sizeof(dfloat), cudaMemcpyHostToDevice);
cudaMemcpy(d_alpha, curve->alpha, (curve->n+1)*sizeof(dfloat), cudaMemcpyHostToDevice);
cudaMemcpy(d_beta, curve->beta, (curve->n+1)*sizeof(dfloat), cudaMemcpyHostToDevice);
cudaMemcpy(d_rho, curve->rho, (curve->n+1)*sizeof(dfloat), cudaMemcpyHostToDevice);
// If the shared memory requirements for kernel_generate_stiffness_matrices are too large, then this can be reduced to 128:
int blockSize = 256;
int blocks = (curve->curve_length*curve->velocities_length / blockSize)+1;
// Check none of the test velocities are too close to the model velocities:
kernel_too_close<<<1,blockSize>>>(curve->velocities_length, curve->n+1, d_c_test, d_alpha, d_beta, 0.0001);
// Fill in the stiffness matrices here:
kernel_generate_stiffness_matrices<<<blocks, blockSize, 6*blockSize*sizeof(cuDoubleComplex)>>>(d_c_test, d_lambda, d_h, d_alpha, d_beta, d_rho, curve->n, curve->velocities_length, curve->curve_length, d_matrices);
// Gaussian Elimination here:
kernel_hepta_determinant_CUDA<<<blocks, blockSize, 4*blockSize*sizeof(cuDoubleComplex)>>>(curve->curve_length, curve->velocities_length, size, d_matrices);
// Free all of the model parameters, since they are no longer necessary:
cudaFree(d_c_test);
cudaFree(d_lambda);
cudaFree(d_h);
cudaFree(d_alpha);
cudaFree(d_beta);
cudaFree(d_rho);
}
/* Fills in a simple identity matrix for some test cases. Not used as part of MASW (though
the parameters are modelled after it).
Inputs:
velocities_length determines the number of matrices
curve_length also determines the number of matrices
n determines the size of the matrices
matrices the array storing the matrix entries
Outputs:
void, but makes every matrix in matrices an identity matrix
*/
__global__ void kernel_matrix_fill_in_serial(int velocities_length, int curve_length, int n, cuDoubleComplex **matrices){
int size = 2*(n+1);
for (int i=0; i<curve_length*velocities_length; ++i){
for (int j=0; j<size; ++j){
matrices(i,j,j) = make_cuDoubleComplex(1, 0);
}
}
}
/* Assigns matrix pointers to the contiguous chunk of memory where they are stored. This is necessary for the implementation
utilizing cuBLAS (there may be a method that does not require a kernel, but other things I tried generated a seg fault).
Since cuBLAS is no longer used, the code may be rewritten to omit this. (Matrices can just be stored as a 1D array without
pointers, as opposed to a 2D array).
Inputs:
matrices the pointers to every matrix in the gpu data
data the 1D array containing all of the matrix entries
curve_length the length of the dispersion curve
velocities_length the number of test velocities. It and curve_length determine the
number of stiffness matrices
n the number of finite thickness layers, determines the size of
each stiffness matrix
Output:
assignes each stiffness matrix as a pointer in matrices
*/
__global__ void kernel_assign_matrices_to_data(cuDoubleComplex **matrices, cuDoubleComplex *data, int curve_length, int velocities_length, int n){
// Currently this kernel is serial, but it's effect on the runtime is negligible.
// Could be parallelized, or the code could be written so the GPU matrices are just all
// on a big 1D array:
for (int i=0; i<curve_length*velocities_length; ++i){
matrices[i] = (cuDoubleComplex*) ((char*) data+i*((size_t)(4*n*n + 8*n + 4))*sizeof(cuDoubleComplex));
}
}
/* Performs Gaussian elimination on the stiffness matrices by taking advantage of their
banded structure, unlike in the cuBLAS function. This version puts the current row
used for elimination into shared memory, improving the speed of accessing it.
Inputs:
curve_length the length of the dispersion curve
velocities_length the number of test velocities
size the length of the axis for each stiffness matrix
matrices the 2D array holding the matrices' entries
Outputs:
void, but performs gaussian elimination on each stiffness matrix so their determines
can easily be computed
*/
__global__ void kernel_hepta_determinant_CUDA(int curve_length, int velocities_length, int size, cuDoubleComplex **matrices){
// The block and thread indices are used to allocate stiffness matrices to each thread:
int blockSize = blockDim.x;
int threadIndex = threadIdx.x;
int index = blockSize * blockIdx.x + threadIndex;
int stride = blockSize * gridDim.x;
// Each row used for reduction has only 4 nonzero entries, so we only need 4 entries
// in shared memory for each matrix:
int sharedIndex = 4*threadIndex;
extern __shared__ cuDoubleComplex row[];
for (int x=index; x<curve_length*velocities_length; x+=stride){
dfloat modifier = 1.0;
for (int i=0; i<size; ++i){
int end = i + 4;
if (end > size){
end = size;
}
for (int k=i; k<end; ++k){
row[sharedIndex + k - i] = matrices(x,i,k);
}
// Row switching
if (cuCreal(matrices(x,i,i)) == 0.0 && cuCimag(matrices(x,i,i)) == 0.0){
for (int s=i+1; s<end; ++s){
if(cuCreal(matrices(x,s,i)) != 0.0 || cuCimag(matrices(x,s,i)) != 0.0){
cuDoubleComplex temp;
for (int w=i; w<end; ++w){
temp = matrices(x,i,w);
matrices(x,i,w) = matrices(x,s,w);
matrices(x,s,w) = temp;
modifier *= -1;
break;
}
}
}
}
//Gaussian elimination for the three rows (or fewer) below this one:
for (int j=i+1; j<end; ++j){
cuDoubleComplex coeff = cuCdiv(matrices(x,j,i), row[sharedIndex]);
for (int k=i+1; k<end; ++k){
matrices(x,j,k) = cuCsub(matrices(x,j,k), cuCmul(coeff, row[sharedIndex + k - i]));
}
}
}
// account for determinant changing from row switching
matrices(x,0,0) = cuCmul(matrices(x,0,0), make_cuDoubleComplex(modifier, 0.0));
}
}
|
db1f9047e0fa569510e8a9a8fb6833d00fa244b6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/device_functions.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include <iostream>
__global__ void clip_by_minimum_kernel(int nbatch, float mini, int ochannel, float const* idata, float* odata)
{
int x0 = threadIdx.x;
if (idata[x0] < mini)
{
odata[x0] = mini;
}
else {
odata[x0] = idata[x0];
}
}
extern "C" void clipByMinimum(dim3 grid, dim3 block, hipStream_t stream, int nbatch, float mini, int ochannel, float const* idata,
float* odata)
{
hipLaunchKernelGGL(( clip_by_minimum_kernel) , dim3(grid), dim3(block), 0, stream , nbatch, mini, ochannel, idata, odata);
} | db1f9047e0fa569510e8a9a8fb6833d00fa244b6.cu | #include "device_functions.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <stdio.h>
#include <cuda.h>
#include <cublas.h>
#include <iostream>
__global__ void clip_by_minimum_kernel(int nbatch, float mini, int ochannel, float const* idata, float* odata)
{
int x0 = threadIdx.x;
if (idata[x0] < mini)
{
odata[x0] = mini;
}
else {
odata[x0] = idata[x0];
}
}
extern "C" void clipByMinimum(dim3 grid, dim3 block, cudaStream_t stream, int nbatch, float mini, int ochannel, float const* idata,
float* odata)
{
clip_by_minimum_kernel <<<grid, block, 0, stream >>>(nbatch, mini, ochannel, idata, odata);
} |
2140a946402fecb0d276fa74135b63e6fce0bb62.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define BLOCK_X 10
#define BLOCK_Y 10
#define BLOCK_Z 1
#define THREAD_X 6
#define THREAD_Y 6
#define THREAD_Z 1
#define N 3600
#define PI 3.14159265358979323846
#define DEG_TO_RAD(deg) ((deg) / 180.0 * (PI))
__global__ void cosine10_10_6_6(double *B_d, double *radius_d)
{
int blockId = (gridDim.x * blockIdx.y) + blockIdx.x;
int thread_index = (blockId * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
B_d[thread_index] = cos(radius_d[thread_index]);
}
int main()
{
int i;
double B[N]; // HOST
double radius[N]; // HOST
double *B_d; // DEVICE
double *radius_d; // DEVICE
double deg = 0.0;
FILE *outputfile;
outputfile = fopen("./outputs/cosine10_10_6_6_cos.txt", "w");
if (outputfile == NULL) {
printf("cannot open either directory or file! \n");
exit(1);
}
for (int i = 0; i < N; i+=1) {
radius[i] = DEG_TO_RAD(deg);
deg += 360 /(double) N;
}
dim3 blocks(BLOCK_X,BLOCK_Y,BLOCK_Z);
dim3 threads(THREAD_X,THREAD_Y,THREAD_Z);
hipMalloc( (void**) &B_d, N*sizeof(double));
hipMalloc( (void**) &radius_d, N*sizeof(double));
hipMemcpy(B_d, B, N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(radius_d, radius, N*sizeof(double), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cosine10_10_6_6), dim3(blocks), dim3(threads) , 0, 0, B_d, radius_d);
hipMemcpy(B, B_d, N*sizeof(double), hipMemcpyDeviceToHost);
for(i = 0; i < N; i += 1){
fprintf(outputfile,"%d %.16f\n",i, B[i]);
}
for(i = 0; i < 5; i += 1){
printf("%d %.16f\n",i, B[i]);
}
fclose(outputfile);
hipFree(B_d);
hipFree(radius_d);
return 0;
}
| 2140a946402fecb0d276fa74135b63e6fce0bb62.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#define BLOCK_X 10
#define BLOCK_Y 10
#define BLOCK_Z 1
#define THREAD_X 6
#define THREAD_Y 6
#define THREAD_Z 1
#define N 3600
#define PI 3.14159265358979323846
#define DEG_TO_RAD(deg) ((deg) / 180.0 * (PI))
__global__ void cosine10_10_6_6(double *B_d, double *radius_d)
{
int blockId = (gridDim.x * blockIdx.y) + blockIdx.x;
int thread_index = (blockId * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
B_d[thread_index] = cos(radius_d[thread_index]);
}
int main()
{
int i;
double B[N]; // HOST
double radius[N]; // HOST
double *B_d; // DEVICE
double *radius_d; // DEVICE
double deg = 0.0;
FILE *outputfile;
outputfile = fopen("./outputs/cosine10_10_6_6_cos.txt", "w");
if (outputfile == NULL) {
printf("cannot open either directory or file! \n");
exit(1);
}
for (int i = 0; i < N; i+=1) {
radius[i] = DEG_TO_RAD(deg);
deg += 360 /(double) N;
}
dim3 blocks(BLOCK_X,BLOCK_Y,BLOCK_Z);
dim3 threads(THREAD_X,THREAD_Y,THREAD_Z);
cudaMalloc( (void**) &B_d, N*sizeof(double));
cudaMalloc( (void**) &radius_d, N*sizeof(double));
cudaMemcpy(B_d, B, N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(radius_d, radius, N*sizeof(double), cudaMemcpyHostToDevice);
cosine10_10_6_6<<< blocks, threads >>>(B_d, radius_d);
cudaMemcpy(B, B_d, N*sizeof(double), cudaMemcpyDeviceToHost);
for(i = 0; i < N; i += 1){
fprintf(outputfile,"%d %.16f\n",i, B[i]);
}
for(i = 0; i < 5; i += 1){
printf("%d %.16f\n",i, B[i]);
}
fclose(outputfile);
cudaFree(B_d);
cudaFree(radius_d);
return 0;
}
|
dbd553cb5108eafd69f774f4a5af1a5d7c4d34aa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <opencv2/opencv.hpp>
#include <cmath>
#include <hip/hip_runtime.h>
using namespace std;
using namespace cv;
#define maxthreads 256.0
#define maxblocks 256.0
int* _filter(int* data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode);
uchar * filter(uchar * data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode);
__device__ int getGlobalIdx_3D_3D(int);
__device__ int getblockthreadIdx();
int* uchartoint(uchar* data, int size){
int* buff = (int*)malloc(sizeof(int)*size);
for(int i=0;i<size;i++){
*(buff+i)=(int)*(data+i);
}
return buff;
}
uchar* inttouchar(int* data, int size){
uchar* buff = (uchar*)malloc(sizeof(uchar)*size);
for(int i=0;i<size;i++){
*(buff+i)=(unsigned char)*(data+i);
}
return buff;
}
__global__
void convolution(int* data,int* buff,float* kernel,int* outputvars,int rows,int cols,int channels,int kerneldim,int baseblock){
int idx = getGlobalIdx_3D_3D(baseblock);
int kernelmid;
extern __shared__ float sharedKernel[];
float *kernelCenter;
if (getblockthreadIdx()<kerneldim*kerneldim){
*(sharedKernel+getblockthreadIdx())=*(kernel+getblockthreadIdx());
}
__syncthreads();
/*
if (getblockthreadIdx()<kerneldim*kerneldim){
printf("%d %f\n",getblockthreadIdx(),*(sharedKernel+getblockthreadIdx()));
}
__syncthreads();
*/
kernelmid = kerneldim%2==1?kerneldim/2:(kerneldim-1)/2;
kernelCenter=sharedKernel+(((kerneldim+1)*kernelmid));
int row = idx / (cols*channels);
int col = (idx%(cols*channels))/channels;
float value=0;
int pixel=0;
float kernelVal=0;
int pixelmin=INT_MAX,pixelmax=INT_MIN;
int kernelmidHalf=(kerneldim/2);
if (col>0 && row>0 && row<rows-1 && col<cols-1){
data = data+idx;
//r<=(kernelmidHalf) no funciona, no s porque, pero cuda y yo tenemos un problema.
for(int r = (-1*kernelmidHalf); r<(kernelmidHalf+1);r++){
for(int c = -1*kernelmidHalf; c<(kernelmidHalf+1);c++){
pixel=*(data+(r*cols*channels)+(c*channels));
kernelVal=*(kernelCenter+(r*-1*kerneldim)+(c*-1));
value+=kernelVal*pixel;
if (pixel<pixelmin){
pixelmin=pixel;
}
if (pixel>pixelmax){
pixelmax=pixel;
}
}
}
*(buff+idx)=value;
atomicMin(outputvars,value);
atomicMax(outputvars+1,value);
atomicMin(outputvars+2,pixelmin);
atomicMax(outputvars+3,pixelmax);
}
//__syncthreads();
/*if (col>0 && row>0 && row<rows-1 && col<cols-1 && getblockthreadIdx()==0){
printf("%d %d %d %d\n",*(outputvars),*(outputvars+1),*(outputvars+2),*(outputvars+3));
}*/
}
__global__
void normalize(int* data,int channels, int rows, int cols,int min, int max, int newMin, int newMax, int mode,int subset){
int pixval=0;
int i = getGlobalIdx_3D_3D(subset);
int row = i / (cols*channels);
int col = (i%(cols*channels))/channels;
if (row>0 && col>0 && row<rows-1 && col<cols-1){
pixval=*(data+i);
if (mode==1){
*(data+i)=(pixval-min)*((newMax-newMin*1.0)/(max-min))+newMin;
}else{
*(data+i)=pixval>newMax?newMax:pixval<newMin?newMin:pixval;
}
}
// __syncthreads();
}
uchar * edge1(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{1,0,-1},
{0,0,0},
{-1,0,1}
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * edge2(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{0,1,0},
{1,-4,1},
{0,1,0}
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * edge3(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{-1,-1,-1},
{-1,8,-1},
{-1,-1,-1},
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * sharpen(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{0,-1,0},
{-1,5,-1},
{0,-1,0},
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * boxblur(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{1,1,1},
{1,1,1},
{1,1,1},
};
return filter(data,channels,rows,cols,(float*)kernel,3,1,0);
}
uchar * gaussianblur(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{1,2,1},
{2,4,2},
{1,2,1},
};
return filter(data,channels,rows,cols,(float*)kernel,3,1,0);
}
int * _sobelx(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{1,0,-1},
{2,0,-2},
{1,0,-1},
};
float * d_kernel;
hipMalloc(&d_kernel,sizeof(float)*3*3);
hipMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,hipMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
hipFree(d_kernel);
return res;
}
uchar * sobelx(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
hipMalloc(&d_data,sizeof(int)*channels*rows*cols);
hipMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,hipMemcpyHostToDevice);
int* d_output = _sobelx(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
hipMemcpy(output,d_output,sizeof(int)*rows*cols*channels, hipMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
hipFree(d_data);
hipFree(d_output);
free(output);
return out;
}
int * _sobely(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{1,2,1},
{0,0,0},
{-1,-2,-1},
};
float * d_kernel;
hipMalloc(&d_kernel,sizeof(float)*3*3);
hipMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,hipMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
hipFree(d_kernel);
return res;
}
int * _sobelx10(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{3,0,-3},
{10,0,-10},
{3,0,-3},
};
float * d_kernel;
hipMalloc(&d_kernel,sizeof(float)*3*3);
hipMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,hipMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
hipFree(d_kernel);
return res;
}
int * _sobely10(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{3,10,3},
{0,0,0},
{-3,-10,-3},
};
float * d_kernel;
hipMalloc(&d_kernel,sizeof(float)*3*3);
hipMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,hipMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
hipFree(d_kernel);
return res;
}
uchar * sobely(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
hipMalloc(&d_data,sizeof(int)*channels*rows*cols);
hipMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,hipMemcpyHostToDevice);
int* d_output = _sobely(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
hipMemcpy(output,d_output,sizeof(int)*rows*cols*channels, hipMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
hipFree(d_data);
hipFree(d_output);
free(output);
return out;
}
__global__
void sobelKernel(int *a, int*b,int* output,int* outputvars,int n, int base){
int i = getGlobalIdx_3D_3D(base);
if (i>=n){return;}
int val=sqrtf((*(a+i))*(*(a+i))+(*(b+i))*(*(b+i)));
*(output+i)=val;
atomicMin(outputvars,val);
atomicMax(outputvars+1,val);
}
uchar * sobel(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data,*minmaxs;
int * d_output,*output;
hipMalloc(&minmaxs,sizeof(int)*2);
hipMalloc(&d_data,sizeof(int)*channels*rows*cols);
hipMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,hipMemcpyHostToDevice);
hipMalloc(&d_output,sizeof(int)*rows*cols*channels);
output = (int*)malloc(sizeof(int)*rows*cols*channels);
int * filterx = _sobelx(d_data,channels,rows,cols,-1);
int * filtery = _sobely(d_data,channels,rows,cols,-1);
hipMemset(minmaxs,INT_MAX,1);
hipMemset(minmaxs+1,INT_MIN,1);
int N = rows*cols*channels;
int bloques = ceil(N/maxthreads);
for(int sub=0;sub<ceil(bloques/maxblocks);sub++){
hipLaunchKernelGGL(( sobelKernel), dim3(maxblocks),dim3(maxthreads), 0, 0, filterx,filtery,d_output,minmaxs,N,sub*maxblocks);
}
int* tmpMinMax = (int*)malloc(sizeof(int)*2);
hipMemcpy(tmpMinMax,minmaxs,sizeof(int)*2, hipMemcpyDeviceToHost);
for(int sub=0;sub<ceil(bloques/maxblocks);sub++){
hipLaunchKernelGGL(( normalize), dim3(maxblocks),dim3(maxthreads), 0, 0, d_output,channels,rows,cols,*(tmpMinMax),*(tmpMinMax+1),0,255,1,sub*maxblocks);
}
hipMemcpy(output,d_output,sizeof(int)*rows*cols*channels, hipMemcpyDeviceToHost);
//printf("%d %d %d %d\n",*(tmpMinMax),*(tmpMinMax+1),0,255);
uchar* out = inttouchar(output,rows*cols*channels);
hipFree(minmaxs);
hipFree(d_data);
hipFree(d_output);
free(datai);
free(output);
hipFree(filterx);
hipFree(filtery);
//free(tmpMinMax);
return out;
}
uchar * sobel10(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data,*minmaxs;
int * d_output,*output;
hipMalloc(&minmaxs,sizeof(int)*2);
hipMalloc(&d_data,sizeof(int)*channels*rows*cols);
hipMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,hipMemcpyHostToDevice);
hipMalloc(&d_output,sizeof(int)*rows*cols*channels);
output = (int*)malloc(sizeof(int)*rows*cols*channels);
int * filterx = _sobelx10(d_data,channels,rows,cols,-1);
int * filtery = _sobely10(d_data,channels,rows,cols,-1);
hipMemset(minmaxs,INT_MAX,1);
hipMemset(minmaxs+1,INT_MIN,1);
int N = rows*cols*channels;
int bloques = ceil(N/maxthreads);
for(int sub=0;sub<ceil(bloques/maxblocks);sub++){
hipLaunchKernelGGL(( sobelKernel), dim3(maxblocks),dim3(maxthreads), 0, 0, filterx,filtery,d_output,minmaxs,rows*cols*channels,sub*maxblocks);
}
int* tmpMinMax = (int*)malloc(sizeof(int)*2);
hipMemcpy(tmpMinMax,minmaxs,sizeof(int)*2, hipMemcpyDeviceToHost);
for(int sub=0;sub<ceil(bloques/maxblocks);sub++){
hipLaunchKernelGGL(( normalize), dim3(maxblocks),dim3(maxthreads), 0, 0, d_output,channels,rows,cols,*(tmpMinMax),*(tmpMinMax+1),0,255,1,sub*maxblocks);
}
hipMemcpy(output,d_output,sizeof(int)*rows*cols*channels, hipMemcpyDeviceToHost);
//printf("%d %d %d %d\n",*(tmpMinMax),*(tmpMinMax+1),0,255);
uchar* out = inttouchar(output,rows*cols*channels);
hipFree(minmaxs);
hipFree(d_data);
hipFree(d_output);
free(datai);
free(output);
hipFree(filterx);
hipFree(filtery);
free(tmpMinMax);
return out;
}
uchar * sobely10(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
hipMalloc(&d_data,sizeof(int)*channels*rows*cols);
hipMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,hipMemcpyHostToDevice);
int* d_output = _sobely10(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
hipMemcpy(output,d_output,sizeof(int)*rows*cols*channels, hipMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
hipFree(d_data);
hipFree(d_output);
free(output);
return out;
}
uchar * sobelx10(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
hipMalloc(&d_data,sizeof(int)*channels*rows*cols);
hipMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,hipMemcpyHostToDevice);
int* d_output = _sobelx10(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
hipMemcpy(output,d_output,sizeof(int)*rows*cols*channels, hipMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
hipFree(d_data);
hipFree(d_output);
free(output);
return out;
}
__global__
void kernelNormAdd(float* kernel,float* output, int kernelNormalize){
int i = blockIdx.x*blockDim.x + threadIdx.x;
float kernelVal=*((float*)kernel+i);
atomicAdd(output+(kernelVal>=0),kernelVal);
__syncthreads();
if (kernelNormalize==1){
*(kernel+i)=kernelVal/(*output+*(output+1));
}else{
*(kernel+i)=kernelVal/(*(output+(kernelVal>=0)));
}
__syncthreads();
}
int* _filter(int* data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode){
int* buff,*minmaxs;
hipMalloc(&buff,sizeof(int)*channels*rows*cols);
hipMalloc(&minmaxs,sizeof(int)*4);
hipMemset(buff,0,sizeof(int)*channels*rows*cols);
if (kernelNormalize){
float* sumKernel;
hipMalloc(&sumKernel,sizeof(float)*2);
hipMemset(sumKernel,0,sizeof(float)*2);
hipLaunchKernelGGL(( kernelNormAdd), dim3(1),dim3(9), 0, 0, kernel,sumKernel,kernelNormalize);
hipFree(sumKernel);
}
int N = rows*cols*channels;
int ssize = (sizeof(float)*kerneldim*kerneldim);
hipMemset(minmaxs,INT_MAX,1);
hipMemset(minmaxs+1,INT_MIN,1);
hipMemset(minmaxs+2,INT_MAX,1);
hipMemset(minmaxs+3,INT_MIN,1);
int bloques = ceil(N/maxthreads);
for(int sub=0;sub<ceil(bloques/maxblocks);sub++){
hipLaunchKernelGGL(( convolution), dim3(maxblocks),dim3(maxthreads),ssize, 0, data,buff,kernel,minmaxs,rows,cols,channels,kerneldim,sub*maxblocks);
hipError_t err=hipGetLastError();
if ( hipSuccess != err ){
printf( "Error!\n" );
printf("GPUassert: %s\n", hipGetErrorString(err));
}
}
if (outputNormalizationMode>=0){
int* tmpMinMax = (int*)malloc(sizeof(int)*4);
hipMemcpy(tmpMinMax,minmaxs,sizeof(int)*4, hipMemcpyDeviceToHost);
//printf("%d %d %d %d\n",*(tmpMinMax),*(tmpMinMax+1),*(tmpMinMax+2),*(tmpMinMax+3));
for(int sub=0;sub<ceil(bloques/maxblocks);sub++){
hipLaunchKernelGGL(( normalize), dim3(maxblocks),dim3(maxthreads), 0, 0, buff,channels,rows,cols,*(tmpMinMax),*(tmpMinMax+1),*(tmpMinMax+2),*(tmpMinMax+3),outputNormalizationMode,sub*maxblocks);
}
free(tmpMinMax);
}
hipFree(minmaxs);
return buff;
}
uchar* filter(uchar* data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
hipMalloc(&d_data,sizeof(int)*channels*rows*cols);
hipMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,hipMemcpyHostToDevice);
float * d_kernel;
hipMalloc(&d_kernel,sizeof(float)*3*3);
hipMemcpy(d_kernel,kernel,sizeof(float)*3*3,hipMemcpyHostToDevice);
int* d_output = _filter(d_data,channels,rows,cols,d_kernel,kerneldim,kernelNormalize,outputNormalizationMode);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
hipMemcpy(output,d_output,sizeof(int)*rows*cols*channels, hipMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
hipFree(d_data);
hipFree(d_output);
free(datai);
free(output);
return out;
}
int main(int argc, char** argv){
if (argc<3){
cout<<"./nombre imagen filtro"<<endl;
return 0;
}
char* nfiltro=*(argv+2);
uchar* (*filtro)(uchar*,int,int,int)=0;
if(strcmp(nfiltro,"sobel")==0) filtro=sobel;
if(strcmp(nfiltro,"sobelx")==0) filtro=sobelx;
if(strcmp(nfiltro,"sobely")==0) filtro=sobely;
if(strcmp(nfiltro,"sobel10")==0) filtro=sobel10;
if(strcmp(nfiltro,"sobelx10")==0) filtro=sobelx10;
if(strcmp(nfiltro,"sobely10")==0) filtro=sobely10;
if(strcmp(nfiltro,"edge1")==0) filtro=edge1;
if(strcmp(nfiltro,"edge2")==0) filtro=edge2;
if(strcmp(nfiltro,"edge3")==0) filtro=edge3;
if(strcmp(nfiltro,"boxblur")==0) filtro=boxblur;
if(strcmp(nfiltro,"gaussianblur")==0) filtro=gaussianblur;
if(strcmp(nfiltro,"sharpen")==0) filtro=sharpen;
if (filtro==0){
cout<<"metodo erroneo"<<endl;
return 1;
}
Mat image;
image = imread(*(argv+1), CV_LOAD_IMAGE_COLOR);
Mat m1;
if(! image.data ) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
return -1;
}
m1 = Mat (image);
m1.data=filtro(image.data,3,image.rows,image.cols);
namedWindow( "original", WINDOW_AUTOSIZE );
imshow( "original", image );
namedWindow( "filter", WINDOW_AUTOSIZE );
imshow( "filter", m1 );
waitKey(); // Wait for a keystroke in the window
return 0;
}
__device__ int getGlobalIdx_3D_3D(int base)
{
int blockId = blockIdx.x
+ blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
blockId+=base;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
return threadId;
}
__device__ int getblockthreadIdx(){
return (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
}
| dbd553cb5108eafd69f774f4a5af1a5d7c4d34aa.cu | #include <stdio.h>
#include <stdlib.h>
#include <opencv2/opencv.hpp>
#include <cmath>
#include <cuda.h>
using namespace std;
using namespace cv;
#define maxthreads 256.0
#define maxblocks 256.0
int* _filter(int* data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode);
uchar * filter(uchar * data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode);
__device__ int getGlobalIdx_3D_3D(int);
__device__ int getblockthreadIdx();
int* uchartoint(uchar* data, int size){
int* buff = (int*)malloc(sizeof(int)*size);
for(int i=0;i<size;i++){
*(buff+i)=(int)*(data+i);
}
return buff;
}
uchar* inttouchar(int* data, int size){
uchar* buff = (uchar*)malloc(sizeof(uchar)*size);
for(int i=0;i<size;i++){
*(buff+i)=(unsigned char)*(data+i);
}
return buff;
}
__global__
void convolution(int* data,int* buff,float* kernel,int* outputvars,int rows,int cols,int channels,int kerneldim,int baseblock){
int idx = getGlobalIdx_3D_3D(baseblock);
int kernelmid;
extern __shared__ float sharedKernel[];
float *kernelCenter;
if (getblockthreadIdx()<kerneldim*kerneldim){
*(sharedKernel+getblockthreadIdx())=*(kernel+getblockthreadIdx());
}
__syncthreads();
/*
if (getblockthreadIdx()<kerneldim*kerneldim){
printf("%d %f\n",getblockthreadIdx(),*(sharedKernel+getblockthreadIdx()));
}
__syncthreads();
*/
kernelmid = kerneldim%2==1?kerneldim/2:(kerneldim-1)/2;
kernelCenter=sharedKernel+(((kerneldim+1)*kernelmid));
int row = idx / (cols*channels);
int col = (idx%(cols*channels))/channels;
float value=0;
int pixel=0;
float kernelVal=0;
int pixelmin=INT_MAX,pixelmax=INT_MIN;
int kernelmidHalf=(kerneldim/2);
if (col>0 && row>0 && row<rows-1 && col<cols-1){
data = data+idx;
//r<=(kernelmidHalf) no funciona, no sé porque, pero cuda y yo tenemos un problema.
for(int r = (-1*kernelmidHalf); r<(kernelmidHalf+1);r++){
for(int c = -1*kernelmidHalf; c<(kernelmidHalf+1);c++){
pixel=*(data+(r*cols*channels)+(c*channels));
kernelVal=*(kernelCenter+(r*-1*kerneldim)+(c*-1));
value+=kernelVal*pixel;
if (pixel<pixelmin){
pixelmin=pixel;
}
if (pixel>pixelmax){
pixelmax=pixel;
}
}
}
*(buff+idx)=value;
atomicMin(outputvars,value);
atomicMax(outputvars+1,value);
atomicMin(outputvars+2,pixelmin);
atomicMax(outputvars+3,pixelmax);
}
//__syncthreads();
/*if (col>0 && row>0 && row<rows-1 && col<cols-1 && getblockthreadIdx()==0){
printf("%d %d %d %d\n",*(outputvars),*(outputvars+1),*(outputvars+2),*(outputvars+3));
}*/
}
__global__
void normalize(int* data,int channels, int rows, int cols,int min, int max, int newMin, int newMax, int mode,int subset){
int pixval=0;
int i = getGlobalIdx_3D_3D(subset);
int row = i / (cols*channels);
int col = (i%(cols*channels))/channels;
if (row>0 && col>0 && row<rows-1 && col<cols-1){
pixval=*(data+i);
if (mode==1){
*(data+i)=(pixval-min)*((newMax-newMin*1.0)/(max-min))+newMin;
}else{
*(data+i)=pixval>newMax?newMax:pixval<newMin?newMin:pixval;
}
}
// __syncthreads();
}
uchar * edge1(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{1,0,-1},
{0,0,0},
{-1,0,1}
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * edge2(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{0,1,0},
{1,-4,1},
{0,1,0}
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * edge3(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{-1,-1,-1},
{-1,8,-1},
{-1,-1,-1},
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * sharpen(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{0,-1,0},
{-1,5,-1},
{0,-1,0},
};
return filter(data,channels,rows,cols,(float*)kernel,3,0,0);
}
uchar * boxblur(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{1,1,1},
{1,1,1},
{1,1,1},
};
return filter(data,channels,rows,cols,(float*)kernel,3,1,0);
}
uchar * gaussianblur(uchar* data,int channels, int rows,int cols){
float kernel[3][3]={
{1,2,1},
{2,4,2},
{1,2,1},
};
return filter(data,channels,rows,cols,(float*)kernel,3,1,0);
}
int * _sobelx(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{1,0,-1},
{2,0,-2},
{1,0,-1},
};
float * d_kernel;
cudaMalloc(&d_kernel,sizeof(float)*3*3);
cudaMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,cudaMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
cudaFree(d_kernel);
return res;
}
uchar * sobelx(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
cudaMalloc(&d_data,sizeof(int)*channels*rows*cols);
cudaMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,cudaMemcpyHostToDevice);
int* d_output = _sobelx(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
cudaMemcpy(output,d_output,sizeof(int)*rows*cols*channels, cudaMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
cudaFree(d_data);
cudaFree(d_output);
free(output);
return out;
}
int * _sobely(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{1,2,1},
{0,0,0},
{-1,-2,-1},
};
float * d_kernel;
cudaMalloc(&d_kernel,sizeof(float)*3*3);
cudaMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,cudaMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
cudaFree(d_kernel);
return res;
}
int * _sobelx10(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{3,0,-3},
{10,0,-10},
{3,0,-3},
};
float * d_kernel;
cudaMalloc(&d_kernel,sizeof(float)*3*3);
cudaMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,cudaMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
cudaFree(d_kernel);
return res;
}
int * _sobely10(int* data,int channels, int rows,int cols, int mode){
float kernel[3][3]={
{3,10,3},
{0,0,0},
{-3,-10,-3},
};
float * d_kernel;
cudaMalloc(&d_kernel,sizeof(float)*3*3);
cudaMemcpy(d_kernel,(float*)kernel,sizeof(float)*3*3,cudaMemcpyHostToDevice);
int* res = _filter(data,channels,rows,cols,d_kernel,3,0,mode);
cudaFree(d_kernel);
return res;
}
uchar * sobely(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
cudaMalloc(&d_data,sizeof(int)*channels*rows*cols);
cudaMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,cudaMemcpyHostToDevice);
int* d_output = _sobely(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
cudaMemcpy(output,d_output,sizeof(int)*rows*cols*channels, cudaMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
cudaFree(d_data);
cudaFree(d_output);
free(output);
return out;
}
__global__
void sobelKernel(int *a, int*b,int* output,int* outputvars,int n, int base){
int i = getGlobalIdx_3D_3D(base);
if (i>=n){return;}
int val=sqrtf((*(a+i))*(*(a+i))+(*(b+i))*(*(b+i)));
*(output+i)=val;
atomicMin(outputvars,val);
atomicMax(outputvars+1,val);
}
uchar * sobel(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data,*minmaxs;
int * d_output,*output;
cudaMalloc(&minmaxs,sizeof(int)*2);
cudaMalloc(&d_data,sizeof(int)*channels*rows*cols);
cudaMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,cudaMemcpyHostToDevice);
cudaMalloc(&d_output,sizeof(int)*rows*cols*channels);
output = (int*)malloc(sizeof(int)*rows*cols*channels);
int * filterx = _sobelx(d_data,channels,rows,cols,-1);
int * filtery = _sobely(d_data,channels,rows,cols,-1);
cudaMemset(minmaxs,INT_MAX,1);
cudaMemset(minmaxs+1,INT_MIN,1);
int N = rows*cols*channels;
int bloques = ceil(N/maxthreads);
for(int sub=0;sub<ceil(bloques/maxblocks);sub++){
sobelKernel<<<maxblocks,maxthreads>>>(filterx,filtery,d_output,minmaxs,N,sub*maxblocks);
}
int* tmpMinMax = (int*)malloc(sizeof(int)*2);
cudaMemcpy(tmpMinMax,minmaxs,sizeof(int)*2, cudaMemcpyDeviceToHost);
for(int sub=0;sub<ceil(bloques/maxblocks);sub++){
normalize<<<maxblocks,maxthreads>>>(d_output,channels,rows,cols,*(tmpMinMax),*(tmpMinMax+1),0,255,1,sub*maxblocks);
}
cudaMemcpy(output,d_output,sizeof(int)*rows*cols*channels, cudaMemcpyDeviceToHost);
//printf("%d %d %d %d\n",*(tmpMinMax),*(tmpMinMax+1),0,255);
uchar* out = inttouchar(output,rows*cols*channels);
cudaFree(minmaxs);
cudaFree(d_data);
cudaFree(d_output);
free(datai);
free(output);
cudaFree(filterx);
cudaFree(filtery);
//free(tmpMinMax);
return out;
}
uchar * sobel10(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data,*minmaxs;
int * d_output,*output;
cudaMalloc(&minmaxs,sizeof(int)*2);
cudaMalloc(&d_data,sizeof(int)*channels*rows*cols);
cudaMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,cudaMemcpyHostToDevice);
cudaMalloc(&d_output,sizeof(int)*rows*cols*channels);
output = (int*)malloc(sizeof(int)*rows*cols*channels);
int * filterx = _sobelx10(d_data,channels,rows,cols,-1);
int * filtery = _sobely10(d_data,channels,rows,cols,-1);
cudaMemset(minmaxs,INT_MAX,1);
cudaMemset(minmaxs+1,INT_MIN,1);
int N = rows*cols*channels;
int bloques = ceil(N/maxthreads);
for(int sub=0;sub<ceil(bloques/maxblocks);sub++){
sobelKernel<<<maxblocks,maxthreads>>>(filterx,filtery,d_output,minmaxs,rows*cols*channels,sub*maxblocks);
}
int* tmpMinMax = (int*)malloc(sizeof(int)*2);
cudaMemcpy(tmpMinMax,minmaxs,sizeof(int)*2, cudaMemcpyDeviceToHost);
for(int sub=0;sub<ceil(bloques/maxblocks);sub++){
normalize<<<maxblocks,maxthreads>>>(d_output,channels,rows,cols,*(tmpMinMax),*(tmpMinMax+1),0,255,1,sub*maxblocks);
}
cudaMemcpy(output,d_output,sizeof(int)*rows*cols*channels, cudaMemcpyDeviceToHost);
//printf("%d %d %d %d\n",*(tmpMinMax),*(tmpMinMax+1),0,255);
uchar* out = inttouchar(output,rows*cols*channels);
cudaFree(minmaxs);
cudaFree(d_data);
cudaFree(d_output);
free(datai);
free(output);
cudaFree(filterx);
cudaFree(filtery);
free(tmpMinMax);
return out;
}
uchar * sobely10(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
cudaMalloc(&d_data,sizeof(int)*channels*rows*cols);
cudaMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,cudaMemcpyHostToDevice);
int* d_output = _sobely10(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
cudaMemcpy(output,d_output,sizeof(int)*rows*cols*channels, cudaMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
cudaFree(d_data);
cudaFree(d_output);
free(output);
return out;
}
uchar * sobelx10(uchar* data,int channels, int rows,int cols){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
cudaMalloc(&d_data,sizeof(int)*channels*rows*cols);
cudaMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,cudaMemcpyHostToDevice);
int* d_output = _sobelx10(d_data,channels,rows,cols,0);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
cudaMemcpy(output,d_output,sizeof(int)*rows*cols*channels, cudaMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
cudaFree(d_data);
cudaFree(d_output);
free(output);
return out;
}
__global__
void kernelNormAdd(float* kernel,float* output, int kernelNormalize){
int i = blockIdx.x*blockDim.x + threadIdx.x;
float kernelVal=*((float*)kernel+i);
atomicAdd(output+(kernelVal>=0),kernelVal);
__syncthreads();
if (kernelNormalize==1){
*(kernel+i)=kernelVal/(*output+*(output+1));
}else{
*(kernel+i)=kernelVal/(*(output+(kernelVal>=0)));
}
__syncthreads();
}
int* _filter(int* data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode){
int* buff,*minmaxs;
cudaMalloc(&buff,sizeof(int)*channels*rows*cols);
cudaMalloc(&minmaxs,sizeof(int)*4);
cudaMemset(buff,0,sizeof(int)*channels*rows*cols);
if (kernelNormalize){
float* sumKernel;
cudaMalloc(&sumKernel,sizeof(float)*2);
cudaMemset(sumKernel,0,sizeof(float)*2);
kernelNormAdd<<<1,9>>>(kernel,sumKernel,kernelNormalize);
cudaFree(sumKernel);
}
int N = rows*cols*channels;
int ssize = (sizeof(float)*kerneldim*kerneldim);
cudaMemset(minmaxs,INT_MAX,1);
cudaMemset(minmaxs+1,INT_MIN,1);
cudaMemset(minmaxs+2,INT_MAX,1);
cudaMemset(minmaxs+3,INT_MIN,1);
int bloques = ceil(N/maxthreads);
for(int sub=0;sub<ceil(bloques/maxblocks);sub++){
convolution<<<maxblocks,maxthreads,ssize>>>(data,buff,kernel,minmaxs,rows,cols,channels,kerneldim,sub*maxblocks);
cudaError_t err=cudaGetLastError();
if ( cudaSuccess != err ){
printf( "Error!\n" );
printf("GPUassert: %s\n", cudaGetErrorString(err));
}
}
if (outputNormalizationMode>=0){
int* tmpMinMax = (int*)malloc(sizeof(int)*4);
cudaMemcpy(tmpMinMax,minmaxs,sizeof(int)*4, cudaMemcpyDeviceToHost);
//printf("%d %d %d %d\n",*(tmpMinMax),*(tmpMinMax+1),*(tmpMinMax+2),*(tmpMinMax+3));
for(int sub=0;sub<ceil(bloques/maxblocks);sub++){
normalize<<<maxblocks,maxthreads>>>(buff,channels,rows,cols,*(tmpMinMax),*(tmpMinMax+1),*(tmpMinMax+2),*(tmpMinMax+3),outputNormalizationMode,sub*maxblocks);
}
free(tmpMinMax);
}
cudaFree(minmaxs);
return buff;
}
uchar* filter(uchar* data,int channels, int rows,int cols,float *kernel,int kerneldim, int kernelNormalize, int outputNormalizationMode){
int* datai = uchartoint(data,channels*rows*cols);
int * d_data;
cudaMalloc(&d_data,sizeof(int)*channels*rows*cols);
cudaMemcpy(d_data,datai,sizeof(int)*channels*rows*cols,cudaMemcpyHostToDevice);
float * d_kernel;
cudaMalloc(&d_kernel,sizeof(float)*3*3);
cudaMemcpy(d_kernel,kernel,sizeof(float)*3*3,cudaMemcpyHostToDevice);
int* d_output = _filter(d_data,channels,rows,cols,d_kernel,kerneldim,kernelNormalize,outputNormalizationMode);
int* output = (int*)malloc(sizeof(int)*rows*cols*channels);
cudaMemcpy(output,d_output,sizeof(int)*rows*cols*channels, cudaMemcpyDeviceToHost);
uchar* out = inttouchar(output,rows*cols*channels);
cudaFree(d_data);
cudaFree(d_output);
free(datai);
free(output);
return out;
}
int main(int argc, char** argv){
if (argc<3){
cout<<"./nombre imagen filtro"<<endl;
return 0;
}
char* nfiltro=*(argv+2);
uchar* (*filtro)(uchar*,int,int,int)=0;
if(strcmp(nfiltro,"sobel")==0) filtro=sobel;
if(strcmp(nfiltro,"sobelx")==0) filtro=sobelx;
if(strcmp(nfiltro,"sobely")==0) filtro=sobely;
if(strcmp(nfiltro,"sobel10")==0) filtro=sobel10;
if(strcmp(nfiltro,"sobelx10")==0) filtro=sobelx10;
if(strcmp(nfiltro,"sobely10")==0) filtro=sobely10;
if(strcmp(nfiltro,"edge1")==0) filtro=edge1;
if(strcmp(nfiltro,"edge2")==0) filtro=edge2;
if(strcmp(nfiltro,"edge3")==0) filtro=edge3;
if(strcmp(nfiltro,"boxblur")==0) filtro=boxblur;
if(strcmp(nfiltro,"gaussianblur")==0) filtro=gaussianblur;
if(strcmp(nfiltro,"sharpen")==0) filtro=sharpen;
if (filtro==0){
cout<<"metodo erroneo"<<endl;
return 1;
}
Mat image;
image = imread(*(argv+1), CV_LOAD_IMAGE_COLOR);
Mat m1;
if(! image.data ) // Check for invalid input
{
cout << "Could not open or find the image" << std::endl ;
return -1;
}
m1 = Mat (image);
m1.data=filtro(image.data,3,image.rows,image.cols);
namedWindow( "original", WINDOW_AUTOSIZE );
imshow( "original", image );
namedWindow( "filter", WINDOW_AUTOSIZE );
imshow( "filter", m1 );
waitKey(); // Wait for a keystroke in the window
return 0;
}
__device__ int getGlobalIdx_3D_3D(int base)
{
int blockId = blockIdx.x
+ blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
blockId+=base;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
return threadId;
}
__device__ int getblockthreadIdx(){
return (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
}
|
1f3ceeb5091eaab474bcfc2e6578b53435f5f0f3.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <hip/hip_fp16.h>
#include <hipcub/hipcub.hpp>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/kernels/funcs/broadcast_function.h"
#include "paddle/phi/kernels/funcs/elementwise_functor.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/fluid/operators/fused/attention_layer_norm.h"
#include "paddle/fluid/operators/fused/attn_gemm.h"
#include "paddle/fluid/operators/fused/fmha_ref.h"
#include "paddle/fluid/operators/fused/fused_dropout_helper.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
static void AllReduce(framework::Tensor &tensor, // NOLINT
const int ring_id,
const platform::CUDADeviceContext &ctx) {
if (ring_id == -1) return;
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
auto dtype =
platform::ToNCCLDataType(framework::TransToProtoVarType(tensor.dtype()));
int64_t numel = tensor.numel();
const void *sendbuff = tensor.data<T>();
auto place = ctx.GetPlace();
void *recvbuff = tensor.mutable_data<T>(place);
auto comm = platform::NCCLCommContext::Instance().Get(ring_id, place);
auto stream = ctx.stream();
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
sendbuff, recvbuff, numel, dtype, ncclSum, comm->comm(), stream));
#else
PADDLE_THROW(platform::errors::Unimplemented(
"PaddlePaddle should compile with NCCL or RCCL when used tensor model "
"parallel op."));
#endif
}
template <typename T>
class FusedAttentionOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
using U = LayerNormParamType<T>;
auto *input_x = ctx.Input<Tensor>("X");
const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm");
const float epsilon = ctx.Attr<float>("epsilon");
auto *ln_scale = ctx.Input<Tensor>("LnScale");
auto *ln_bias = ctx.Input<Tensor>("LnBias");
auto *ln_mean = ctx.Output<Tensor>("LnMean");
auto *ln_var = ctx.Output<Tensor>("LnVariance");
auto *ln_out = ctx.Output<Tensor>("LnOut");
// x: qkv's input [batch_size, seq_len, dim_embed]
// y: qkv's weight: [3, num_head, dim_head, dim_embed]
auto *qkv_weight = ctx.Input<Tensor>("QKVW");
auto *qkv_bias = ctx.Input<Tensor>("QKVBias");
auto *qkv_out = ctx.Output<Tensor>("QKVOut");
auto *qkv_bias_out = ctx.Output<Tensor>("QKVBiasOut");
auto *src_mask = ctx.Input<Tensor>("SrcMask");
auto *transpose_out_2 = ctx.Output<Tensor>("TransposeOut2");
auto *cache_kv = ctx.Input<Tensor>("CacheKV");
auto *cache_kv_out = ctx.Output<Tensor>("CacheKVOut");
auto *qk_out = ctx.Output<Tensor>("QKOut");
auto *qktv_out = ctx.Output<Tensor>("QKTVOut");
auto *softmax_out = ctx.Output<Tensor>("SoftmaxOut");
auto *attn_dropout_mask_out = ctx.Output<Tensor>("AttnDropoutMaskOut");
auto *attn_dropout_out = ctx.Output<Tensor>("AttnDropoutOut");
auto *src_mask_out = ctx.Output<Tensor>("SrcMaskOut");
auto *fmha_out = ctx.Output<Tensor>("FMHAOut");
auto *out_linear_weight = ctx.Input<Tensor>("OutLinearW");
auto *out_linear_bias = ctx.Input<Tensor>("OutLinearBias");
auto *out_linear_out = ctx.Output<Tensor>("OutLinearOut");
auto *ln_scale_2 = ctx.Input<Tensor>("Ln2Scale");
auto *ln_bias_2 = ctx.Input<Tensor>("Ln2Bias");
auto *dropout_mask_out = ctx.Output<Tensor>("DropoutMaskOut");
auto *bias_dropout_residual_out =
ctx.Output<Tensor>("BiasDropoutResidualOut");
auto *ln_mean_2 = ctx.Output<Tensor>("Ln2Mean");
auto *ln_var_2 = ctx.Output<Tensor>("Ln2Variance");
const float ln_epsilon = ctx.Attr<float>("ln_epsilon");
float attn_dropout_rate = ctx.Attr<float>("attn_dropout_rate");
bool is_test_1 = ctx.Attr<bool>("is_test");
auto &dropout_implementation_1 =
ctx.Attr<std::string>("attn_dropout_implementation");
bool is_upscale_in_train_1 =
(dropout_implementation_1 == "upscale_in_train");
auto *seed_1 = ctx.HasInput("Seed1") ? ctx.Input<Tensor>("Seed1") : nullptr;
bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed");
int seed_val_1 = ctx.Attr<int>("attn_dropout_seed");
int ring_id = ctx.Attr<int>("ring_id");
// final output.
auto *out = ctx.Output<Tensor>("Y");
// get data ptr for qkv part.
const auto input_x_dims = input_x->dims();
const auto qkv_w_dims = qkv_weight->dims();
auto *x_data = input_x->data<T>();
auto *qkv_weight_data = qkv_weight->data<T>();
auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>();
auto *qkv_out_data = qkv_out->mutable_data<T>(ctx.GetPlace());
auto *qkv_bias_out_data =
(qkv_bias == nullptr) ? nullptr
: qkv_bias_out->mutable_data<T>(ctx.GetPlace());
// get data ptr for FMHA.
auto *transpose_out_2_data =
transpose_out_2->mutable_data<T>(ctx.GetPlace());
auto *cache_kv_out_data =
(cache_kv_out == nullptr)
? nullptr
: cache_kv_out->mutable_data<T>(ctx.GetPlace());
auto *qk_out_data = qk_out->mutable_data<T>(ctx.GetPlace());
auto *qktv_out_data = qktv_out->mutable_data<T>(ctx.GetPlace());
auto *src_mask_out_data =
(src_mask == nullptr) ? nullptr
: src_mask_out->mutable_data<T>(ctx.GetPlace());
auto *softmax_out_data = softmax_out->mutable_data<T>(ctx.GetPlace());
auto *attn_dropout_mask_out_data =
attn_dropout_mask_out->mutable_data<uint8_t>(ctx.GetPlace());
auto *attn_dropout_out_data =
attn_dropout_out->mutable_data<T>(ctx.GetPlace());
auto *fmha_out_data = fmha_out->mutable_data<T>(ctx.GetPlace());
// get data ptr for out_linear.
auto *out_linear_weight_data = out_linear_weight->data<T>();
auto *out_linear_bias_data =
(out_linear_bias == nullptr) ? nullptr : out_linear_bias->data<T>();
auto *out_linear_out_data = out_linear_out->mutable_data<T>(ctx.GetPlace());
// get data ptr for bias+dropout+residual+layernorm
auto *dropout_mask_out_data =
dropout_mask_out->mutable_data<uint8_t>(ctx.GetPlace());
auto *final_out_data = out->mutable_data<T>(ctx.GetPlace());
int batch_size = input_x_dims[0];
int max_seq_len = input_x_dims[1];
int dim_embed = input_x_dims[2];
int num_head = qkv_w_dims[1];
int dim_head = qkv_w_dims[2];
int bsz_seq = batch_size * max_seq_len;
int hidden_size = num_head * dim_head;
int output_size = 3 * hidden_size;
int input_size = dim_embed;
auto layer_norm_compute = AttnLayerNorm<T>(ctx.cuda_device_context(),
epsilon, bsz_seq, dim_embed);
bool compute_bias = true;
if (qkv_bias == nullptr) {
compute_bias = false;
}
// (transA, transB, compute_bias) = (false, true, true)
auto qkv_compute =
AttnMatMul<T>(ctx.cuda_device_context(), false, true, bsz_seq,
output_size, input_size, compute_bias);
AttnDropoutParam attn_dropout_param(
is_test_1, dropout_implementation_1, attn_dropout_rate,
is_upscale_in_train_1, is_fix_seed_1, seed_val_1, seed_1);
auto fmha_ref_compute =
FMHARef<T>(ctx.cuda_device_context(), batch_size, max_seq_len, num_head,
dim_head, attn_dropout_param);
output_size = hidden_size;
// (transA, transB, compute_bias) = (false, false, false)
// NOTE(Yuang Liu): For general input size == output size, change the
// position won't have effects. For mp, the output size is mp_head * dkey
// which is actually the input size. While the input size is hidden size,
// which is actually the output size. So for out linear, switch the
// input size and output size.
auto out_linear_compute =
AttnMatMul<T>(ctx.cuda_device_context(), false, false, bsz_seq,
input_size, output_size, false);
DropoutParam dropout_param2(ctx, 0);
FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper(
ctx.cuda_device_context(), bsz_seq, dim_embed, dropout_param2,
ln_epsilon);
if (pre_layer_norm) {
auto *ln_scale_data =
(ln_scale == nullptr ? nullptr : ln_scale->data<U>());
auto *ln_bias_data = (ln_bias == nullptr ? nullptr : ln_bias->data<U>());
auto *ln_mean_data = ln_mean->mutable_data<U>(ctx.GetPlace());
auto *ln_var_data = ln_var->mutable_data<U>(ctx.GetPlace());
auto *ln_out_data = ln_out->mutable_data<T>(ctx.GetPlace());
layer_norm_compute.ComputeForward(x_data, ln_scale_data, ln_bias_data,
ln_out_data, ln_mean_data, ln_var_data);
qkv_compute.ComputeForward(qkv_weight, ln_out, qkv_bias, qkv_out,
qkv_bias_out);
} else {
qkv_compute.ComputeForward(qkv_weight, input_x, qkv_bias, qkv_out,
qkv_bias_out);
}
if (qkv_bias == nullptr) {
fmha_ref_compute.ComputeForward(
*qkv_out, cache_kv, src_mask, transpose_out_2, cache_kv_out, qk_out,
src_mask_out, softmax_out, attn_dropout_mask_out, attn_dropout_out,
qktv_out, fmha_out);
} else {
fmha_ref_compute.ComputeForward(
*qkv_bias_out, cache_kv, src_mask, transpose_out_2, cache_kv_out,
qk_out, src_mask_out, softmax_out, attn_dropout_mask_out,
attn_dropout_out, qktv_out, fmha_out);
}
// fmha_out: [batch_size, seq_len, num_head, head_dim]
// weight: [embed_dim, embed_dim]
// out_linear_out: [batch_size, seq_len, embed_dim]
out_linear_compute.ComputeForward(out_linear_weight, fmha_out, nullptr,
out_linear_out, nullptr);
// tensor model parallel
AllReduce<T>(*out_linear_out, ring_id, ctx.cuda_device_context());
if (pre_layer_norm) {
// output = (residual + dropout(input + bias))
fused_dropout_layernorm_helper.ResidualDropoutBias(
ctx.cuda_device_context(), out_linear_out_data, x_data,
out_linear_bias_data, final_out_data, dropout_mask_out_data);
} else {
auto *ln_scale_2_data =
(ln_scale_2 == nullptr ? nullptr : ln_scale_2->data<U>());
auto *ln_bias_2_data =
(ln_bias_2 == nullptr ? nullptr : ln_bias_2->data<U>());
auto *bias_dropout_residual_out_data =
bias_dropout_residual_out->mutable_data<T>(ctx.GetPlace());
auto *ln_mean_2_data = ln_mean_2->mutable_data<U>(ctx.GetPlace());
auto *ln_var_2_data = ln_var_2->mutable_data<U>(ctx.GetPlace());
// output = layernorm(residual + dropout(input + bias))
fused_dropout_layernorm_helper.LayernormResidualDropoutBias(
ctx.cuda_device_context(), out_linear_out_data, x_data,
out_linear_bias_data, ln_scale_2_data, ln_bias_2_data,
bias_dropout_residual_out_data, dropout_mask_out_data, final_out_data,
ln_mean_2_data, ln_var_2_data);
}
}
};
template <typename T>
class FusedAttentionGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
using U = LayerNormParamType<T>;
const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm");
const float epsilon = ctx.Attr<float>("epsilon");
const float ln2epsilon = ctx.Attr<float>("ln_epsilon");
float attn_dropout_prob = ctx.Attr<float>("attn_dropout_rate");
bool is_test_1 = ctx.Attr<bool>("is_test");
auto &dropout_implementation_1 =
ctx.Attr<std::string>("attn_dropout_implementation");
bool is_upscale_in_train_1 =
(dropout_implementation_1 == "upscale_in_train");
auto *seed_1 = ctx.HasInput("Seed1") ? ctx.Input<Tensor>("Seed1") : nullptr;
bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed");
int seed_val_1 = ctx.Attr<int>("attn_dropout_seed");
int ring_id = ctx.Attr<int>("ring_id");
// get inputs.
auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
auto *d_y_data = d_y->data<T>();
// fw input
auto *input_x = ctx.Input<Tensor>("X");
auto *ln_scale = ctx.Input<Tensor>("LnScale");
auto *ln_2_scale = ctx.Input<Tensor>("Ln2Scale");
auto *x_data = input_x->data<T>();
auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>());
auto *ln_2_scale_data =
(ln_2_scale == nullptr ? nullptr : ln_2_scale->data<U>());
// fw parameters.
auto *src_mask = ctx.Input<Tensor>("SrcMask");
auto *qkv_weight = ctx.Input<Tensor>("QKVW");
auto *qkv_bias = ctx.Input<Tensor>("QKVBias");
auto *out_linear_weight = ctx.Input<Tensor>("OutLinearW");
auto *out_linear_bias = ctx.Input<Tensor>("OutLinearBias");
auto *src_mask_data = (src_mask == nullptr ? nullptr : src_mask->data<T>());
auto *qkv_weight_data = qkv_weight->data<T>();
auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>();
auto *out_linear_weight_data = out_linear_weight->data<T>();
auto *out_linear_bias_data =
(out_linear_bias == nullptr) ? nullptr : out_linear_bias->data<T>();
// fw output
auto *fmha_out = ctx.Input<Tensor>("FMHAOut");
auto *transpose_out_2 = ctx.Input<Tensor>("TransposeOut2");
auto *qk_out = ctx.Input<Tensor>("QKOut");
auto *qktv_out = ctx.Input<Tensor>("QKTVOut");
auto *softmax_out = ctx.Input<Tensor>("SoftmaxOut");
auto *attn_dropout_mask_out = ctx.Input<Tensor>("AttnDropoutMaskOut");
auto *attn_dropout_out = ctx.Input<Tensor>("AttnDropoutOut");
auto *src_mask_out = ctx.Input<Tensor>("SrcMaskOut");
auto *out_linear_out = ctx.Input<Tensor>("OutLinearOut");
auto *ln_2_mean = ctx.Input<Tensor>("Ln2Mean");
auto *ln_2_var = ctx.Input<Tensor>("Ln2Variance");
auto *dropout_mask_out = ctx.Input<Tensor>("DropoutMaskOut");
auto *bias_dropout_residual_out =
ctx.Input<Tensor>("BiasDropoutResidualOut");
auto *fmha_out_data = fmha_out->data<T>();
auto *transpose_out_2_data = transpose_out_2->data<T>();
auto *qk_out_data = qk_out->data<T>();
auto *qktv_out_data = qktv_out->data<T>();
auto *softmax_out_data = softmax_out->data<T>();
auto *src_mask_out_data =
(src_mask == nullptr) ? nullptr : src_mask_out->data<T>();
auto *out_linear_out_data = out_linear_out->data<T>();
auto *dropout_mask_out_data = dropout_mask_out->data<uint8_t>();
// output's grad
auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *d_qkv_out = ctx.Output<Tensor>(framework::GradVarName("QKVOut"));
auto *d_qkv_bias_out =
ctx.Output<Tensor>(framework::GradVarName("QKVBiasOut"));
auto *d_qktv_out = ctx.Output<Tensor>(framework::GradVarName("QKTVOut"));
auto *d_transpose_out_2 =
ctx.Output<Tensor>(framework::GradVarName("TransposeOut2"));
auto *d_qk_out = ctx.Output<Tensor>(framework::GradVarName("QKOut"));
auto *d_softmax_out =
ctx.Output<Tensor>(framework::GradVarName("SoftmaxOut"));
auto *d_attn_dropout_out =
ctx.Output<Tensor>(framework::GradVarName("AttnDropoutOut"));
auto *d_src_mask_out =
ctx.Output<Tensor>(framework::GradVarName("SrcMaskOut"));
auto *d_fmha_out = ctx.Output<Tensor>(framework::GradVarName("FMHAOut"));
auto *d_out_linear_out =
ctx.Output<Tensor>(framework::GradVarName("OutLinearOut"));
auto *d_bias_dropout_residual_out =
ctx.Output<Tensor>(framework::GradVarName("BiasDropoutResidualOut"));
auto *d_x_data = d_x->mutable_data<T>(ctx.GetPlace());
// when qkv_bias is not nullptr, d_qkv_out is equals to d_qkv_bias_out, the
// space can be reused.
auto *d_qkv_out_data = (d_qkv_bias_out != nullptr)
? nullptr
: d_qkv_out->mutable_data<T>(ctx.GetPlace());
auto *d_qkv_bias_out_data =
(d_qkv_bias_out == nullptr)
? nullptr
: d_qkv_bias_out->mutable_data<T>(ctx.GetPlace());
auto *d_qktv_out_data = d_qktv_out->mutable_data<T>(ctx.GetPlace());
auto *d_transpose_out_2_data =
d_transpose_out_2->mutable_data<T>(ctx.GetPlace());
auto *d_qk_out_data = d_qk_out->mutable_data<T>(ctx.GetPlace());
auto *d_softmax_out_data = d_softmax_out->mutable_data<T>(ctx.GetPlace());
auto *d_attn_dropout_out_data =
d_attn_dropout_out->mutable_data<T>(ctx.GetPlace());
auto *d_src_mask_out_data =
(src_mask == nullptr) ? nullptr
: d_src_mask_out->mutable_data<T>(ctx.GetPlace());
auto *d_fmha_out_data = d_fmha_out->mutable_data<T>(ctx.GetPlace());
auto *d_out_linear_out_data =
d_out_linear_out->mutable_data<T>(ctx.GetPlace());
// parameter grad
auto *d_qkv_weight = ctx.Output<Tensor>(framework::GradVarName("QKVW"));
auto *d_qkv_bias = ctx.Output<Tensor>(framework::GradVarName("QKVBias"));
auto *d_out_linear_weight =
ctx.Output<Tensor>(framework::GradVarName("OutLinearW"));
auto *d_out_linear_bias =
ctx.Output<Tensor>(framework::GradVarName("OutLinearBias"));
auto *d_ln_2_scale = ctx.Output<Tensor>(framework::GradVarName("Ln2Scale"));
auto *d_ln_2_bias = ctx.Output<Tensor>(framework::GradVarName("Ln2Bias"));
auto *d_qkv_weight_data = d_qkv_weight->mutable_data<T>(ctx.GetPlace());
auto *d_qkv_bias_data = (d_qkv_bias == nullptr)
? nullptr
: d_qkv_bias->mutable_data<T>(ctx.GetPlace());
auto *d_out_linear_weight_data =
d_out_linear_weight->mutable_data<T>(ctx.GetPlace());
auto *d_out_linear_bias_data =
(d_out_linear_bias == nullptr)
? nullptr
: d_out_linear_bias->mutable_data<T>(ctx.GetPlace());
const auto input_x_dims = input_x->dims();
const auto qkv_w_dims = qkv_weight->dims();
int batch_size = input_x_dims[0];
int max_seq_len = input_x_dims[1];
int dim_embed = input_x_dims[2];
int num_head = qkv_w_dims[1];
int dim_head = qkv_w_dims[2];
int bsz_seq = batch_size * max_seq_len;
int hidden_size = num_head * dim_head;
int output_size = 3 * hidden_size;
int input_size = dim_embed;
Tensor d_residual;
d_residual.Resize(input_x_dims);
T *d_residual_data = d_residual.mutable_data<T>(ctx.GetPlace());
bool transA = false;
bool transB = true;
bool compute_qkv_bias = true;
if (qkv_bias == nullptr) {
compute_qkv_bias = false;
}
auto layer_norm_compute = AttnLayerNorm<T>(ctx.cuda_device_context(),
epsilon, bsz_seq, dim_embed);
auto qkv_compute =
AttnMatMul<T>(ctx.cuda_device_context(), transA, transB, bsz_seq,
output_size, input_size, compute_qkv_bias);
AttnDropoutParam attn_dropout_param(
is_test_1, dropout_implementation_1, attn_dropout_prob,
is_upscale_in_train_1, is_fix_seed_1, seed_val_1, seed_1);
auto fmha_ref_compute =
FMHARef<T>(ctx.cuda_device_context(), batch_size, max_seq_len, num_head,
dim_head, attn_dropout_param);
output_size = hidden_size;
transA = false;
transB = false;
bool compute_bias = false;
// (b*s, num_head * dim_head) * (num_head * dim_head, dim_embed)
auto out_linear_compute =
AttnMatMul<T>(ctx.cuda_device_context(), transA, transB, bsz_seq,
input_size, output_size, compute_bias);
DropoutParam dropout_param2(ctx, 0);
FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper(
ctx.cuda_device_context(), bsz_seq, dim_embed, dropout_param2,
ln2epsilon);
if (pre_layer_norm) {
fused_dropout_layernorm_helper.ResidualDropoutBiasGrad(
ctx.cuda_device_context(), d_y_data, dropout_mask_out_data,
d_out_linear_out_data, d_residual_data, d_out_linear_bias_data);
} else {
auto *ln_2_mean_data = ln_2_mean->data<U>();
auto *ln_2_var_data = ln_2_var->data<U>();
auto *bias_dropout_residual_out_data =
bias_dropout_residual_out->data<T>();
auto *d_ln_2_scale_data =
(d_ln_2_scale == nullptr ? nullptr : d_ln_2_scale->mutable_data<U>(
ctx.GetPlace()));
auto *d_ln_2_bias_data =
(d_ln_2_bias == nullptr ? nullptr : d_ln_2_bias->mutable_data<U>(
ctx.GetPlace()));
auto *d_bias_dropout_residual_out_data =
d_bias_dropout_residual_out->mutable_data<T>(ctx.GetPlace());
fused_dropout_layernorm_helper.LayernormResidualDropoutBiasGrad(
ctx.cuda_device_context(), d_y_data, bias_dropout_residual_out_data,
dropout_mask_out_data, ln_2_scale_data, ln_2_mean_data, ln_2_var_data,
d_bias_dropout_residual_out_data, d_ln_2_scale_data, d_ln_2_bias_data,
d_out_linear_out_data, d_out_linear_bias_data, d_residual_data);
}
out_linear_compute.ComputeBackward(fmha_out, out_linear_weight,
d_out_linear_out, d_fmha_out,
d_out_linear_weight, nullptr);
if (qkv_bias != nullptr) {
fmha_ref_compute.ComputeBackward(
*transpose_out_2, src_mask, *softmax_out, *attn_dropout_mask_out,
*attn_dropout_out, *qk_out, *src_mask_out, *d_fmha_out, d_qktv_out,
d_attn_dropout_out, d_softmax_out, d_src_mask_out, d_qk_out,
d_transpose_out_2, nullptr, d_qkv_bias_out);
} else {
fmha_ref_compute.ComputeBackward(
*transpose_out_2, src_mask, *softmax_out, *attn_dropout_mask_out,
*attn_dropout_out, *qk_out, *src_mask_out, *d_fmha_out, d_qktv_out,
d_attn_dropout_out, d_softmax_out, d_src_mask_out, d_qk_out,
d_transpose_out_2, nullptr, d_qkv_out);
}
if (pre_layer_norm) {
auto *ln_mean = ctx.Input<Tensor>("LnMean");
auto *ln_var = ctx.Input<Tensor>("LnVariance");
auto *ln_out = ctx.Input<Tensor>("LnOut");
auto *ln_mean_data = ln_mean->data<U>();
auto *ln_var_data = ln_var->data<U>();
auto *ln_out_data = ln_out->data<T>();
auto *d_ln_out = ctx.Output<Tensor>(framework::GradVarName("LnOut"));
auto *d_ln_scale = ctx.Output<Tensor>(framework::GradVarName("LnScale"));
auto *d_ln_bias = ctx.Output<Tensor>(framework::GradVarName("LnBias"));
auto *d_ln_out_data = d_ln_out->mutable_data<T>(ctx.GetPlace());
auto *d_ln_scale_data =
(d_ln_scale == nullptr ? nullptr
: d_ln_scale->mutable_data<U>(ctx.GetPlace()));
auto *d_ln_bias_data =
(d_ln_bias == nullptr ? nullptr
: d_ln_bias->mutable_data<U>(ctx.GetPlace()));
if (qkv_bias != nullptr) {
qkv_compute.ComputeBackward(ln_out, qkv_weight, d_qkv_bias_out,
d_ln_out, d_qkv_weight, d_qkv_bias);
} else {
qkv_compute.ComputeBackward(ln_out, qkv_weight, d_qkv_out, d_ln_out,
d_qkv_weight, d_qkv_bias);
}
// tensor model parallel
AllReduce<T>(*d_ln_out, ring_id, ctx.cuda_device_context());
layer_norm_compute.ComputeBackward(x_data, d_ln_out_data, ln_scale_data,
ln_mean_data, ln_var_data, d_x_data,
d_ln_scale_data, d_ln_bias_data);
} else {
if (qkv_bias != nullptr) {
qkv_compute.ComputeBackward(input_x, qkv_weight, d_qkv_bias_out, d_x,
d_qkv_weight, d_qkv_bias);
} else {
qkv_compute.ComputeBackward(input_x, qkv_weight, d_qkv_out, d_x,
d_qkv_weight, d_qkv_bias);
}
// tensor model parallel
AllReduce<T>(*d_x, ring_id, ctx.cuda_device_context());
}
// gradient accumulation
std::vector<const Tensor *> ins;
std::vector<Tensor *> outs;
ins.emplace_back(&d_residual);
ins.emplace_back(d_x);
outs.emplace_back(d_x);
int elewise_add_axis = -1;
phi::funcs::BroadcastKernel<phi::ElementwiseType::kBinary, T, T>(
ctx.cuda_device_context(), ins, &outs, elewise_add_axis,
phi::funcs::AddFunctor<T>());
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(fused_attention, ops::FusedAttentionOpKernel<float>,
ops::FusedAttentionOpKernel<double>,
ops::FusedAttentionOpKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(fused_attention_grad,
ops::FusedAttentionGradKernel<float>,
ops::FusedAttentionGradKernel<double>,
ops::FusedAttentionGradKernel<plat::float16>);
| 1f3ceeb5091eaab474bcfc2e6578b53435f5f0f3.cu | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <cuda_fp16.h>
#include <cub/cub.cuh>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/kernels/funcs/broadcast_function.h"
#include "paddle/phi/kernels/funcs/elementwise_functor.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/fluid/operators/fused/attention_layer_norm.h"
#include "paddle/fluid/operators/fused/attn_gemm.h"
#include "paddle/fluid/operators/fused/fmha_ref.h"
#include "paddle/fluid/operators/fused/fused_dropout_helper.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T>
static void AllReduce(framework::Tensor &tensor, // NOLINT
const int ring_id,
const platform::CUDADeviceContext &ctx) {
if (ring_id == -1) return;
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
auto dtype =
platform::ToNCCLDataType(framework::TransToProtoVarType(tensor.dtype()));
int64_t numel = tensor.numel();
const void *sendbuff = tensor.data<T>();
auto place = ctx.GetPlace();
void *recvbuff = tensor.mutable_data<T>(place);
auto comm = platform::NCCLCommContext::Instance().Get(ring_id, place);
auto stream = ctx.stream();
PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce(
sendbuff, recvbuff, numel, dtype, ncclSum, comm->comm(), stream));
#else
PADDLE_THROW(platform::errors::Unimplemented(
"PaddlePaddle should compile with NCCL or RCCL when used tensor model "
"parallel op."));
#endif
}
template <typename T>
class FusedAttentionOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
using U = LayerNormParamType<T>;
auto *input_x = ctx.Input<Tensor>("X");
const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm");
const float epsilon = ctx.Attr<float>("epsilon");
auto *ln_scale = ctx.Input<Tensor>("LnScale");
auto *ln_bias = ctx.Input<Tensor>("LnBias");
auto *ln_mean = ctx.Output<Tensor>("LnMean");
auto *ln_var = ctx.Output<Tensor>("LnVariance");
auto *ln_out = ctx.Output<Tensor>("LnOut");
// x: qkv's input [batch_size, seq_len, dim_embed]
// y: qkv's weight: [3, num_head, dim_head, dim_embed]
auto *qkv_weight = ctx.Input<Tensor>("QKVW");
auto *qkv_bias = ctx.Input<Tensor>("QKVBias");
auto *qkv_out = ctx.Output<Tensor>("QKVOut");
auto *qkv_bias_out = ctx.Output<Tensor>("QKVBiasOut");
auto *src_mask = ctx.Input<Tensor>("SrcMask");
auto *transpose_out_2 = ctx.Output<Tensor>("TransposeOut2");
auto *cache_kv = ctx.Input<Tensor>("CacheKV");
auto *cache_kv_out = ctx.Output<Tensor>("CacheKVOut");
auto *qk_out = ctx.Output<Tensor>("QKOut");
auto *qktv_out = ctx.Output<Tensor>("QKTVOut");
auto *softmax_out = ctx.Output<Tensor>("SoftmaxOut");
auto *attn_dropout_mask_out = ctx.Output<Tensor>("AttnDropoutMaskOut");
auto *attn_dropout_out = ctx.Output<Tensor>("AttnDropoutOut");
auto *src_mask_out = ctx.Output<Tensor>("SrcMaskOut");
auto *fmha_out = ctx.Output<Tensor>("FMHAOut");
auto *out_linear_weight = ctx.Input<Tensor>("OutLinearW");
auto *out_linear_bias = ctx.Input<Tensor>("OutLinearBias");
auto *out_linear_out = ctx.Output<Tensor>("OutLinearOut");
auto *ln_scale_2 = ctx.Input<Tensor>("Ln2Scale");
auto *ln_bias_2 = ctx.Input<Tensor>("Ln2Bias");
auto *dropout_mask_out = ctx.Output<Tensor>("DropoutMaskOut");
auto *bias_dropout_residual_out =
ctx.Output<Tensor>("BiasDropoutResidualOut");
auto *ln_mean_2 = ctx.Output<Tensor>("Ln2Mean");
auto *ln_var_2 = ctx.Output<Tensor>("Ln2Variance");
const float ln_epsilon = ctx.Attr<float>("ln_epsilon");
float attn_dropout_rate = ctx.Attr<float>("attn_dropout_rate");
bool is_test_1 = ctx.Attr<bool>("is_test");
auto &dropout_implementation_1 =
ctx.Attr<std::string>("attn_dropout_implementation");
bool is_upscale_in_train_1 =
(dropout_implementation_1 == "upscale_in_train");
auto *seed_1 = ctx.HasInput("Seed1") ? ctx.Input<Tensor>("Seed1") : nullptr;
bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed");
int seed_val_1 = ctx.Attr<int>("attn_dropout_seed");
int ring_id = ctx.Attr<int>("ring_id");
// final output.
auto *out = ctx.Output<Tensor>("Y");
// get data ptr for qkv part.
const auto input_x_dims = input_x->dims();
const auto qkv_w_dims = qkv_weight->dims();
auto *x_data = input_x->data<T>();
auto *qkv_weight_data = qkv_weight->data<T>();
auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>();
auto *qkv_out_data = qkv_out->mutable_data<T>(ctx.GetPlace());
auto *qkv_bias_out_data =
(qkv_bias == nullptr) ? nullptr
: qkv_bias_out->mutable_data<T>(ctx.GetPlace());
// get data ptr for FMHA.
auto *transpose_out_2_data =
transpose_out_2->mutable_data<T>(ctx.GetPlace());
auto *cache_kv_out_data =
(cache_kv_out == nullptr)
? nullptr
: cache_kv_out->mutable_data<T>(ctx.GetPlace());
auto *qk_out_data = qk_out->mutable_data<T>(ctx.GetPlace());
auto *qktv_out_data = qktv_out->mutable_data<T>(ctx.GetPlace());
auto *src_mask_out_data =
(src_mask == nullptr) ? nullptr
: src_mask_out->mutable_data<T>(ctx.GetPlace());
auto *softmax_out_data = softmax_out->mutable_data<T>(ctx.GetPlace());
auto *attn_dropout_mask_out_data =
attn_dropout_mask_out->mutable_data<uint8_t>(ctx.GetPlace());
auto *attn_dropout_out_data =
attn_dropout_out->mutable_data<T>(ctx.GetPlace());
auto *fmha_out_data = fmha_out->mutable_data<T>(ctx.GetPlace());
// get data ptr for out_linear.
auto *out_linear_weight_data = out_linear_weight->data<T>();
auto *out_linear_bias_data =
(out_linear_bias == nullptr) ? nullptr : out_linear_bias->data<T>();
auto *out_linear_out_data = out_linear_out->mutable_data<T>(ctx.GetPlace());
// get data ptr for bias+dropout+residual+layernorm
auto *dropout_mask_out_data =
dropout_mask_out->mutable_data<uint8_t>(ctx.GetPlace());
auto *final_out_data = out->mutable_data<T>(ctx.GetPlace());
int batch_size = input_x_dims[0];
int max_seq_len = input_x_dims[1];
int dim_embed = input_x_dims[2];
int num_head = qkv_w_dims[1];
int dim_head = qkv_w_dims[2];
int bsz_seq = batch_size * max_seq_len;
int hidden_size = num_head * dim_head;
int output_size = 3 * hidden_size;
int input_size = dim_embed;
auto layer_norm_compute = AttnLayerNorm<T>(ctx.cuda_device_context(),
epsilon, bsz_seq, dim_embed);
bool compute_bias = true;
if (qkv_bias == nullptr) {
compute_bias = false;
}
// (transA, transB, compute_bias) = (false, true, true)
auto qkv_compute =
AttnMatMul<T>(ctx.cuda_device_context(), false, true, bsz_seq,
output_size, input_size, compute_bias);
AttnDropoutParam attn_dropout_param(
is_test_1, dropout_implementation_1, attn_dropout_rate,
is_upscale_in_train_1, is_fix_seed_1, seed_val_1, seed_1);
auto fmha_ref_compute =
FMHARef<T>(ctx.cuda_device_context(), batch_size, max_seq_len, num_head,
dim_head, attn_dropout_param);
output_size = hidden_size;
// (transA, transB, compute_bias) = (false, false, false)
// NOTE(Yuang Liu): For general input size == output size, change the
// position won't have effects. For mp, the output size is mp_head * dkey
// which is actually the input size. While the input size is hidden size,
// which is actually the output size. So for out linear, switch the
// input size and output size.
auto out_linear_compute =
AttnMatMul<T>(ctx.cuda_device_context(), false, false, bsz_seq,
input_size, output_size, false);
DropoutParam dropout_param2(ctx, 0);
FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper(
ctx.cuda_device_context(), bsz_seq, dim_embed, dropout_param2,
ln_epsilon);
if (pre_layer_norm) {
auto *ln_scale_data =
(ln_scale == nullptr ? nullptr : ln_scale->data<U>());
auto *ln_bias_data = (ln_bias == nullptr ? nullptr : ln_bias->data<U>());
auto *ln_mean_data = ln_mean->mutable_data<U>(ctx.GetPlace());
auto *ln_var_data = ln_var->mutable_data<U>(ctx.GetPlace());
auto *ln_out_data = ln_out->mutable_data<T>(ctx.GetPlace());
layer_norm_compute.ComputeForward(x_data, ln_scale_data, ln_bias_data,
ln_out_data, ln_mean_data, ln_var_data);
qkv_compute.ComputeForward(qkv_weight, ln_out, qkv_bias, qkv_out,
qkv_bias_out);
} else {
qkv_compute.ComputeForward(qkv_weight, input_x, qkv_bias, qkv_out,
qkv_bias_out);
}
if (qkv_bias == nullptr) {
fmha_ref_compute.ComputeForward(
*qkv_out, cache_kv, src_mask, transpose_out_2, cache_kv_out, qk_out,
src_mask_out, softmax_out, attn_dropout_mask_out, attn_dropout_out,
qktv_out, fmha_out);
} else {
fmha_ref_compute.ComputeForward(
*qkv_bias_out, cache_kv, src_mask, transpose_out_2, cache_kv_out,
qk_out, src_mask_out, softmax_out, attn_dropout_mask_out,
attn_dropout_out, qktv_out, fmha_out);
}
// fmha_out: [batch_size, seq_len, num_head, head_dim]
// weight: [embed_dim, embed_dim]
// out_linear_out: [batch_size, seq_len, embed_dim]
out_linear_compute.ComputeForward(out_linear_weight, fmha_out, nullptr,
out_linear_out, nullptr);
// tensor model parallel
AllReduce<T>(*out_linear_out, ring_id, ctx.cuda_device_context());
if (pre_layer_norm) {
// output = (residual + dropout(input + bias))
fused_dropout_layernorm_helper.ResidualDropoutBias(
ctx.cuda_device_context(), out_linear_out_data, x_data,
out_linear_bias_data, final_out_data, dropout_mask_out_data);
} else {
auto *ln_scale_2_data =
(ln_scale_2 == nullptr ? nullptr : ln_scale_2->data<U>());
auto *ln_bias_2_data =
(ln_bias_2 == nullptr ? nullptr : ln_bias_2->data<U>());
auto *bias_dropout_residual_out_data =
bias_dropout_residual_out->mutable_data<T>(ctx.GetPlace());
auto *ln_mean_2_data = ln_mean_2->mutable_data<U>(ctx.GetPlace());
auto *ln_var_2_data = ln_var_2->mutable_data<U>(ctx.GetPlace());
// output = layernorm(residual + dropout(input + bias))
fused_dropout_layernorm_helper.LayernormResidualDropoutBias(
ctx.cuda_device_context(), out_linear_out_data, x_data,
out_linear_bias_data, ln_scale_2_data, ln_bias_2_data,
bias_dropout_residual_out_data, dropout_mask_out_data, final_out_data,
ln_mean_2_data, ln_var_2_data);
}
}
};
template <typename T>
class FusedAttentionGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
using U = LayerNormParamType<T>;
const auto pre_layer_norm = ctx.Attr<bool>("pre_layer_norm");
const float epsilon = ctx.Attr<float>("epsilon");
const float ln2epsilon = ctx.Attr<float>("ln_epsilon");
float attn_dropout_prob = ctx.Attr<float>("attn_dropout_rate");
bool is_test_1 = ctx.Attr<bool>("is_test");
auto &dropout_implementation_1 =
ctx.Attr<std::string>("attn_dropout_implementation");
bool is_upscale_in_train_1 =
(dropout_implementation_1 == "upscale_in_train");
auto *seed_1 = ctx.HasInput("Seed1") ? ctx.Input<Tensor>("Seed1") : nullptr;
bool is_fix_seed_1 = ctx.Attr<bool>("attn_dropout_fix_seed");
int seed_val_1 = ctx.Attr<int>("attn_dropout_seed");
int ring_id = ctx.Attr<int>("ring_id");
// get inputs.
auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
auto *d_y_data = d_y->data<T>();
// fw input
auto *input_x = ctx.Input<Tensor>("X");
auto *ln_scale = ctx.Input<Tensor>("LnScale");
auto *ln_2_scale = ctx.Input<Tensor>("Ln2Scale");
auto *x_data = input_x->data<T>();
auto *ln_scale_data = (ln_scale == nullptr ? nullptr : ln_scale->data<U>());
auto *ln_2_scale_data =
(ln_2_scale == nullptr ? nullptr : ln_2_scale->data<U>());
// fw parameters.
auto *src_mask = ctx.Input<Tensor>("SrcMask");
auto *qkv_weight = ctx.Input<Tensor>("QKVW");
auto *qkv_bias = ctx.Input<Tensor>("QKVBias");
auto *out_linear_weight = ctx.Input<Tensor>("OutLinearW");
auto *out_linear_bias = ctx.Input<Tensor>("OutLinearBias");
auto *src_mask_data = (src_mask == nullptr ? nullptr : src_mask->data<T>());
auto *qkv_weight_data = qkv_weight->data<T>();
auto *qkv_bias_data = (qkv_bias == nullptr) ? nullptr : qkv_bias->data<T>();
auto *out_linear_weight_data = out_linear_weight->data<T>();
auto *out_linear_bias_data =
(out_linear_bias == nullptr) ? nullptr : out_linear_bias->data<T>();
// fw output
auto *fmha_out = ctx.Input<Tensor>("FMHAOut");
auto *transpose_out_2 = ctx.Input<Tensor>("TransposeOut2");
auto *qk_out = ctx.Input<Tensor>("QKOut");
auto *qktv_out = ctx.Input<Tensor>("QKTVOut");
auto *softmax_out = ctx.Input<Tensor>("SoftmaxOut");
auto *attn_dropout_mask_out = ctx.Input<Tensor>("AttnDropoutMaskOut");
auto *attn_dropout_out = ctx.Input<Tensor>("AttnDropoutOut");
auto *src_mask_out = ctx.Input<Tensor>("SrcMaskOut");
auto *out_linear_out = ctx.Input<Tensor>("OutLinearOut");
auto *ln_2_mean = ctx.Input<Tensor>("Ln2Mean");
auto *ln_2_var = ctx.Input<Tensor>("Ln2Variance");
auto *dropout_mask_out = ctx.Input<Tensor>("DropoutMaskOut");
auto *bias_dropout_residual_out =
ctx.Input<Tensor>("BiasDropoutResidualOut");
auto *fmha_out_data = fmha_out->data<T>();
auto *transpose_out_2_data = transpose_out_2->data<T>();
auto *qk_out_data = qk_out->data<T>();
auto *qktv_out_data = qktv_out->data<T>();
auto *softmax_out_data = softmax_out->data<T>();
auto *src_mask_out_data =
(src_mask == nullptr) ? nullptr : src_mask_out->data<T>();
auto *out_linear_out_data = out_linear_out->data<T>();
auto *dropout_mask_out_data = dropout_mask_out->data<uint8_t>();
// output's grad
auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *d_qkv_out = ctx.Output<Tensor>(framework::GradVarName("QKVOut"));
auto *d_qkv_bias_out =
ctx.Output<Tensor>(framework::GradVarName("QKVBiasOut"));
auto *d_qktv_out = ctx.Output<Tensor>(framework::GradVarName("QKTVOut"));
auto *d_transpose_out_2 =
ctx.Output<Tensor>(framework::GradVarName("TransposeOut2"));
auto *d_qk_out = ctx.Output<Tensor>(framework::GradVarName("QKOut"));
auto *d_softmax_out =
ctx.Output<Tensor>(framework::GradVarName("SoftmaxOut"));
auto *d_attn_dropout_out =
ctx.Output<Tensor>(framework::GradVarName("AttnDropoutOut"));
auto *d_src_mask_out =
ctx.Output<Tensor>(framework::GradVarName("SrcMaskOut"));
auto *d_fmha_out = ctx.Output<Tensor>(framework::GradVarName("FMHAOut"));
auto *d_out_linear_out =
ctx.Output<Tensor>(framework::GradVarName("OutLinearOut"));
auto *d_bias_dropout_residual_out =
ctx.Output<Tensor>(framework::GradVarName("BiasDropoutResidualOut"));
auto *d_x_data = d_x->mutable_data<T>(ctx.GetPlace());
// when qkv_bias is not nullptr, d_qkv_out is equals to d_qkv_bias_out, the
// space can be reused.
auto *d_qkv_out_data = (d_qkv_bias_out != nullptr)
? nullptr
: d_qkv_out->mutable_data<T>(ctx.GetPlace());
auto *d_qkv_bias_out_data =
(d_qkv_bias_out == nullptr)
? nullptr
: d_qkv_bias_out->mutable_data<T>(ctx.GetPlace());
auto *d_qktv_out_data = d_qktv_out->mutable_data<T>(ctx.GetPlace());
auto *d_transpose_out_2_data =
d_transpose_out_2->mutable_data<T>(ctx.GetPlace());
auto *d_qk_out_data = d_qk_out->mutable_data<T>(ctx.GetPlace());
auto *d_softmax_out_data = d_softmax_out->mutable_data<T>(ctx.GetPlace());
auto *d_attn_dropout_out_data =
d_attn_dropout_out->mutable_data<T>(ctx.GetPlace());
auto *d_src_mask_out_data =
(src_mask == nullptr) ? nullptr
: d_src_mask_out->mutable_data<T>(ctx.GetPlace());
auto *d_fmha_out_data = d_fmha_out->mutable_data<T>(ctx.GetPlace());
auto *d_out_linear_out_data =
d_out_linear_out->mutable_data<T>(ctx.GetPlace());
// parameter grad
auto *d_qkv_weight = ctx.Output<Tensor>(framework::GradVarName("QKVW"));
auto *d_qkv_bias = ctx.Output<Tensor>(framework::GradVarName("QKVBias"));
auto *d_out_linear_weight =
ctx.Output<Tensor>(framework::GradVarName("OutLinearW"));
auto *d_out_linear_bias =
ctx.Output<Tensor>(framework::GradVarName("OutLinearBias"));
auto *d_ln_2_scale = ctx.Output<Tensor>(framework::GradVarName("Ln2Scale"));
auto *d_ln_2_bias = ctx.Output<Tensor>(framework::GradVarName("Ln2Bias"));
auto *d_qkv_weight_data = d_qkv_weight->mutable_data<T>(ctx.GetPlace());
auto *d_qkv_bias_data = (d_qkv_bias == nullptr)
? nullptr
: d_qkv_bias->mutable_data<T>(ctx.GetPlace());
auto *d_out_linear_weight_data =
d_out_linear_weight->mutable_data<T>(ctx.GetPlace());
auto *d_out_linear_bias_data =
(d_out_linear_bias == nullptr)
? nullptr
: d_out_linear_bias->mutable_data<T>(ctx.GetPlace());
const auto input_x_dims = input_x->dims();
const auto qkv_w_dims = qkv_weight->dims();
int batch_size = input_x_dims[0];
int max_seq_len = input_x_dims[1];
int dim_embed = input_x_dims[2];
int num_head = qkv_w_dims[1];
int dim_head = qkv_w_dims[2];
int bsz_seq = batch_size * max_seq_len;
int hidden_size = num_head * dim_head;
int output_size = 3 * hidden_size;
int input_size = dim_embed;
Tensor d_residual;
d_residual.Resize(input_x_dims);
T *d_residual_data = d_residual.mutable_data<T>(ctx.GetPlace());
bool transA = false;
bool transB = true;
bool compute_qkv_bias = true;
if (qkv_bias == nullptr) {
compute_qkv_bias = false;
}
auto layer_norm_compute = AttnLayerNorm<T>(ctx.cuda_device_context(),
epsilon, bsz_seq, dim_embed);
auto qkv_compute =
AttnMatMul<T>(ctx.cuda_device_context(), transA, transB, bsz_seq,
output_size, input_size, compute_qkv_bias);
AttnDropoutParam attn_dropout_param(
is_test_1, dropout_implementation_1, attn_dropout_prob,
is_upscale_in_train_1, is_fix_seed_1, seed_val_1, seed_1);
auto fmha_ref_compute =
FMHARef<T>(ctx.cuda_device_context(), batch_size, max_seq_len, num_head,
dim_head, attn_dropout_param);
output_size = hidden_size;
transA = false;
transB = false;
bool compute_bias = false;
// (b*s, num_head * dim_head) * (num_head * dim_head, dim_embed)
auto out_linear_compute =
AttnMatMul<T>(ctx.cuda_device_context(), transA, transB, bsz_seq,
input_size, output_size, compute_bias);
DropoutParam dropout_param2(ctx, 0);
FusedDropoutLayerNormHelper<T, uint8_t> fused_dropout_layernorm_helper(
ctx.cuda_device_context(), bsz_seq, dim_embed, dropout_param2,
ln2epsilon);
if (pre_layer_norm) {
fused_dropout_layernorm_helper.ResidualDropoutBiasGrad(
ctx.cuda_device_context(), d_y_data, dropout_mask_out_data,
d_out_linear_out_data, d_residual_data, d_out_linear_bias_data);
} else {
auto *ln_2_mean_data = ln_2_mean->data<U>();
auto *ln_2_var_data = ln_2_var->data<U>();
auto *bias_dropout_residual_out_data =
bias_dropout_residual_out->data<T>();
auto *d_ln_2_scale_data =
(d_ln_2_scale == nullptr ? nullptr : d_ln_2_scale->mutable_data<U>(
ctx.GetPlace()));
auto *d_ln_2_bias_data =
(d_ln_2_bias == nullptr ? nullptr : d_ln_2_bias->mutable_data<U>(
ctx.GetPlace()));
auto *d_bias_dropout_residual_out_data =
d_bias_dropout_residual_out->mutable_data<T>(ctx.GetPlace());
fused_dropout_layernorm_helper.LayernormResidualDropoutBiasGrad(
ctx.cuda_device_context(), d_y_data, bias_dropout_residual_out_data,
dropout_mask_out_data, ln_2_scale_data, ln_2_mean_data, ln_2_var_data,
d_bias_dropout_residual_out_data, d_ln_2_scale_data, d_ln_2_bias_data,
d_out_linear_out_data, d_out_linear_bias_data, d_residual_data);
}
out_linear_compute.ComputeBackward(fmha_out, out_linear_weight,
d_out_linear_out, d_fmha_out,
d_out_linear_weight, nullptr);
if (qkv_bias != nullptr) {
fmha_ref_compute.ComputeBackward(
*transpose_out_2, src_mask, *softmax_out, *attn_dropout_mask_out,
*attn_dropout_out, *qk_out, *src_mask_out, *d_fmha_out, d_qktv_out,
d_attn_dropout_out, d_softmax_out, d_src_mask_out, d_qk_out,
d_transpose_out_2, nullptr, d_qkv_bias_out);
} else {
fmha_ref_compute.ComputeBackward(
*transpose_out_2, src_mask, *softmax_out, *attn_dropout_mask_out,
*attn_dropout_out, *qk_out, *src_mask_out, *d_fmha_out, d_qktv_out,
d_attn_dropout_out, d_softmax_out, d_src_mask_out, d_qk_out,
d_transpose_out_2, nullptr, d_qkv_out);
}
if (pre_layer_norm) {
auto *ln_mean = ctx.Input<Tensor>("LnMean");
auto *ln_var = ctx.Input<Tensor>("LnVariance");
auto *ln_out = ctx.Input<Tensor>("LnOut");
auto *ln_mean_data = ln_mean->data<U>();
auto *ln_var_data = ln_var->data<U>();
auto *ln_out_data = ln_out->data<T>();
auto *d_ln_out = ctx.Output<Tensor>(framework::GradVarName("LnOut"));
auto *d_ln_scale = ctx.Output<Tensor>(framework::GradVarName("LnScale"));
auto *d_ln_bias = ctx.Output<Tensor>(framework::GradVarName("LnBias"));
auto *d_ln_out_data = d_ln_out->mutable_data<T>(ctx.GetPlace());
auto *d_ln_scale_data =
(d_ln_scale == nullptr ? nullptr
: d_ln_scale->mutable_data<U>(ctx.GetPlace()));
auto *d_ln_bias_data =
(d_ln_bias == nullptr ? nullptr
: d_ln_bias->mutable_data<U>(ctx.GetPlace()));
if (qkv_bias != nullptr) {
qkv_compute.ComputeBackward(ln_out, qkv_weight, d_qkv_bias_out,
d_ln_out, d_qkv_weight, d_qkv_bias);
} else {
qkv_compute.ComputeBackward(ln_out, qkv_weight, d_qkv_out, d_ln_out,
d_qkv_weight, d_qkv_bias);
}
// tensor model parallel
AllReduce<T>(*d_ln_out, ring_id, ctx.cuda_device_context());
layer_norm_compute.ComputeBackward(x_data, d_ln_out_data, ln_scale_data,
ln_mean_data, ln_var_data, d_x_data,
d_ln_scale_data, d_ln_bias_data);
} else {
if (qkv_bias != nullptr) {
qkv_compute.ComputeBackward(input_x, qkv_weight, d_qkv_bias_out, d_x,
d_qkv_weight, d_qkv_bias);
} else {
qkv_compute.ComputeBackward(input_x, qkv_weight, d_qkv_out, d_x,
d_qkv_weight, d_qkv_bias);
}
// tensor model parallel
AllReduce<T>(*d_x, ring_id, ctx.cuda_device_context());
}
// gradient accumulation
std::vector<const Tensor *> ins;
std::vector<Tensor *> outs;
ins.emplace_back(&d_residual);
ins.emplace_back(d_x);
outs.emplace_back(d_x);
int elewise_add_axis = -1;
phi::funcs::BroadcastKernel<phi::ElementwiseType::kBinary, T, T>(
ctx.cuda_device_context(), ins, &outs, elewise_add_axis,
phi::funcs::AddFunctor<T>());
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(fused_attention, ops::FusedAttentionOpKernel<float>,
ops::FusedAttentionOpKernel<double>,
ops::FusedAttentionOpKernel<plat::float16>);
REGISTER_OP_CUDA_KERNEL(fused_attention_grad,
ops::FusedAttentionGradKernel<float>,
ops::FusedAttentionGradKernel<double>,
ops::FusedAttentionGradKernel<plat::float16>);
|
ab3564c0985b18a2a0e171ad620587ac4f0fc3e7.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_math.h>
#include <helper_cuda.h>
#include <float.h>
#include "ray.h"
#include "triangle.h"
#include "camera.h"
#include "aabb.h"
#include "node.h"
#define MAX_DEPTH 0
#define SMALLEST_DIST 1e-4
__constant__ Camera cam;
__constant__ int c_image_width;
__constant__ int c_image_height;
__device__ void writePixel(int x, int y, int image_width, float3 color, unsigned char *d_image);
__device__ float planeIntersection(Ray ray, int dimension, float splitPoint)
{
float *origin = &(ray.getOrigin().x);
float *direction = &(ray.getDirection().x);
if (direction[dimension] == 0.0) {return FLT_MAX; }
float t = (splitPoint - origin[dimension]) / direction[dimension];
return t;
}
__device__ float closestHitObject(Ray& ray, Triangle *d_objects, Node *d_nodesList, Triangle &closestObject, long long int &intersectionTests)
{
Node root = d_nodesList[0];
float tmin, tmax;
bool intersection = root.boundingBox.intersect(ray, tmin, tmax);
// No intersection with the scene bounding box. Return immediately.
if (!intersection)
{
return FLT_MAX;
}
else
{
float tclosest = FLT_MAX;
Node nearChild, farChild;
// Initialize stack to hold traversal nodes.
int stackPosition = 0;
//int stackMemory = 16;
// Node *stack = (Node *) malloc (stackMemory * sizeof(Node));
Node stack[24];
// Find intersection point with splitting plane.
float tPlane = planeIntersection(ray, root.splitDimension, root.splitPoint);
// Compute value along splitting dimension for plane, and tmin, tmax.
float3 tPlanePoint = ray.getOrigin() + tPlane * ray.getDirection();
float tPlaneDimPoint = (&(tPlanePoint.x))[root.splitDimension];
float3 tMinPoint = ray.getOrigin() + tmin * ray.getDirection();
float tMinDimPoint = (&(tMinPoint.x))[root.splitDimension];
float3 tMaxPoint = ray.getOrigin() + tmax * ray.getDirection();
float tMaxDimPoint = (&(tMaxPoint.x))[root.splitDimension];
// Both children are intersected by the ray.
if (0 < tPlane && tPlane < tmax)
{
// Classify the near and far children for the ray.
if (tMinDimPoint < tPlaneDimPoint)
{
nearChild = d_nodesList[root.leftChildIndex];
farChild = d_nodesList[root.rightChildIndex];
}
else
{
farChild = d_nodesList[root.leftChildIndex];
nearChild = d_nodesList[root.rightChildIndex];
}
stack[stackPosition++] = farChild;
stack[stackPosition++] = nearChild;
}
// Only one child is intersected.
else if (tMinDimPoint < tPlaneDimPoint)
{
nearChild = d_nodesList[root.leftChildIndex];
stack[stackPosition++] = nearChild;
}
else
{
nearChild = d_nodesList[root.rightChildIndex];
stack[stackPosition++] = nearChild;
}
// Iterative traversal of the kdtree using the stack.
while(stackPosition!=0)
{
Node currentNode = stack[stackPosition-1];
stackPosition--;
/* if (4 * stackPosition < stackMemory && stackMemory > 8)
{
stack = (Node *) realloc(stack, stackMemory * sizeof(Node) / 2);
stackMemory = stackMemory / 2;
}
*/
// if not a leaf
if(currentNode.leftChildIndex != -1)
{
// Find intersection point with splitting plane.
tPlane = planeIntersection(ray, currentNode.splitDimension, currentNode.splitPoint);
// Compute value along splitting dimension for plane, and tmin, tmax.
tPlanePoint = ray.getOrigin() + tPlane * ray.getDirection();
tPlaneDimPoint = (&(tPlanePoint.x))[currentNode.splitDimension];
tMinPoint = ray.getOrigin() + tmin * ray.getDirection();
tMinDimPoint = (&(tMinPoint.x))[currentNode.splitDimension];
tMaxPoint = ray.getOrigin() + tmax * ray.getDirection();
tMaxDimPoint = (&(tMaxPoint.x))[currentNode.splitDimension];
// Both children are intersected by the ray.
if (0 < tPlane && tPlane < tmax)
{
// Classify the near and far children for the ray.
if (tMinDimPoint < tPlaneDimPoint)
{
nearChild = d_nodesList[currentNode.leftChildIndex];
farChild = d_nodesList[currentNode.rightChildIndex];
}
else
{
farChild = d_nodesList[currentNode.leftChildIndex];
nearChild = d_nodesList[currentNode.rightChildIndex];
}
// Allocate more memory in the stack if needed.
/* if (stackPosition == stackMemory)
{
stack = (Node *) realloc (stack, 2 * stackMemory * sizeof(Node));
stackMemory = stackMemory * 2;
}
*/
stack[stackPosition++] = farChild;
stack[stackPosition++] = nearChild;
}
// Only one child is intersected.
else if (tMinDimPoint < tPlaneDimPoint)
{
Node nearChild = d_nodesList[currentNode.leftChildIndex];
// Allocate more memory in the stack if needed.
/* if (stackPosition == stackMemory)
{
stack = (Node *) realloc (stack, 2 * stackMemory * sizeof(Node));
stackMemory = stackMemory * 2;
}
*/
stack[stackPosition++] = nearChild;
}
else
{
Node nearChild = d_nodesList[currentNode.rightChildIndex];
// Allocate more memory in the stack if needed.
/* if (stackPosition == stackMemory)
{
stack = (Node *) realloc (stack, 2 * stackMemory * sizeof(Node));
stackMemory = stackMemory * 2;
}
*/
stack[stackPosition++] = nearChild;
}
}
// Leaf node
else
{
tclosest = FLT_MAX;
float t;
// Intersect ray with all triangles in the leaf. Find closest triangle. Return if any intersection is found.
for (int i=currentNode.startIndex; i < currentNode.endIndex; i++)
{
t = d_objects[i].intersect(ray);
if (t < tclosest)
{
tclosest = t;
closestObject = d_objects[i];
stackPosition = 0;
}
}
intersectionTests += (currentNode.endIndex - currentNode.startIndex ) - 1;
}
}
return tclosest;
}
}
__device__ float3 traceRay(Ray& ray, Triangle *d_objects, int objects_count, PointLight *lights, int lights_count, Node *d_nodesList, long long int &intersections)
{
// Exit if MAX_DEPTH reached.
if (ray.getDepth() > MAX_DEPTH)
{
return make_float3(0.0, 0.0, 0.0);
}
else
{
float tmin = FLT_MAX;
Triangle closestObject;
// Find closest hit object.
tmin = closestHitObject(ray, d_objects, d_nodesList, closestObject, intersections);
// Some object was hit.
if (tmin != FLT_MAX)
{
// float3 final_color = make_float3(1.0, 1.0, 0.0);
float3 final_color = closestObject.shade(ray, tmin, d_objects, objects_count, lights, lights_count, cam);
/* if (closestObject.isReflective())
{
Ray reflectedRay = closestObject.getReflectedRay(ray, tmin);
float3 r_color = closestObject.getMaterial().kr * traceRay(reflectedRay, d_objects, objects_count, lights, lights_count, d_nodesList);
final_color = final_color + r_color;
}
if (closestObject.isRefractive())
{
Ray refractedRay = closestObject.getRefractedRay(ray, tmin);
float3 r_color = closestObject.getMaterial().kt * traceRay(refractedRay, d_objects, objects_count, lights, lights_count, d_nodesList);
final_color = final_color + r_color;
}
*/ return final_color;
}
else
{
return make_float3(0.0, 0.0, 0.0);
}
}
}
__global__ void colorPixel(unsigned char *d_image, Triangle *d_objects, int objects_count, PointLight *lights, int lights_count, long long int *times, Node *d_nodesList)
{
//long long int start = clock64();
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x >= c_image_width || y >= c_image_height)
return;
float3 ray_dir = cam.get_ray_direction(x, y);
Ray ray(cam.getPosition(), ray_dir);
long long int intersections = 0;
float3 pixel_color = traceRay(ray, d_objects, objects_count, lights, lights_count, d_nodesList, intersections);
//long long int end = clock64();
//long long int duration = end - start;
times[y * c_image_width + x] = intersections;
writePixel(x, y, c_image_width, pixel_color, d_image);
}
__device__ void writePixel(int x, int y, int image_width, float3 color, unsigned char *d_image)
{
d_image[(y*image_width + x) * 3 + 0] = (unsigned char) (color.x * 255);
d_image[(y*image_width + x) * 3 + 1] = (unsigned char) (color.y * 255);
d_image[(y*image_width + x) * 3 + 2] = (unsigned char) (color.z * 255);
}
| ab3564c0985b18a2a0e171ad620587ac4f0fc3e7.cu | #include <cuda_runtime.h>
#include <helper_math.h>
#include <helper_cuda.h>
#include <float.h>
#include "ray.h"
#include "triangle.h"
#include "camera.h"
#include "aabb.h"
#include "node.h"
#define MAX_DEPTH 0
#define SMALLEST_DIST 1e-4
__constant__ Camera cam;
__constant__ int c_image_width;
__constant__ int c_image_height;
__device__ void writePixel(int x, int y, int image_width, float3 color, unsigned char *d_image);
__device__ float planeIntersection(Ray ray, int dimension, float splitPoint)
{
float *origin = &(ray.getOrigin().x);
float *direction = &(ray.getDirection().x);
if (direction[dimension] == 0.0) {return FLT_MAX; }
float t = (splitPoint - origin[dimension]) / direction[dimension];
return t;
}
__device__ float closestHitObject(Ray& ray, Triangle *d_objects, Node *d_nodesList, Triangle &closestObject, long long int &intersectionTests)
{
Node root = d_nodesList[0];
float tmin, tmax;
bool intersection = root.boundingBox.intersect(ray, tmin, tmax);
// No intersection with the scene bounding box. Return immediately.
if (!intersection)
{
return FLT_MAX;
}
else
{
float tclosest = FLT_MAX;
Node nearChild, farChild;
// Initialize stack to hold traversal nodes.
int stackPosition = 0;
//int stackMemory = 16;
// Node *stack = (Node *) malloc (stackMemory * sizeof(Node));
Node stack[24];
// Find intersection point with splitting plane.
float tPlane = planeIntersection(ray, root.splitDimension, root.splitPoint);
// Compute value along splitting dimension for plane, and tmin, tmax.
float3 tPlanePoint = ray.getOrigin() + tPlane * ray.getDirection();
float tPlaneDimPoint = (&(tPlanePoint.x))[root.splitDimension];
float3 tMinPoint = ray.getOrigin() + tmin * ray.getDirection();
float tMinDimPoint = (&(tMinPoint.x))[root.splitDimension];
float3 tMaxPoint = ray.getOrigin() + tmax * ray.getDirection();
float tMaxDimPoint = (&(tMaxPoint.x))[root.splitDimension];
// Both children are intersected by the ray.
if (0 < tPlane && tPlane < tmax)
{
// Classify the near and far children for the ray.
if (tMinDimPoint < tPlaneDimPoint)
{
nearChild = d_nodesList[root.leftChildIndex];
farChild = d_nodesList[root.rightChildIndex];
}
else
{
farChild = d_nodesList[root.leftChildIndex];
nearChild = d_nodesList[root.rightChildIndex];
}
stack[stackPosition++] = farChild;
stack[stackPosition++] = nearChild;
}
// Only one child is intersected.
else if (tMinDimPoint < tPlaneDimPoint)
{
nearChild = d_nodesList[root.leftChildIndex];
stack[stackPosition++] = nearChild;
}
else
{
nearChild = d_nodesList[root.rightChildIndex];
stack[stackPosition++] = nearChild;
}
// Iterative traversal of the kdtree using the stack.
while(stackPosition!=0)
{
Node currentNode = stack[stackPosition-1];
stackPosition--;
/* if (4 * stackPosition < stackMemory && stackMemory > 8)
{
stack = (Node *) realloc(stack, stackMemory * sizeof(Node) / 2);
stackMemory = stackMemory / 2;
}
*/
// if not a leaf
if(currentNode.leftChildIndex != -1)
{
// Find intersection point with splitting plane.
tPlane = planeIntersection(ray, currentNode.splitDimension, currentNode.splitPoint);
// Compute value along splitting dimension for plane, and tmin, tmax.
tPlanePoint = ray.getOrigin() + tPlane * ray.getDirection();
tPlaneDimPoint = (&(tPlanePoint.x))[currentNode.splitDimension];
tMinPoint = ray.getOrigin() + tmin * ray.getDirection();
tMinDimPoint = (&(tMinPoint.x))[currentNode.splitDimension];
tMaxPoint = ray.getOrigin() + tmax * ray.getDirection();
tMaxDimPoint = (&(tMaxPoint.x))[currentNode.splitDimension];
// Both children are intersected by the ray.
if (0 < tPlane && tPlane < tmax)
{
// Classify the near and far children for the ray.
if (tMinDimPoint < tPlaneDimPoint)
{
nearChild = d_nodesList[currentNode.leftChildIndex];
farChild = d_nodesList[currentNode.rightChildIndex];
}
else
{
farChild = d_nodesList[currentNode.leftChildIndex];
nearChild = d_nodesList[currentNode.rightChildIndex];
}
// Allocate more memory in the stack if needed.
/* if (stackPosition == stackMemory)
{
stack = (Node *) realloc (stack, 2 * stackMemory * sizeof(Node));
stackMemory = stackMemory * 2;
}
*/
stack[stackPosition++] = farChild;
stack[stackPosition++] = nearChild;
}
// Only one child is intersected.
else if (tMinDimPoint < tPlaneDimPoint)
{
Node nearChild = d_nodesList[currentNode.leftChildIndex];
// Allocate more memory in the stack if needed.
/* if (stackPosition == stackMemory)
{
stack = (Node *) realloc (stack, 2 * stackMemory * sizeof(Node));
stackMemory = stackMemory * 2;
}
*/
stack[stackPosition++] = nearChild;
}
else
{
Node nearChild = d_nodesList[currentNode.rightChildIndex];
// Allocate more memory in the stack if needed.
/* if (stackPosition == stackMemory)
{
stack = (Node *) realloc (stack, 2 * stackMemory * sizeof(Node));
stackMemory = stackMemory * 2;
}
*/
stack[stackPosition++] = nearChild;
}
}
// Leaf node
else
{
tclosest = FLT_MAX;
float t;
// Intersect ray with all triangles in the leaf. Find closest triangle. Return if any intersection is found.
for (int i=currentNode.startIndex; i < currentNode.endIndex; i++)
{
t = d_objects[i].intersect(ray);
if (t < tclosest)
{
tclosest = t;
closestObject = d_objects[i];
stackPosition = 0;
}
}
intersectionTests += (currentNode.endIndex - currentNode.startIndex ) - 1;
}
}
return tclosest;
}
}
__device__ float3 traceRay(Ray& ray, Triangle *d_objects, int objects_count, PointLight *lights, int lights_count, Node *d_nodesList, long long int &intersections)
{
// Exit if MAX_DEPTH reached.
if (ray.getDepth() > MAX_DEPTH)
{
return make_float3(0.0, 0.0, 0.0);
}
else
{
float tmin = FLT_MAX;
Triangle closestObject;
// Find closest hit object.
tmin = closestHitObject(ray, d_objects, d_nodesList, closestObject, intersections);
// Some object was hit.
if (tmin != FLT_MAX)
{
// float3 final_color = make_float3(1.0, 1.0, 0.0);
float3 final_color = closestObject.shade(ray, tmin, d_objects, objects_count, lights, lights_count, cam);
/* if (closestObject.isReflective())
{
Ray reflectedRay = closestObject.getReflectedRay(ray, tmin);
float3 r_color = closestObject.getMaterial().kr * traceRay(reflectedRay, d_objects, objects_count, lights, lights_count, d_nodesList);
final_color = final_color + r_color;
}
if (closestObject.isRefractive())
{
Ray refractedRay = closestObject.getRefractedRay(ray, tmin);
float3 r_color = closestObject.getMaterial().kt * traceRay(refractedRay, d_objects, objects_count, lights, lights_count, d_nodesList);
final_color = final_color + r_color;
}
*/ return final_color;
}
else
{
return make_float3(0.0, 0.0, 0.0);
}
}
}
__global__ void colorPixel(unsigned char *d_image, Triangle *d_objects, int objects_count, PointLight *lights, int lights_count, long long int *times, Node *d_nodesList)
{
//long long int start = clock64();
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x >= c_image_width || y >= c_image_height)
return;
float3 ray_dir = cam.get_ray_direction(x, y);
Ray ray(cam.getPosition(), ray_dir);
long long int intersections = 0;
float3 pixel_color = traceRay(ray, d_objects, objects_count, lights, lights_count, d_nodesList, intersections);
//long long int end = clock64();
//long long int duration = end - start;
times[y * c_image_width + x] = intersections;
writePixel(x, y, c_image_width, pixel_color, d_image);
}
__device__ void writePixel(int x, int y, int image_width, float3 color, unsigned char *d_image)
{
d_image[(y*image_width + x) * 3 + 0] = (unsigned char) (color.x * 255);
d_image[(y*image_width + x) * 3 + 1] = (unsigned char) (color.y * 255);
d_image[(y*image_width + x) * 3 + 2] = (unsigned char) (color.z * 255);
}
|
87b68fa0d93b0687d564ca24081c12b1df5739e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "projektcuda.h"
//#include "mex.h"
/* Kernel to square elements of the array on the GPU */
__global__ void device_dotMul(t_ve* in1, t_ve* in2,t_ve* out, unsigned int N)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
//__shared__ float vOut[16];
if(idx == 0)out[0] = 0;
//if ( idx < N)vOut[idx] = in1[idx]*in2[idx];
if ( idx < N)out[idx] = in1[idx]*in2[idx];
__syncthreads();
//if(idx < N)out[0] += vOut[idx];
if(idx == 0) {
int i;
for ( i = 1; i < N; i++ ) {
//out[0] += vOut[i];
out[0] += out[i];
}
}
__syncthreads();
}
| 87b68fa0d93b0687d564ca24081c12b1df5739e5.cu | #include "cuda.h"
#include <stdio.h>
#include "projektcuda.h"
//#include "mex.h"
/* Kernel to square elements of the array on the GPU */
__global__ void device_dotMul(t_ve* in1, t_ve* in2,t_ve* out, unsigned int N)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
//__shared__ float vOut[16];
if(idx == 0)out[0] = 0;
//if ( idx < N)vOut[idx] = in1[idx]*in2[idx];
if ( idx < N)out[idx] = in1[idx]*in2[idx];
__syncthreads();
//if(idx < N)out[0] += vOut[idx];
if(idx == 0) {
int i;
for ( i = 1; i < N; i++ ) {
//out[0] += vOut[i];
out[0] += out[i];
}
}
__syncthreads();
}
|
b217a1a162c18597d22fe7cb5b87ad1f61652bcc.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <random>
#include <hip/hip_runtime.h>
#include <hipcub/hipcub.hpp>
#define GPU_NUM_THREADS 256
template <typename scalar_t, typename accscalar_t>
__global__ void sampleMultinomialOnce(
int* dest,
int distributions,
int categories,
const scalar_t* sampled,
const scalar_t* dist,
int stride_dist,
int stride_categories)
{
__shared__ accscalar_t smem[GPU_NUM_THREADS];
__shared__ bool found;
__shared__ int foundPos;
typedef hipcub::BlockReduce<accscalar_t, GPU_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
accscalar_t accZero = static_cast<accscalar_t>(0);
scalar_t zero = static_cast<scalar_t>(0);
for (int curDist = blockIdx.x; curDist < distributions; curDist += gridDim.x) {
// Each block handles one distribution
// First pass, find the total sum of the distribution
accscalar_t sum = accZero;
scalar_t val;
for (int cat = threadIdx.x; cat < categories; cat += blockDim.x) {
val = dist[curDist * stride_dist + cat * stride_categories];
sum += static_cast<accscalar_t>(val);
}
// threadIdx.x == 0 has the sum value from this
sum = BlockReduce(temp_storage).Sum(sum);
// Broadcast sum and sample value
if (threadIdx.x == 0) {
// Make sure the sum of our distribution didn't overflow
foundPos = 0;
smem[0] = sum;
smem[1] = sampled[curDist];
}
__syncthreads();
sum = smem[0];
scalar_t sample = static_cast<scalar_t>(smem[1]);
__syncthreads();
// zero sum
if (sum == accZero) {
// Choose the first element
if (threadIdx.x == 0) {
dest[curDist] = 0;
}
continue;
}
int chunks = (categories + (int)blockDim.x - 1) / blockDim.x;
accscalar_t prevHighProb = accZero;
found = false;
for (int chunk = 0; chunk < chunks && !found; ++chunk) {
// All threads in bounds load a value
int cat = chunk * blockDim.x + threadIdx.x;
accscalar_t dist_val = cat < categories ?
static_cast<accscalar_t>(dist[curDist * stride_dist + cat * stride_categories]) / sum :
accZero;
smem[threadIdx.x] = dist_val;
__syncthreads();
// Perform an inclusive prefix sum of the shared memory contents
for (int offset = 1; offset < blockDim.x; offset *= 2) {
accscalar_t val = accZero;
if (threadIdx.x >= offset) {
val = smem[threadIdx.x - offset] + smem[threadIdx.x];
}
__syncthreads();
if (threadIdx.x >= offset) {
smem[threadIdx.x] = val;
}
__syncthreads();
}
// Each thread will check to see if the sample falls in its bucket
scalar_t curBucket =
static_cast<scalar_t>(smem[threadIdx.x] + prevHighProb);
scalar_t prevBucket = static_cast<scalar_t>(
threadIdx.x == 0 ? prevHighProb
: smem[threadIdx.x - 1] + prevHighProb);
bool inBucket =
(cat < categories) &&
(!(sample >= curBucket) &&
(sample >= prevBucket) &&
(dist_val > zero));
if (inBucket) {
atomicMax(&foundPos, cat);
found = true;
}
// Store the previous scan's high value for future use
prevHighProb = prevHighProb + smem[blockDim.x - 1];
__syncthreads();
}
if (threadIdx.x == 0) {
if (found) {
dest[curDist] = foundPos;
} else {
// This should address a rare bug where we don't select a valid index. This likely occurs when
// due to floating point arithmetic rounding errors, our cumulative sum does not add up to 1, but
// and our uniform sample is greater than this value. In this case we likely have unitialized memory
// in dest[curDist]. So basically we will loop through the distribution and pick the largest index
// where the distribution is non-zero. This is obviously terribly inefficient, but due to the
// rarity in which this occurs, this should not be an issue.
for (int cat = categories - 1; cat >= 0; --cat) {
if (dist[curDist * stride_dist + cat * stride_categories] > zero) {
dest[curDist] = cat;
break;
}
}
}
}
}
}
int main(int argc, char* argv[])
{
if (argc != 4) {
printf("Usage: %s <number of distributions> <number of categories> <repeat>\n", argv[0]);
return 1;
}
const int numDist = atoi(argv[1]);
const int numCategories = atoi(argv[2]);
const int repeat = atoi(argv[3]);
int sample_size_bytes = numDist * sizeof(float);
float *sample = (float*) malloc (sample_size_bytes);
std::default_random_engine g (123);
std::uniform_real_distribution<float> uniform_distr (0.f, 1.f);
for (int i = 0; i < numDist; i++) {
sample[i] = uniform_distr(g);
}
int result_size_bytes = numDist * sizeof(int);
int *result = (int*) malloc (result_size_bytes);
size_t distr_size_bytes = numDist * numCategories * sizeof(float);
float *distr = (float*) malloc (distr_size_bytes);
srand(123);
for (int i = 0; i < numDist; i++) {
for (int j = 0; j < numCategories; j++) {
// they don't need to sum to 1 in which case the values are weights
distr[i * numCategories + j] = rand() % 100 + 1;
}
}
float *d_sample;
hipMalloc((void**)&d_sample, sample_size_bytes);
hipMemcpy(d_sample, sample, sample_size_bytes, hipMemcpyHostToDevice);
float *d_distr;
hipMalloc((void**)&d_distr, distr_size_bytes);
hipMemcpy(d_distr, distr, distr_size_bytes, hipMemcpyHostToDevice);
int *d_result;
hipMalloc((void**)&d_result, result_size_bytes);
int requiredThreads = GPU_NUM_THREADS;
dim3 grid(512);
dim3 block(requiredThreads);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
hipLaunchKernelGGL(( sampleMultinomialOnce<float, float>) , dim3(grid), dim3(block), 0, 0,
d_result, numDist, numCategories, d_sample, d_distr, numCategories, 1);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average execution time of sampleMultinomialOnce kernel: %f (us)\n", (time * 1e-3f) / repeat);
hipMemcpy(result, d_result, result_size_bytes, hipMemcpyDeviceToHost);
double sum = 0, var = 0;
for (int i = 0; i < numDist; i++) sum += result[i];
sum = sum / numDist;
for (int i = 0; i < numDist; i++)
var += (result[i] - sum) * (result[i] - sum);
printf("Variance = %lf\n", var / numDist);
hipFree(d_result);
hipFree(d_sample);
hipFree(d_distr);
free(result);
free(sample);
free(distr);
return 0;
}
| b217a1a162c18597d22fe7cb5b87ad1f61652bcc.cu | #include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <random>
#include <hip/hip_runtime.h>
#include <hipcub/hipcub.hpp>
#define GPU_NUM_THREADS 256
template <typename scalar_t, typename accscalar_t>
__global__ void sampleMultinomialOnce(
int* dest,
int distributions,
int categories,
const scalar_t* sampled,
const scalar_t* dist,
int stride_dist,
int stride_categories)
{
__shared__ accscalar_t smem[GPU_NUM_THREADS];
__shared__ bool found;
__shared__ int foundPos;
typedef hipcub::BlockReduce<accscalar_t, GPU_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
accscalar_t accZero = static_cast<accscalar_t>(0);
scalar_t zero = static_cast<scalar_t>(0);
for (int curDist = blockIdx.x; curDist < distributions; curDist += gridDim.x) {
// Each block handles one distribution
// First pass, find the total sum of the distribution
accscalar_t sum = accZero;
scalar_t val;
for (int cat = threadIdx.x; cat < categories; cat += blockDim.x) {
val = dist[curDist * stride_dist + cat * stride_categories];
sum += static_cast<accscalar_t>(val);
}
// threadIdx.x == 0 has the sum value from this
sum = BlockReduce(temp_storage).Sum(sum);
// Broadcast sum and sample value
if (threadIdx.x == 0) {
// Make sure the sum of our distribution didn't overflow
foundPos = 0;
smem[0] = sum;
smem[1] = sampled[curDist];
}
__syncthreads();
sum = smem[0];
scalar_t sample = static_cast<scalar_t>(smem[1]);
__syncthreads();
// zero sum
if (sum == accZero) {
// Choose the first element
if (threadIdx.x == 0) {
dest[curDist] = 0;
}
continue;
}
int chunks = (categories + (int)blockDim.x - 1) / blockDim.x;
accscalar_t prevHighProb = accZero;
found = false;
for (int chunk = 0; chunk < chunks && !found; ++chunk) {
// All threads in bounds load a value
int cat = chunk * blockDim.x + threadIdx.x;
accscalar_t dist_val = cat < categories ?
static_cast<accscalar_t>(dist[curDist * stride_dist + cat * stride_categories]) / sum :
accZero;
smem[threadIdx.x] = dist_val;
__syncthreads();
// Perform an inclusive prefix sum of the shared memory contents
for (int offset = 1; offset < blockDim.x; offset *= 2) {
accscalar_t val = accZero;
if (threadIdx.x >= offset) {
val = smem[threadIdx.x - offset] + smem[threadIdx.x];
}
__syncthreads();
if (threadIdx.x >= offset) {
smem[threadIdx.x] = val;
}
__syncthreads();
}
// Each thread will check to see if the sample falls in its bucket
scalar_t curBucket =
static_cast<scalar_t>(smem[threadIdx.x] + prevHighProb);
scalar_t prevBucket = static_cast<scalar_t>(
threadIdx.x == 0 ? prevHighProb
: smem[threadIdx.x - 1] + prevHighProb);
bool inBucket =
(cat < categories) &&
(!(sample >= curBucket) &&
(sample >= prevBucket) &&
(dist_val > zero));
if (inBucket) {
atomicMax(&foundPos, cat);
found = true;
}
// Store the previous scan's high value for future use
prevHighProb = prevHighProb + smem[blockDim.x - 1];
__syncthreads();
}
if (threadIdx.x == 0) {
if (found) {
dest[curDist] = foundPos;
} else {
// This should address a rare bug where we don't select a valid index. This likely occurs when
// due to floating point arithmetic rounding errors, our cumulative sum does not add up to 1, but
// and our uniform sample is greater than this value. In this case we likely have unitialized memory
// in dest[curDist]. So basically we will loop through the distribution and pick the largest index
// where the distribution is non-zero. This is obviously terribly inefficient, but due to the
// rarity in which this occurs, this should not be an issue.
for (int cat = categories - 1; cat >= 0; --cat) {
if (dist[curDist * stride_dist + cat * stride_categories] > zero) {
dest[curDist] = cat;
break;
}
}
}
}
}
}
int main(int argc, char* argv[])
{
if (argc != 4) {
printf("Usage: %s <number of distributions> <number of categories> <repeat>\n", argv[0]);
return 1;
}
const int numDist = atoi(argv[1]);
const int numCategories = atoi(argv[2]);
const int repeat = atoi(argv[3]);
int sample_size_bytes = numDist * sizeof(float);
float *sample = (float*) malloc (sample_size_bytes);
std::default_random_engine g (123);
std::uniform_real_distribution<float> uniform_distr (0.f, 1.f);
for (int i = 0; i < numDist; i++) {
sample[i] = uniform_distr(g);
}
int result_size_bytes = numDist * sizeof(int);
int *result = (int*) malloc (result_size_bytes);
size_t distr_size_bytes = numDist * numCategories * sizeof(float);
float *distr = (float*) malloc (distr_size_bytes);
srand(123);
for (int i = 0; i < numDist; i++) {
for (int j = 0; j < numCategories; j++) {
// they don't need to sum to 1 in which case the values are weights
distr[i * numCategories + j] = rand() % 100 + 1;
}
}
float *d_sample;
hipMalloc((void**)&d_sample, sample_size_bytes);
hipMemcpy(d_sample, sample, sample_size_bytes, hipMemcpyHostToDevice);
float *d_distr;
hipMalloc((void**)&d_distr, distr_size_bytes);
hipMemcpy(d_distr, distr, distr_size_bytes, hipMemcpyHostToDevice);
int *d_result;
hipMalloc((void**)&d_result, result_size_bytes);
int requiredThreads = GPU_NUM_THREADS;
dim3 grid(512);
dim3 block(requiredThreads);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < repeat; i++) {
sampleMultinomialOnce<float, float> <<<grid, block>>>(
d_result, numDist, numCategories, d_sample, d_distr, numCategories, 1);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average execution time of sampleMultinomialOnce kernel: %f (us)\n", (time * 1e-3f) / repeat);
hipMemcpy(result, d_result, result_size_bytes, hipMemcpyDeviceToHost);
double sum = 0, var = 0;
for (int i = 0; i < numDist; i++) sum += result[i];
sum = sum / numDist;
for (int i = 0; i < numDist; i++)
var += (result[i] - sum) * (result[i] - sum);
printf("Variance = %lf\n", var / numDist);
hipFree(d_result);
hipFree(d_sample);
hipFree(d_distr);
free(result);
free(sample);
free(distr);
return 0;
}
|
b35a824cd2f45a5bd6c60bfdc41ecb5bee6c8c14.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/random.h>
#include <iostream>
#include <fstream>
#include <string>
#define N 1
#define M 128
#define Nmax 100000
#define num_samples 401
#define order 2
#define Nthreads M*N
__host__ __device__
unsigned int hash(unsigned int a)
{
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
__host__ __device__
unsigned int grid_map(int u1,int u2,int u3)
{
return u1*(Nmax)*(Nthreads)+u2*(Nthreads)+u3;
}
__device__
void filter_out(float a[],float b[],float y[],float u[],int Npts,int n_order)
{
int ii,jj;
for(ii=0;ii<Npts;ii++)y[ii] = 0;
for (ii = n_order; ii < (Npts); ii++)
{
for (jj = 1; jj <= n_order; jj++)
{
y[ii] = y[ii] - a[jj]*y[ii-jj];
}
for (jj = 0; jj <= n_order; jj++)
{
y[ii] = y[ii]+b[jj]*u[ii-jj];
}
y[ii] = y[ii]/a[0];
//if(abs(y[ii])>1)y[ii] = 10;
}
}
__device__
double RSS(float y1[],float y2[],int Npts)
{
int ii;
double total = 0;
for (ii = 0; ii < Npts; ii++)
{
total = total + pow((y1[ii]-y2[ii]),2);
}
return total;
}
__global__ void kernel(float * a_save,float* b_save,float* u,float* D) {
//Get thread number
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
//get the global index
int global_idx = index_y * grid_width + index_x;
//Set the Random generators up
unsigned int seed_normal = hash(global_idx);
unsigned int seed_uniform = hash(global_idx*256*256);
thrust::default_random_engine rng_normal(seed_normal);
thrust::default_random_engine rng_uniform(seed_uniform);
thrust::random::experimental::normal_distribution<float> dist_norm(0,1);
thrust::random::uniform_real_distribution<float> dist_uniform(0,1);
float b_curr[order+1],a_curr[order+1];
float b_cand[order+1],a_cand[order+1];
float y_cand[num_samples], y_curr[num_samples];
for(int ii =0;ii<order+1;ii++)
{
b_curr[ii] = dist_uniform(rng_uniform);
a_curr[ii] = dist_uniform(rng_uniform);
}
a_curr[0] = 1.0;
//printf("a_curr[2] = %f\n",a_curr[2]);
//Filter Output
filter_out(a_curr,b_curr,y_curr,u,num_samples,order);
//printf("u[1] = %f\n",u[1]);
//a_save[1] = 0.123456;
//printf("a_save[1] = %f\n",a_save[1]);
double chi_curr,chi_cand,ratio,a_ratio;
int flg = 0;
int accepted = 0;
int nn = 0;
int burnin = 0;
int count = 0;
double sigma = 1.0;
//RSS for error functions chi
chi_curr = RSS(D,y_curr,num_samples);
//a_save[grid_map(0,0,global_idx)] = dist_norm(rng_normal);
//a_save[grid_map(0,1,global_idx)] = dist_uniform(rng_uniform);
while(nn<=Nmax)
{
for(int ii=0;ii<order+1;ii++)
{
a_cand[ii] = a_curr[ii] + sigma*dist_norm(rng_normal);
b_cand[ii] = b_curr[ii] + sigma*dist_norm(rng_normal);
}
a_cand[0] = 1.0;
//printf("randn = %f\n",dist_norm(rng_normal));
//printf("a_cand[2] = %f\n",a_cand[2]);
//Filter Output
filter_out(a_cand,b_cand,y_cand,u,num_samples,order);
//Rss for candidate
chi_cand = RSS(D,y_cand,num_samples);
ratio = exp(-(chi_cand)+chi_curr);
/* if(nn%1000==0)
{
printf("ratio = %f\n",ratio);
printf("sigma = %f\n",sigma);
printf("chi_curr = %f\n",chi_curr);
printf("chi_cand = %f\n\n",chi_cand);
}
*/
if(dist_uniform(rng_uniform)<=ratio)
{
for(int ii=0;ii<order+1;ii++)
{
a_curr[ii] = a_cand[ii];
b_curr[ii] = b_cand[ii];
}
chi_curr = chi_cand;
accepted++;
}
if(count%1000==0 && count!=0 && flg==0)
{
a_ratio = (double)(accepted)/count;
//printf("a_ratio = %f\n",a_ratio);
//printf("sigma = %f\n",sigma);
if(a_ratio < 0.3)
{
sigma = sigma/1.2;
count = 0;
accepted = 0;
}
else if(a_ratio>0.4)
{
sigma = sigma*1.2;
count = 0;
accepted = 0;
}
else
{
burnin = nn-1;
flg = 1;
}
}
count++;
nn++;
if(flg==1)
{
for(int ii=0;ii<order+1;ii++)
{
a_save[grid_map(ii,nn,global_idx)] = a_curr[ii];
//printf("a_curr[1] = %f\n",a_curr[1]);
b_save[grid_map(ii,nn,global_idx)] = b_curr[ii];
}
}
}
//for(int ii=0;ii<Nmax;ii++)
// printf("a_save[1] = %f\n",a_save[grid_map(1,ii,global_idx)]);
}
int main(void)
{
//Read in Parameters
unsigned int num_samples1,order1;
float fs,fc,fnorm,dt,t1,t0;
std::ifstream params;
std::string fn = "data/params.dat";
params.open(fn.c_str());
params.ignore(10000,'\n');
params>>order1;
params>>fs;
params>>fc;
params>>fnorm;
params>>dt;
params>>t1;
params>>t0;
params>>num_samples1;
//Read in u t and D
thrust::device_vector<float> u,D,t;
thrust::host_vector<float> u_host;
std::ifstream u_dat,D_dat,t_dat;
u_dat.open("data/u.dat");
D_dat.open("data/D.dat");
t_dat.open("data/t.dat");
std::cout<<"order = "<<order<<std::endl;
std::cout<<"num_samples = "<<num_samples1<<std::endl;
float val;
for(int ii=0;ii<num_samples1;ii++)
{
u_dat>>val;
u.push_back(val);
D_dat>>val;
D.push_back(val);
t_dat>>val;
t.push_back(val);
}
u_dat.close();
D_dat.close();
t_dat.close();
//Define Host and device vectors
thrust::device_vector<float> a_save_dev(Nthreads*Nmax*(order+1));
thrust::host_vector<float> a_save_host(Nthreads*(order+1));
thrust::device_vector<float> b_save_dev(Nthreads*Nmax*(order+1));
thrust::host_vector<float> b_save_host(Nthreads*(order+1));
//Call Parallel MCMC passing in pointers to save files
float* a_save_ptr = thrust::raw_pointer_cast(a_save_dev.data());
float* b_save_ptr = thrust::raw_pointer_cast(b_save_dev.data());
float* D_ptr = thrust::raw_pointer_cast(D.data());
float* u_ptr = thrust::raw_pointer_cast(u.data());
hipLaunchKernelGGL(( kernel), dim3(N),dim3(M), 0, 0, a_save_ptr,b_save_ptr,u_ptr,D_ptr);
hipDeviceSynchronize();
//Transfer save files from dev to host
a_save_host = a_save_dev;
b_save_host = b_save_dev;
u_host = u;
//for(int ii=0;ii<num_samples;ii++)
// std::cout << "u[ii] = "<<u_host[ii]<<std::endl;
std::cout << "num_samples = "<<num_samples<<std::endl;
std::ofstream a_dat,b_dat;
a_dat.open("data/a.dat");
b_dat.open("data/b.dat");
for(int ii=0;ii<Nthreads;ii++)
{
for(int jj=0;jj<Nmax;jj++)
{
for(int kk=0;kk<order+1;kk++)
{
a_dat << a_save_host[grid_map(kk,jj,ii)] << "\t";
b_dat << b_save_host[grid_map(kk,jj,ii)] << "\t";
}
}
a_dat << std::endl;
b_dat <<std::endl;
}
return 0;
}
| b35a824cd2f45a5bd6c60bfdc41ecb5bee6c8c14.cu | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/random.h>
#include <iostream>
#include <fstream>
#include <string>
#define N 1
#define M 128
#define Nmax 100000
#define num_samples 401
#define order 2
#define Nthreads M*N
__host__ __device__
unsigned int hash(unsigned int a)
{
a = (a+0x7ed55d16) + (a<<12);
a = (a^0xc761c23c) ^ (a>>19);
a = (a+0x165667b1) + (a<<5);
a = (a+0xd3a2646c) ^ (a<<9);
a = (a+0xfd7046c5) + (a<<3);
a = (a^0xb55a4f09) ^ (a>>16);
return a;
}
__host__ __device__
unsigned int grid_map(int u1,int u2,int u3)
{
return u1*(Nmax)*(Nthreads)+u2*(Nthreads)+u3;
}
__device__
void filter_out(float a[],float b[],float y[],float u[],int Npts,int n_order)
{
int ii,jj;
for(ii=0;ii<Npts;ii++)y[ii] = 0;
for (ii = n_order; ii < (Npts); ii++)
{
for (jj = 1; jj <= n_order; jj++)
{
y[ii] = y[ii] - a[jj]*y[ii-jj];
}
for (jj = 0; jj <= n_order; jj++)
{
y[ii] = y[ii]+b[jj]*u[ii-jj];
}
y[ii] = y[ii]/a[0];
//if(abs(y[ii])>1)y[ii] = 10;
}
}
__device__
double RSS(float y1[],float y2[],int Npts)
{
int ii;
double total = 0;
for (ii = 0; ii < Npts; ii++)
{
total = total + pow((y1[ii]-y2[ii]),2);
}
return total;
}
__global__ void kernel(float * a_save,float* b_save,float* u,float* D) {
//Get thread number
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int grid_width = gridDim.x * blockDim.x;
//get the global index
int global_idx = index_y * grid_width + index_x;
//Set the Random generators up
unsigned int seed_normal = hash(global_idx);
unsigned int seed_uniform = hash(global_idx*256*256);
thrust::default_random_engine rng_normal(seed_normal);
thrust::default_random_engine rng_uniform(seed_uniform);
thrust::random::experimental::normal_distribution<float> dist_norm(0,1);
thrust::random::uniform_real_distribution<float> dist_uniform(0,1);
float b_curr[order+1],a_curr[order+1];
float b_cand[order+1],a_cand[order+1];
float y_cand[num_samples], y_curr[num_samples];
for(int ii =0;ii<order+1;ii++)
{
b_curr[ii] = dist_uniform(rng_uniform);
a_curr[ii] = dist_uniform(rng_uniform);
}
a_curr[0] = 1.0;
//printf("a_curr[2] = %f\n",a_curr[2]);
//Filter Output
filter_out(a_curr,b_curr,y_curr,u,num_samples,order);
//printf("u[1] = %f\n",u[1]);
//a_save[1] = 0.123456;
//printf("a_save[1] = %f\n",a_save[1]);
double chi_curr,chi_cand,ratio,a_ratio;
int flg = 0;
int accepted = 0;
int nn = 0;
int burnin = 0;
int count = 0;
double sigma = 1.0;
//RSS for error functions chi
chi_curr = RSS(D,y_curr,num_samples);
//a_save[grid_map(0,0,global_idx)] = dist_norm(rng_normal);
//a_save[grid_map(0,1,global_idx)] = dist_uniform(rng_uniform);
while(nn<=Nmax)
{
for(int ii=0;ii<order+1;ii++)
{
a_cand[ii] = a_curr[ii] + sigma*dist_norm(rng_normal);
b_cand[ii] = b_curr[ii] + sigma*dist_norm(rng_normal);
}
a_cand[0] = 1.0;
//printf("randn = %f\n",dist_norm(rng_normal));
//printf("a_cand[2] = %f\n",a_cand[2]);
//Filter Output
filter_out(a_cand,b_cand,y_cand,u,num_samples,order);
//Rss for candidate
chi_cand = RSS(D,y_cand,num_samples);
ratio = exp(-(chi_cand)+chi_curr);
/* if(nn%1000==0)
{
printf("ratio = %f\n",ratio);
printf("sigma = %f\n",sigma);
printf("chi_curr = %f\n",chi_curr);
printf("chi_cand = %f\n\n",chi_cand);
}
*/
if(dist_uniform(rng_uniform)<=ratio)
{
for(int ii=0;ii<order+1;ii++)
{
a_curr[ii] = a_cand[ii];
b_curr[ii] = b_cand[ii];
}
chi_curr = chi_cand;
accepted++;
}
if(count%1000==0 && count!=0 && flg==0)
{
a_ratio = (double)(accepted)/count;
//printf("a_ratio = %f\n",a_ratio);
//printf("sigma = %f\n",sigma);
if(a_ratio < 0.3)
{
sigma = sigma/1.2;
count = 0;
accepted = 0;
}
else if(a_ratio>0.4)
{
sigma = sigma*1.2;
count = 0;
accepted = 0;
}
else
{
burnin = nn-1;
flg = 1;
}
}
count++;
nn++;
if(flg==1)
{
for(int ii=0;ii<order+1;ii++)
{
a_save[grid_map(ii,nn,global_idx)] = a_curr[ii];
//printf("a_curr[1] = %f\n",a_curr[1]);
b_save[grid_map(ii,nn,global_idx)] = b_curr[ii];
}
}
}
//for(int ii=0;ii<Nmax;ii++)
// printf("a_save[1] = %f\n",a_save[grid_map(1,ii,global_idx)]);
}
int main(void)
{
//Read in Parameters
unsigned int num_samples1,order1;
float fs,fc,fnorm,dt,t1,t0;
std::ifstream params;
std::string fn = "data/params.dat";
params.open(fn.c_str());
params.ignore(10000,'\n');
params>>order1;
params>>fs;
params>>fc;
params>>fnorm;
params>>dt;
params>>t1;
params>>t0;
params>>num_samples1;
//Read in u t and D
thrust::device_vector<float> u,D,t;
thrust::host_vector<float> u_host;
std::ifstream u_dat,D_dat,t_dat;
u_dat.open("data/u.dat");
D_dat.open("data/D.dat");
t_dat.open("data/t.dat");
std::cout<<"order = "<<order<<std::endl;
std::cout<<"num_samples = "<<num_samples1<<std::endl;
float val;
for(int ii=0;ii<num_samples1;ii++)
{
u_dat>>val;
u.push_back(val);
D_dat>>val;
D.push_back(val);
t_dat>>val;
t.push_back(val);
}
u_dat.close();
D_dat.close();
t_dat.close();
//Define Host and device vectors
thrust::device_vector<float> a_save_dev(Nthreads*Nmax*(order+1));
thrust::host_vector<float> a_save_host(Nthreads*(order+1));
thrust::device_vector<float> b_save_dev(Nthreads*Nmax*(order+1));
thrust::host_vector<float> b_save_host(Nthreads*(order+1));
//Call Parallel MCMC passing in pointers to save files
float* a_save_ptr = thrust::raw_pointer_cast(a_save_dev.data());
float* b_save_ptr = thrust::raw_pointer_cast(b_save_dev.data());
float* D_ptr = thrust::raw_pointer_cast(D.data());
float* u_ptr = thrust::raw_pointer_cast(u.data());
kernel<<<N,M>>>(a_save_ptr,b_save_ptr,u_ptr,D_ptr);
cudaDeviceSynchronize();
//Transfer save files from dev to host
a_save_host = a_save_dev;
b_save_host = b_save_dev;
u_host = u;
//for(int ii=0;ii<num_samples;ii++)
// std::cout << "u[ii] = "<<u_host[ii]<<std::endl;
std::cout << "num_samples = "<<num_samples<<std::endl;
std::ofstream a_dat,b_dat;
a_dat.open("data/a.dat");
b_dat.open("data/b.dat");
for(int ii=0;ii<Nthreads;ii++)
{
for(int jj=0;jj<Nmax;jj++)
{
for(int kk=0;kk<order+1;kk++)
{
a_dat << a_save_host[grid_map(kk,jj,ii)] << "\t";
b_dat << b_save_host[grid_map(kk,jj,ii)] << "\t";
}
}
a_dat << std::endl;
b_dat <<std::endl;
}
return 0;
}
|
43d37814f6c00039812fa71880b1c9559afd8703.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com, created on 30.11.17.
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <ops/declarable/helpers/col2im.h>
#include <PointersManager.h>
namespace nd4j {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
// columns [bS, iC, kH, kW, oH, oW] to be de-convoluted to image [bS, iC, iH, iW]
template <typename T>
static __global__ void col2imCuda(const void* columns, const Nd4jLong* colShapeInfo, void* image, const Nd4jLong* imShapeInfo, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW) {
const T* col = reinterpret_cast<const T*>(columns);
T* im = reinterpret_cast<T*>(image);
__shared__ int colRank, imRank, kHeff, kWeff, oH, oW;
__shared__ Nd4jLong *sharedMem, imLen;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
oH = colShapeInfo[5];
oW = colShapeInfo[6];
kHeff = colShapeInfo[3] + (colShapeInfo[3] - 1) * (dH - 1);
kWeff = colShapeInfo[4] + (colShapeInfo[4] - 1) * (dW - 1);
imRank = 4;
colRank = 6;
imLen = shape::length(imShapeInfo);
}
__syncthreads();
const auto imInd = threadIdx.x + blockIdx.x * blockDim.x;
if(imInd >= imLen)
return;
auto coords = sharedMem + threadIdx.x * colRank;
shape::index2coords(imRank, imShapeInfo + 1, imInd, imLen, coords);
const auto imOffset = shape::getOffset(0, imShapeInfo + 1, imShapeInfo + imRank + 1, coords, imRank);
const int imH = coords[2] + pH;
const int imW = coords[3] + pW;
const int colHstart = (imH < kHeff) ? 0 : (imH - kHeff) / sH + 1;
const int colWstart = (imW < kWeff) ? 0 : (imW - kWeff) / sW + 1;
const int colHend = nd4j::math::nd4j_min<int>(imH / sH + 1, oH);
const int colWend = nd4j::math::nd4j_min<int>(imW / sW + 1, oW);
T val = 0;
for(coords[4] = colHstart; coords[4] < colHend; ++coords[4]) {
coords[2] = imH - coords[4] * sH;
for(coords[5] = colWstart; coords[5] < colWend; ++coords[5]) {
coords[3] = imW - coords[5] * sW;
if(coords[2] % dH == 0 && coords[3] % dW == 0) {
coords[2] /= dH;
coords[3] /= dW;
val += col[shape::getOffset(0, colShapeInfo + 1, colShapeInfo + colRank + 1, coords, colRank)];
}
}
}
im[imOffset] = val;
}
////////////////////////////////////////////////////////////////////////
// columns [bS, iC, kH, kW, oH, oW] to be de-convoluted to image [bS, iC, iH, iW]
template<typename T>
__global__ static void col2imCuda2(const void *columns, void *image, const Nd4jLong *colShapeInfo, const Nd4jLong *imShapeInfo, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW) {
const auto col = reinterpret_cast<const T*>(columns);
auto im = reinterpret_cast<T*>(image);
auto colShape = shape::shapeOf(const_cast<Nd4jLong *>(colShapeInfo));
auto colStride = shape::stride(const_cast<Nd4jLong *>(colShapeInfo));
int colStride0 = colStride[0];
int colStride1 = colStride[1];
int colStride2 = colStride[2];
int colStride3 = colStride[3];
int colStride4 = colStride[4];
int colStride5 = colStride[5];
int kH = colShape[2];
int kW = colShape[3];
auto imShape = shape::shapeOf(const_cast<Nd4jLong *>(imShapeInfo));
auto imOrder = shape::order(const_cast<Nd4jLong *>(imShapeInfo));
auto imStride = shape::stride(const_cast<Nd4jLong *>(imShapeInfo));
int bS = imShape[0];
int iC = imShape[1];
int iH = imShape[2];
int iW = imShape[3];
int oH = colShape[4];//(iH + 2 * pH - kH) / sW + 1;
int oW = colShape[5];//(iW + 2 * pW - kW) / sH + 1;
int n = bS * iC * iH * iW;
//Effective kernel size, accounting for dilation
int kHeff = kH + (kH - 1) * (dH - 1);
int kWeff = kW + (kW - 1) * (dW - 1);
for (int i = (blockDim.x * blockIdx.x) + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
T val = 0;
int w_im = i % iW + pW;
int h_im = (i / iW) % iH + pH;
int c_im = i / (iW * iH);
int b = c_im / iC;
int c = c_im % iC;
// compute the start and end of the output
// These are the indexes for dimensions ??? in the 6d col matrix
int w_col_start = (w_im < kWeff) ? 0 : (w_im - kWeff) / sW + 1;
int w_col_end = nd4j::math::nd4j_min<int>(w_im / sW + 1, oW);
int h_col_start = (h_im < kHeff) ? 0 : (h_im - kHeff) / sH + 1;
int h_col_end = nd4j::math::nd4j_min<int>(h_im / sH + 1, oH);
//Iterate over col entries in the 6d array... these are added up
for (int colH = h_col_start; colH < h_col_end; colH += 1) {
for (int colW = w_col_start; colW < w_col_end; colW += 1) {
int kRow = (h_im - colH * sH);
int kCol = (w_im - colW * sW);
if(kRow % dH == 0 && kCol % dW == 0){
kRow /= dH;
kCol /= dW;
int data_col_index = b * colStride0 + c * colStride1 + kRow * colStride2 + kCol * colStride3 + colH * colStride4 + colW * colStride5;
val += col[data_col_index];
}
}
}
int i_f = 0;
int i_c = i;
for (int dim = 3; dim >= 0; dim--) {
i_f += (i_c % imShape[dim]) * imStride[dim];
i_c = i_c / imShape[dim];
}
im[i_f] = val;
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void col2imCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* columns, const Nd4jLong* colShapeInfo,
void* image, const Nd4jLong* imShapeInfo,
const int sH, const int sW, const int pH, const int pW, const int dH, const int dW) {
// col2imCuda2<T><<<512, 512, 1024, *stream>>>(columns, image, colShapeInfo, imShapeInfo, sH, sW, pH, pW, dH, dW);
hipLaunchKernelGGL(( col2imCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, columns, colShapeInfo, image, imShapeInfo, sH, sW, pH, pW, dH, dW);
}
BUILD_SINGLE_TEMPLATE(template void col2imCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t* stream, const void *col, const Nd4jLong *colShapeInfo, void *im, const Nd4jLong *imShapeInfo, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
void col2im(nd4j::LaunchContext& context, const NDArray& col, NDArray& im, const int sH, const int sW, const int pH, const int pW, const int iH, const int iW, const int dH, const int dW) {
PointersManager manager(&context, "col2im");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (im.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = col.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&im}, {&col});
BUILD_SINGLE_SELECTOR(im.dataType(), col2imCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context.getCudaStream(), col.getSpecialBuffer(), col.getSpecialShapeInfo(), im.specialBuffer(), im.specialShapeInfo(), sH, sW, pH, pW, dH, dW), FLOAT_TYPES);
NDArray::registerSpecialUse({&im}, {&col});
manager.synchronize();
}
}
}
} | 43d37814f6c00039812fa71880b1c9559afd8703.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com, created on 30.11.17.
// @author Yurii Shyrma (iuriish@yahoo.com)
//
#include <ops/declarable/helpers/col2im.h>
#include <PointersManager.h>
namespace nd4j {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
// columns [bS, iC, kH, kW, oH, oW] to be de-convoluted to image [bS, iC, iH, iW]
template <typename T>
static __global__ void col2imCuda(const void* columns, const Nd4jLong* colShapeInfo, void* image, const Nd4jLong* imShapeInfo, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW) {
const T* col = reinterpret_cast<const T*>(columns);
T* im = reinterpret_cast<T*>(image);
__shared__ int colRank, imRank, kHeff, kWeff, oH, oW;
__shared__ Nd4jLong *sharedMem, imLen;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sharedMem = reinterpret_cast<Nd4jLong*>(shmem);
oH = colShapeInfo[5];
oW = colShapeInfo[6];
kHeff = colShapeInfo[3] + (colShapeInfo[3] - 1) * (dH - 1);
kWeff = colShapeInfo[4] + (colShapeInfo[4] - 1) * (dW - 1);
imRank = 4;
colRank = 6;
imLen = shape::length(imShapeInfo);
}
__syncthreads();
const auto imInd = threadIdx.x + blockIdx.x * blockDim.x;
if(imInd >= imLen)
return;
auto coords = sharedMem + threadIdx.x * colRank;
shape::index2coords(imRank, imShapeInfo + 1, imInd, imLen, coords);
const auto imOffset = shape::getOffset(0, imShapeInfo + 1, imShapeInfo + imRank + 1, coords, imRank);
const int imH = coords[2] + pH;
const int imW = coords[3] + pW;
const int colHstart = (imH < kHeff) ? 0 : (imH - kHeff) / sH + 1;
const int colWstart = (imW < kWeff) ? 0 : (imW - kWeff) / sW + 1;
const int colHend = nd4j::math::nd4j_min<int>(imH / sH + 1, oH);
const int colWend = nd4j::math::nd4j_min<int>(imW / sW + 1, oW);
T val = 0;
for(coords[4] = colHstart; coords[4] < colHend; ++coords[4]) {
coords[2] = imH - coords[4] * sH;
for(coords[5] = colWstart; coords[5] < colWend; ++coords[5]) {
coords[3] = imW - coords[5] * sW;
if(coords[2] % dH == 0 && coords[3] % dW == 0) {
coords[2] /= dH;
coords[3] /= dW;
val += col[shape::getOffset(0, colShapeInfo + 1, colShapeInfo + colRank + 1, coords, colRank)];
}
}
}
im[imOffset] = val;
}
////////////////////////////////////////////////////////////////////////
// columns [bS, iC, kH, kW, oH, oW] to be de-convoluted to image [bS, iC, iH, iW]
template<typename T>
__global__ static void col2imCuda2(const void *columns, void *image, const Nd4jLong *colShapeInfo, const Nd4jLong *imShapeInfo, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW) {
const auto col = reinterpret_cast<const T*>(columns);
auto im = reinterpret_cast<T*>(image);
auto colShape = shape::shapeOf(const_cast<Nd4jLong *>(colShapeInfo));
auto colStride = shape::stride(const_cast<Nd4jLong *>(colShapeInfo));
int colStride0 = colStride[0];
int colStride1 = colStride[1];
int colStride2 = colStride[2];
int colStride3 = colStride[3];
int colStride4 = colStride[4];
int colStride5 = colStride[5];
int kH = colShape[2];
int kW = colShape[3];
auto imShape = shape::shapeOf(const_cast<Nd4jLong *>(imShapeInfo));
auto imOrder = shape::order(const_cast<Nd4jLong *>(imShapeInfo));
auto imStride = shape::stride(const_cast<Nd4jLong *>(imShapeInfo));
int bS = imShape[0];
int iC = imShape[1];
int iH = imShape[2];
int iW = imShape[3];
int oH = colShape[4];//(iH + 2 * pH - kH) / sW + 1;
int oW = colShape[5];//(iW + 2 * pW - kW) / sH + 1;
int n = bS * iC * iH * iW;
//Effective kernel size, accounting for dilation
int kHeff = kH + (kH - 1) * (dH - 1);
int kWeff = kW + (kW - 1) * (dW - 1);
for (int i = (blockDim.x * blockIdx.x) + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
T val = 0;
int w_im = i % iW + pW;
int h_im = (i / iW) % iH + pH;
int c_im = i / (iW * iH);
int b = c_im / iC;
int c = c_im % iC;
// compute the start and end of the output
// These are the indexes for dimensions ??? in the 6d col matrix
int w_col_start = (w_im < kWeff) ? 0 : (w_im - kWeff) / sW + 1;
int w_col_end = nd4j::math::nd4j_min<int>(w_im / sW + 1, oW);
int h_col_start = (h_im < kHeff) ? 0 : (h_im - kHeff) / sH + 1;
int h_col_end = nd4j::math::nd4j_min<int>(h_im / sH + 1, oH);
//Iterate over col entries in the 6d array... these are added up
for (int colH = h_col_start; colH < h_col_end; colH += 1) {
for (int colW = w_col_start; colW < w_col_end; colW += 1) {
int kRow = (h_im - colH * sH);
int kCol = (w_im - colW * sW);
if(kRow % dH == 0 && kCol % dW == 0){
kRow /= dH;
kCol /= dW;
int data_col_index = b * colStride0 + c * colStride1 + kRow * colStride2 + kCol * colStride3 + colH * colStride4 + colW * colStride5;
val += col[data_col_index];
}
}
}
int i_f = 0;
int i_c = i;
for (int dim = 3; dim >= 0; dim--) {
i_f += (i_c % imShape[dim]) * imStride[dim];
i_c = i_c / imShape[dim];
}
im[i_f] = val;
}
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static void col2imCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* columns, const Nd4jLong* colShapeInfo,
void* image, const Nd4jLong* imShapeInfo,
const int sH, const int sW, const int pH, const int pW, const int dH, const int dW) {
// col2imCuda2<T><<<512, 512, 1024, *stream>>>(columns, image, colShapeInfo, imShapeInfo, sH, sW, pH, pW, dH, dW);
col2imCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(columns, colShapeInfo, image, imShapeInfo, sH, sW, pH, pW, dH, dW);
}
BUILD_SINGLE_TEMPLATE(template void col2imCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t* stream, const void *col, const Nd4jLong *colShapeInfo, void *im, const Nd4jLong *imShapeInfo, const int sH, const int sW, const int pH, const int pW, const int dH, const int dW), FLOAT_TYPES);
//////////////////////////////////////////////////////////////////////////
void col2im(nd4j::LaunchContext& context, const NDArray& col, NDArray& im, const int sH, const int sW, const int pH, const int pW, const int iH, const int iW, const int dH, const int dW) {
PointersManager manager(&context, "col2im");
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (im.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
const int sharedMem = col.rankOf() * sizeof(Nd4jLong) * threadsPerBlock + 128;
NDArray::prepareSpecialUse({&im}, {&col});
BUILD_SINGLE_SELECTOR(im.dataType(), col2imCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context.getCudaStream(), col.getSpecialBuffer(), col.getSpecialShapeInfo(), im.specialBuffer(), im.specialShapeInfo(), sH, sW, pH, pW, dH, dW), FLOAT_TYPES);
NDArray::registerSpecialUse({&im}, {&col});
manager.synchronize();
}
}
}
} |
3352965b61de766f100919811f38d60134e2eefb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "std_incl.h"
#include "utils.h"
#include <cassert>
#include <cstdlib>
#include <stdio.h>
#include <windows.h>
#include <cstdarg>
#include <valarray>
#include "random_distr.h"
#include <stdint.h>
#include "gpu_utils.h"
#include "QueuedCUDATracker.h"
#include "QueuedCPUTracker.h"
#include "../cputrack-test/SharedTests.h"
#include "BenchmarkLUT.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include "FisherMatrix.h"
#include "testutils.h"
#include "ResultManager.h"
#include "ExtractBeadImages.h"
void BenchmarkParams();
std::string getPath(const char *file)
{
std::string s = file;
int pos = s.length()-1;
while (pos>0 && s[pos]!='\\' && s[pos]!= '/' )
pos--;
return s.substr(0, pos);
}
inline __device__ float2 mul_conjugate(float2 a, float2 b)
{
float2 r;
r.x = a.x*b.x + a.y*b.y;
r.y = a.y*b.x - a.x*b.y;
return r;
}
void ShowCUDAError() {
hipError_t err = hipGetLastError();
dbgprintf("Cuda error: %s\n", hipGetErrorString(err));
}
__shared__ float cudaSharedMem[];
__device__ float compute(int idx, float* buf, int s)
{
// some random calcs to make the kernel unempty
float k=0.0f;
for (int x=0;x<s;x++ ){
k+=cosf(x*0.1f*idx);
buf[x]=k;
}
for (int x=0;x<s/2;x++){
buf[x]=buf[x]*buf[x];
}
float sum=0.0f;
for (int x=s-1;x>=1;x--) {
sum += buf[x-1]/(fabsf(buf[x])+0.1f);
}
return sum;
}
__global__ void testWithGlobal(int n, int s, float* result, float* buf) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
result [idx] = compute(idx, &buf [idx * s],s);
}
}
__global__ void testWithShared(int n, int s, float* result) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
result [idx] = compute(idx, &cudaSharedMem[threadIdx.x * s],s);
}
}
void TestSharedMem()
{
int n=100, s=200;
dim3 nthreads(32), nblocks( (n+nthreads.x-1)/nthreads.x);
device_vec<float> buf(n*s);
device_vec<float> result_s(n), result_g(n);
double t0 = GetPreciseTime();
hipLaunchKernelGGL(( testWithGlobal), dim3(nblocks),dim3(nthreads), 0, 0, n,s,result_g.data,buf.data);
hipDeviceSynchronize();
double t1 = GetPreciseTime();
hipLaunchKernelGGL(( testWithShared) , dim3(nblocks),dim3(nthreads),s*sizeof(float)*nthreads.x, 0, n,s,result_s.data);
hipDeviceSynchronize();
double t2 = GetPreciseTime();
std::vector<float> rs = result_s, rg = result_g;
for (int x=0;x<n;x++) {
dbgprintf("result_s[%d]=%f. result_g[%d]=%f\n", x,rs[x], x,rg[x]);
}
dbgprintf("Speed of shared comp: %f, speed of global comp: %f\n", n/(t2-t1), n/(t1-t0));
}
void QTrkCompareTest()
{
QTrkSettings cfg;
cfg.width = cfg.height = 40;
cfg.qi_iterations = 1;
cfg.xc1_iterations = 2;
cfg.xc1_profileLength = 64;
cfg.numThreads = -1;
cfg.com_bgcorrection = 0.0f;
bool haveZLUT = false;
#ifdef _DEBUG
cfg.numThreads = 2;
cfg.qi_iterations=1;
int total= 10;
int batchSize = 2;
haveZLUT=false;
#else
cfg.numThreads = 4;
int total = 10000;
int batchSize = 512;
#endif
QueuedCUDATracker qtrk(cfg, batchSize);
QueuedCPUTracker qtrkcpu(cfg);
ImageData img = ImageData::alloc(cfg.width,cfg.height);
bool cpucmp = true;
qtrk.EnableTextureCache(true);
srand(1);
// Generate ZLUT
int zplanes=100;
float zmin=0.5,zmax=3;
qtrk.SetRadialZLUT(0, 1, zplanes);
if (cpucmp) qtrkcpu.SetRadialZLUT(0, 1, zplanes);
if (haveZLUT) {
for (int x=0;x<zplanes;x++) {
vector2f center ( cfg.width/2, cfg.height/2 );
float s = zmin + (zmax-zmin) * x/(float)(zplanes-1);
GenerateTestImage(img, center.x, center.y, s, 0.0f);
WriteJPEGFile("qtrkzlutimg.jpg", img);
qtrk.BuildLUT(img.data,img.pitch(),QTrkFloat, 0, (vector2f*)(0));
if (cpucmp)
qtrkcpu.BuildLUT(img.data,img.pitch(),QTrkFloat, 0);
}
qtrk.FinalizeLUT();
if (cpucmp) qtrkcpu.FinalizeLUT();
// wait to finish ZLUT
while(true) {
int rc = qtrk.GetResultCount();
if (rc == zplanes) break;
Sleep(100);
dbgprintf(".");
}
if (cpucmp) {
while(qtrkcpu.GetResultCount() != zplanes);
}
}
float* zlut = new float[qtrk.cfg.zlut_radialsteps*zplanes];
qtrk.GetRadialZLUT(zlut);
if (cpucmp) {
float* zlutcpu = new float[qtrkcpu.cfg.zlut_radialsteps*zplanes];
qtrkcpu.GetRadialZLUT(zlutcpu);
WriteImageAsCSV("zlut-cpu.txt", zlutcpu, qtrkcpu.cfg.zlut_radialsteps, zplanes);
WriteImageAsCSV("zlut-gpu.txt", zlut, qtrkcpu.cfg.zlut_radialsteps, zplanes);
delete[] zlutcpu;
}
qtrk.ClearResults();
if (cpucmp) qtrkcpu.ClearResults();
FloatToJPEGFile ("qtrkzlutcuda.jpg", zlut, qtrk.cfg.zlut_radialsteps, zplanes);
delete[] zlut;
// Schedule images to localize on
dbgprintf("Benchmarking...\n", total);
GenerateTestImage(img, cfg.width/2, cfg.height/2, (zmin+zmax)/2, 0);
double tstart = GetPreciseTime();
int rc = 0, displayrc=0;
LocMode_t flags = (LocMode_t)(LT_NormalizeProfile |LT_QI| (haveZLUT ? LT_LocalizeZ : 0) );
qtrk.SetLocalizationMode(flags);
qtrkcpu.SetLocalizationMode(flags);
for (int n=0;n<total;n++) {
LocalizationJob jobInfo;
jobInfo.frame = n;
jobInfo.zlutIndex = 0;
qtrk.ScheduleLocalization((uchar*)img.data, cfg.width*sizeof(float), QTrkFloat,&jobInfo);
if (cpucmp) qtrkcpu.ScheduleLocalization((uchar*)img.data, cfg.width*sizeof(float), QTrkFloat, &jobInfo);
if (n % 10 == 0) {
rc = qtrk.GetResultCount();
while (displayrc<rc) {
if( displayrc%(total/10)==0) dbgprintf("Done: %d / %d\n", displayrc, total);
displayrc++;
}
}
}
if (cpucmp) qtrkcpu.Flush();
WaitForFinish(&qtrk, total);
// Measure speed
double tend = GetPreciseTime();
if (cpucmp) {
dbgprintf("waiting for cpu results..\n");
while (total != qtrkcpu.GetResultCount())
Sleep(10);
}
img.free();
const int NumResults = 20;
LocalizationResult results[NumResults], resultscpu[NumResults];
int rcount = ::min(NumResults,total);
for (int i=0;i<rcount;i++) {
qtrk.FetchResults(&results[i], 1);
if (cpucmp) qtrkcpu.FetchResults(&resultscpu[i], 1);
}
// if you wonder about this syntax, google C++ lambda functions
std::sort(results, results+rcount, [](LocalizationResult a, LocalizationResult b) -> bool { return a.job.frame > b.job.frame; });
if(cpucmp) std::sort(resultscpu, resultscpu+rcount, [](LocalizationResult a, LocalizationResult b) -> bool { return a.job.frame > b.job.frame; });
for (int i=0;i<rcount;i++) {
LocalizationResult& r = results[i];
dbgprintf("gpu [%d] x: %f, y: %f. z: %+g, COM: %f, %f\n", i,r.pos.x, r.pos.y, r.pos.z, r.firstGuess.x, r.firstGuess.y);
if (cpucmp) {
r = resultscpu[i];
dbgprintf("cpu [%d] x: %f, y: %f. z: %+g, COM: %f, %f\n", i,r.pos.x, r.pos.y, r.pos.z, r.firstGuess.x, r.firstGuess.y);
}
}
dbgprintf("Localization Speed: %d (img/s)\n", (int)( total/(tend-tstart) ));
}
void listDevices()
{
hipDeviceProp_t prop;
int dc;
hipGetDeviceCount(&dc);
for (int k=0;k<dc;k++) {
hipGetDeviceProperties(&prop, k);
dbgprintf("Device[%d] = %s\n", k, prop.name);
dbgprintf("\tMax texture width: %d\n" ,prop.maxTexture2D[0]);
}
}
__global__ void SimpleKernel(int N, float* a){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
for (int x=0;x<1000;x++)
a[idx] = asin(a[idx]+x);
}
}
void TestAsync()
{
int N =100000;
int nt = 32;
pinned_array<float> a(N);
// hipHostMalloc(&a, sizeof(float)*N, 0);
device_vec<float> A(N);
hipStream_t s0;
hipEvent_t done;
hipStreamCreate(&s0);
hipEventCreate(&done,0);
for (int x=0;x<N;x++)
a[x] = cos(x*0.01f);
for (int x=0;x<1;x++) {
{ MeasureTime mt("a->A"); A.copyToDevice(a.data(), N, true); }
{ MeasureTime mt("func(A)");
hipLaunchKernelGGL(( SimpleKernel), dim3(dim3( (N+nt-1)/nt )), dim3(dim3(nt)), 0, 0, N, A.data);
}
{ MeasureTime mt("A->a"); A.copyToHost(a.data(), true); }
}
hipEventRecord(done);
{
MeasureTime("sync..."); while (hipEventQuery(done) != hipSuccess);
}
hipStreamDestroy(s0);
hipEventDestroy(done);
}
__global__ void emptyKernel()
{}
float SpeedTest(const QTrkSettings& cfg, QueuedTracker* qtrk, int count, bool haveZLUT, LocMode_t locType, float* scheduleTime, bool gaincorrection=false)
{
ImageData img=ImageData::alloc(cfg.width,cfg.height);
srand(1);
// Generate ZLUT
int zplanes=100;
float zmin=0.5,zmax=3;
qtrk->SetRadialZLUT(0, 1, zplanes);
if (gaincorrection) EnableGainCorrection(qtrk);
if (haveZLUT) {
for (int x=0;x<zplanes;x++) {
vector2f center( cfg.width/2, cfg.height/2 );
float s = zmin + (zmax-zmin) * x/(float)(zplanes-1);
GenerateTestImage(img, center.x, center.y, s, 0.0f);
qtrk->BuildLUT(img.data,img.pitch(),QTrkFloat, 0);
}
qtrk->FinalizeLUT();
}
qtrk->ClearResults();
// Schedule images to localize on
dbgprintf("Benchmarking...\n", count);
GenerateTestImage(img, cfg.width/2, cfg.height/2, (zmin+zmax)/2, 0);
double tstart = GetPreciseTime();
int rc = 0, displayrc=0;
double maxScheduleTime = 0.0f;
double sumScheduleTime2 = 0.0f;
double sumScheduleTime = 0.0f;
qtrk->SetLocalizationMode(locType| (haveZLUT ? LT_LocalizeZ : 0));
for (int n=0;n<count;n++) {
double t0 = GetPreciseTime();
///qtrk->ScheduleLocalization((uchar*)image, cfg.width*sizeof(float), QTrkFloat, flags, n, 0, 0, 0, 0);
ROIPosition roipos[]={ {0,0} };
LocalizationJob job(n, 0, 0,0);
qtrk->ScheduleFrame((uchar*)img.data, cfg.width*sizeof(float),cfg.width,cfg.height, roipos, 1, QTrkFloat, &job);
double dt = GetPreciseTime() - t0;
maxScheduleTime = ::max(maxScheduleTime, dt);
sumScheduleTime += dt;
sumScheduleTime2 += dt*dt;
if (n % 10 == 0) {
rc = qtrk->GetResultCount();
while (displayrc<rc) {
if( displayrc%(count/10)==0) dbgprintf("Done: %d / %d\n", displayrc, count);
displayrc++;
}
}
}
WaitForFinish(qtrk, count);
// Measure speed
double tend = GetPreciseTime();
img.free();
float mean = sumScheduleTime / count;
float stdev = sqrt(sumScheduleTime2 / count - mean * mean);
dbgprintf("Scheduletime: Avg=%f, Max=%f, Stdev=%f\n", mean*1000, maxScheduleTime*1000, stdev*1000);
*scheduleTime = mean;
return count/(tend-tstart);
}
int NearestPowerOfTwo(int v)
{
int r=1;
while (r < v)
r *= 2;
if ( fabsf(r-v) < fabsf(r/2-v) )
return r;
return r/2;
}
int SmallestPowerOfTwo(int minval)
{
int r=1;
while (r < minval)
r *= 2;
return r;
}
struct SpeedInfo {
float speed_cpu, speed_gpu;
float sched_cpu, sched_gpu;
};
SpeedInfo SpeedCompareTest(int w, LocalizeModeEnum locMode, bool haveZLUT, int qi_iterations = 5)
{
int cudaBatchSize = 1024;
int count = 60000;
#ifdef _DEBUG
count = 100;
cudaBatchSize = 32;
#endif
LocMode_t locType = (LocMode_t)( locMode|LT_NormalizeProfile );
QTrkComputedConfig cfg;
cfg.width = cfg.height = w;
cfg.qi_iterations = qi_iterations;
cfg.qi_radial_coverage = 1.5f;
cfg.qi_angstep_factor = 1.5f;
cfg.qi_angular_coverage = 0.7f;
cfg.zlut_radial_coverage = 2.0f;
//std::vector<int> devices(1); devices[0]=1;
//SetCUDADevices(devices);
cfg.cuda_device = QTrkCUDA_UseAll;
cfg.numThreads = -1;
cfg.com_bgcorrection = 0.0f;
cfg.Update();
dbgprintf("Width: %d, QI radius: %f, radialsteps: %d\n", w, cfg.qi_maxradius, cfg.qi_radialsteps);
SpeedInfo info;
QueuedCPUTracker *cputrk = new QueuedCPUTracker(cfg);
info.speed_cpu = SpeedTest(cfg, cputrk, count, haveZLUT, locType, &info.sched_cpu, false);
delete cputrk;
QueuedCUDATracker *cudatrk = new QueuedCUDATracker(cfg, cudaBatchSize);
info.speed_gpu = SpeedTest(cfg, cudatrk, count, haveZLUT, locType, &info.sched_gpu, false);
//info.speed_gpu = SpeedTest(cfg, cudatrk, count, haveZLUT, locType, &info.sched_gpu);
std::string report = cudatrk->GetProfileReport();
delete cudatrk;
dbgprintf("CPU tracking speed: %d img/s\n", (int)info.speed_cpu);
dbgprintf("GPU tracking speed: %d img/s\n", (int)info.speed_gpu);
return info;
}
void ProfileSpeedVsROI(LocalizeModeEnum locMode, const char *outputcsv, bool haveZLUT, int qi_iterations)
{
std::vector<float> values;
for (int roi=20;roi<=180;roi+=10) { // same as BenchmarkROIAccuracy()
SpeedInfo info = SpeedCompareTest(roi, locMode, haveZLUT, qi_iterations);
values.push_back( roi);
values.push_back(info.speed_cpu);
values.push_back( info.speed_gpu);
}
const char *labels[] = { "ROI", "CPU", "CUDA" };
WriteImageAsCSV(outputcsv, &values[0], 3, values.size()/3, labels);
}
void CompareAccuracy (const char *lutfile)
{
QTrkSettings cfg;
cfg.width=150;
cfg.height=150;
cfg.numThreads=1;
auto cpu = RunTracker<QueuedCPUTracker> (lutfile, &cfg, false, "cpu", LT_QI);
auto gpu = RunTracker<QueuedCUDATracker>(lutfile, &cfg, false, "gpu", LT_QI);
// auto cpugc = RunTracker<QueuedCPUTracker>(lutfile, &cfg, true, "cpugc");
// auto gpugc = RunTracker<QueuedCUDATracker>(lutfile, &cfg, true, "gpugc");
for (int i=0;i<::min((int)cpu.output.size(),20);i++) {
dbgprintf("CPU-GPU: %f, %f\n", cpu.output[i].x-gpu.output[i].x,cpu.output[i].y-gpu.output[i].y);
}
/* dbgprintf("CPU\tGPU\tCPU(gc)\tGPU(gc)\n");
dbgprintf("St Dev. : CPU: %.2f\tGPU: %.2f\tCPU(gc)%.2f\tGPU(gc)%.2f\n", StDev(cpu).x, StDev(gpu).x, StDev(cpugc).x, StDev(gpugc).x);
dbgprintf("Mean err: CPU: %.2f\tGPU: %.2f\tCPU(gc)%.2f\tGPU(gc)%.2f\n", Mean(cpu).x, Mean(gpu).x, Mean(cpugc).x, Mean(gpugc).x);
*/
}
/*
texture<float, hipTextureType2D, hipReadModeElementType> test_tex(0, hipFilterModePoint); // Un-normalized
texture<float, hipTextureType2D, hipReadModeElementType> test_tex_lin(0, hipFilterModeLinear); // Un-normalized
__global__ void TestSampling(int n , cudaImageListf img, float *rtex, float *rtex2, float *rmem, float2* pts)
{
int idx = threadIdx.x+blockDim.x * blockIdx.x;
if (idx < n) {
float x = pts[idx].x;
float y = pts[idx].y;
int ii = 1;
rtex[idx] = tex2D(test_tex_lin, x+0.5f, y+0.5f+img.h*ii);
bool outside;
rtex2[idx] = img.interpolateFromTexture(test_tex, x, y, ii, outside);
rmem[idx] = img.interpolate(x,y,ii, outside);
}
}
void TestTextureFetch()
{
int w=8,h=4;
cudaImageListf img = cudaImageListf::alloc(w,h,2);
float* himg = new float[w*h*2];
int N=10;
std::vector<vector2f> pts(N);
for(int i=0;i<N;i++) {
pts[i]=vector2f( rand_uniform<float>() * (w-1), rand_uniform<float>() * (h-1) );
}
device_vec<vector2f> dpts;
dpts.copyToDevice(pts, false);
srand(1);
for (int i=0;i<w*h*2;i++)
himg[i]=i;
img.copyToDevice(himg,false);
img.bind(test_tex);
img.bind(test_tex_lin);
device_vec<float> rtex(N),rmem(N),rtex2(N);
int nt=32;
TestSampling<<< dim3( (N+nt-1)/nt ), dim3(nt) >>> (N, img, rtex.data,rtex2.data,rmem.data, (float2*)dpts.data);
img.unbind(test_tex_lin);
img.unbind(test_tex);
auto hmem = rmem.toVector();
auto htex = rtex.toVector();
auto htex2 = rtex2.toVector();
for (int x=0;x<N;x++) {
dbgprintf("[%.2f, %.2f]: %f (tex), %f(tex2), %f (mem). tex-mem: %f, tex2-mem: %f\n",
pts[x].x, pts[x].y, htex[x], htex2[x], hmem[x], htex[x]-hmem[x],htex2[x]-hmem[x]);
}
}
*/
void BasicQTrkTest()
{
QTrkComputedConfig cc;
cc.width = cc.height = 100;
cc.Update();
QueuedCUDATracker qtrk(cc);
float zmin=1,zmax=5;
ImageData img = ImageData::alloc(cc.width,cc.height);
float pos_x = cc.width/2 - 5;
float pos_y = cc.height/2 + 3;
GenerateTestImage(img, pos_x, pos_y, (zmin+zmax)/2, 0);
int N = 100000;
#ifdef _DEBUG
N = 10000;
#endif
double t = GetPreciseTime();
qtrk.SetLocalizationMode((LocMode_t)(LT_QI|LT_NormalizeProfile));
for (int i=0;i<N;i++)
{
LocalizationJob job ( i, 0, 0, 0);
qtrk.ScheduleLocalization((uchar*)img.data, sizeof(float)*cc.width, QTrkFloat, &job);
if(i%::max(1,(int)(N*0.1))==0) dbgprintf("Queued: %d / %d\n", i, N);
}
WaitForFinish(&qtrk, N);
t = GetPreciseTime() - t;
dbgprintf("Speed: %d imgs/s (Only QI, %d iterations)\n", (int)(N / t), cc.qi_iterations);
int count = 0;
while(qtrk.GetResultCount() != 0){
LocalizationResult res;
qtrk.FetchResults(&res,1);
if( res.pos.x > pos_x + 0.01f || res.pos.x < pos_x - 0.01f || res.pos.y > pos_y + 0.01f || res.pos.y < pos_y - 0.01f ){
if(count < 100)
dbgprintf("Location frame %d: (%02f,%02f)\n",res.job.frame, res.pos.x, res.pos.y);
count++;
}
}
dbgprintf("Errors: %d/%d (%f%%)\n", count, N, (float)100*count/N);
img.free();
}
void BasicQTrkTest_RM()
{
QTrkComputedConfig cc;
//cc.qi_iterations = 10;
cc.width = cc.height = 100;
cc.Update();
QueuedCUDATracker qtrk(cc);
float zmin=1,zmax=5;
ImageData img = ImageData::alloc(cc.width,cc.height);
// Positions to set
float pos_x = cc.width/2 - 5;
float pos_y = cc.height/2 + 3;
GenerateTestImage(img, pos_x, pos_y, (zmin+zmax)/2, 0);
int N = 100000;
#ifdef _DEBUG
N = 100000;
#endif
qtrk.SetLocalizationMode((LocMode_t)(LT_QI|LT_NormalizeProfile));
ResultManagerConfig RMcfg;
RMcfg.numBeads = 1;
RMcfg.numFrameInfoColumns = 0;
RMcfg.scaling = vector3f(1.0f,1.0f,1.0f);
RMcfg.offset = vector3f(0.0f,0.0f,0.0f);
RMcfg.writeInterval = 4000;
RMcfg.maxFramesInMemory = 0;
RMcfg.binaryOutput = false;
std::vector<std::string> colnames;
for(int ii = 0;ii<RMcfg.numFrameInfoColumns;ii++){
colnames.push_back(SPrintf("%d",ii));
}
outputter output(Files+Images);
ResultManager RM(
SPrintf("%s\\RMOutput.txt",output.folder.c_str()).c_str(),
SPrintf("%s\\RMFrameInfo.txt",output.folder.c_str()).c_str(),
&RMcfg, colnames);
RM.SetTracker(&qtrk);
double t = GetPreciseTime();
for (int i=0;i<N;i++)
{
LocalizationJob job ( i, 0, 0, 0);
qtrk.ScheduleLocalization((uchar*)img.data, sizeof(float)*cc.width, QTrkFloat, &job);
//if(i%::max(1,N/1000)==0) dbgprintf("Queued: %d / %d\n", i, N);
}
printf("\nDone queueing!\n");
// Tell the tracker to perform the localizations left in the queue regardless of batchSize
qtrk.Flush();
// Halt the test (=timer) until all localizations are done.
while(RM.GetFrameCounters().localizationsDone < N);
t = GetPreciseTime() - t;
// Tell the resultmanager to print the final available results regardless of writeInterval
RM.Flush();
while(RM.GetFrameCounters().lastSaveFrame != N);
dbgprintf("Speed: %d imgs/s (Only QI, %d iterations)\n", (int)(N / t), cc.qi_iterations);
img.free();
}
void TestGauss2D(bool calib)
{
int N=20, R=1000;
#ifdef _DEBUG
R=1;
#endif
std::vector<vector3f> rcpu = Gauss2DTest<QueuedCPUTracker>(N, R, calib);
std::vector<vector3f> rgpu = Gauss2DTest<QueuedCUDATracker>(N, R, calib);
for (int i=0;i<::min(20,N);i++) {
dbgprintf("[%d] CPU: X:%.5f, Y:%.5f\t;\tGPU: X:%.5f, Y:%.5f. \tDiff: X:%.5f, Y:%.5f\n",
i, rcpu[i].x, rcpu[i].y, rgpu[i].x, rgpu[i].y, rcpu[i].x-rgpu[i].x, rcpu[i].y-rgpu[i].y);
}
}
void TestRadialLUTGradientMethod()
{
}
std::vector< float > cmp_cpu_qi_prof;
std::vector< float > cmp_gpu_qi_prof;
std::vector< std::complex<float> > cmp_cpu_qi_fft_out;
std::vector< std::complex<float> > cmp_gpu_qi_fft_out;
void QICompare(const char *lutfile )
{
QTrkSettings cfg;
cfg.qi_iterations=1;
cfg.width = 150;
cfg.height = 150;
cfg.numThreads=1;
QueuedCUDATracker gpu(cfg, 1);
QueuedCPUTracker cpu(cfg);
ImageData lut=ReadJPEGFile(lutfile);
ImageData img=ImageData::alloc(cfg.width,cfg.height);
srand(0);
const int N=1;
gpu.SetLocalizationMode(LT_QI);
cpu.SetLocalizationMode(LT_QI);
for (int i=0;i<N;i++) {
LocalizationJob job(i, 0, 0, 0);
vector3f pos(cfg.width/2,cfg.height/2, lut.h/2);
pos.x += rand_uniform<float>();
pos.y += rand_uniform<float>();
GenerateImageFromLUT(&img, &lut, 1, cfg.width/2, pos);
gpu.ScheduleLocalization( (uchar*)img.data, sizeof(float)*img.w, QTrkFloat, &job);
cpu.ScheduleLocalization( (uchar*)img.data, sizeof(float)*img.w, QTrkFloat, &job);
}
gpu.Flush();
cpu.Flush();
while(cpu.GetResultCount() != N || gpu.GetResultCount() != N );
ImageData dbgImg = cpu.DebugImage(0);
FloatToJPEGFile("qidbgimg.jpg", dbgImg.data, dbgImg.w, dbgImg.h);
auto rcpu = FetchResults(&cpu), rgpu = FetchResults(&gpu);
for (int i=0;i<N;i++) {
vector3f d=rcpu[i]-rgpu[i];
dbgprintf("[%d]: CPU: x=%f, y=%f. GPU: x=%f, y=%f.\tGPU-CPU: x:%f, y:%f\n", i, rcpu[i].x, rcpu[i].y, rgpu[i].x, rgpu[i].y, d.x,d.y);
}
// Profiles
for(uint i=0;i<cmp_cpu_qi_prof.size();i++) {
dbgprintf("QIPROF[%d]. CPU=%f, GPU=%f, Diff: %f\n", i, cmp_cpu_qi_prof[i], cmp_gpu_qi_prof[i], cmp_gpu_qi_prof[i]-cmp_cpu_qi_prof[i]);
}
// FFT out
for(uint i=0;i<cmp_cpu_qi_fft_out.size();i++) {
dbgprintf("fft-out[%d]. CPU=%f, GPU=%f, Diff: %f\n", i, cmp_cpu_qi_fft_out[i].real(), cmp_gpu_qi_fft_out[i].real(), cmp_gpu_qi_fft_out[i].real()-cmp_cpu_qi_fft_out[i].real());
}
img.free();
lut.free();
}
void TestBenchmarkLUT()
{
BenchmarkLUT bml("refbeadlut.jpg");
ImageData img=ImageData::alloc(120,120);
ImageData lut = ImageData::alloc(bml.lut_w, bml.lut_h);
bml.GenerateLUT(&lut);
WriteJPEGFile("refbeadlut-lutsmp.jpg", lut);
lut.free();
bml.GenerateSample(&img, vector3f(img.w/2,img.h/2,bml.lut_h/2), 0, img.w/2-5);
WriteJPEGFile("refbeadlut-bmsmp.jpg", img);
img.free();
}
template<typename T>
void check_arg(const std::vector<std::string>& args, const char *name, T *param)
{
for (uint i=0;i<args.size();i++) {
if (args[i] == name) {
*param = (T)atof(args[i+1].c_str());
return;
}
}
}
void check_strarg(const std::vector<std::string>& args, const char *name, std::string* param)
{
for (uint i=0;i<args.size();i++) {
if (args[i] == name) {
*param = args[i+1];
return;
}
}
}
int CmdLineRun(int argc, char*argv[])
{
QTrkSettings cfg;
std::vector<std::string> args(argc-1);
for (int i=0;i<argc-1;i++)
args[i]=argv[i+1];
check_arg(args, "roi", &cfg.width);
cfg.height=cfg.width;
int count=100;
check_arg(args, "count", &count);
std::string outputfile, fixlutfile, inputposfile, bmlutfile, rescaledlutfile;
std::string radialWeightsFile;
check_strarg(args, "output", &outputfile);
check_strarg(args, "fixlut", &fixlutfile);
check_strarg(args, "bmlut", &bmlutfile);
check_strarg(args, "inputpos", &inputposfile);
check_strarg(args, "regenlut", &rescaledlutfile);
check_strarg(args, "radweights", &radialWeightsFile);
std::string crlboutput;
check_strarg(args, "crlb", &crlboutput);
std::vector< vector3f > inputPos;
if (!inputposfile.empty()) {
inputPos = ReadVector3CSV(inputposfile.c_str());
count = inputPos.size();
}
check_arg(args, "zlut_minradius", &cfg.zlut_minradius);
check_arg(args, "zlut_radial_coverage", &cfg.zlut_radial_coverage);
check_arg(args, "zlut_angular_coverage", &cfg.zlut_angular_coverage);
check_arg(args, "zlut_roi_coverage", &cfg.zlut_roi_coverage);
check_arg(args, "qi_iterations", &cfg.qi_iterations);
check_arg(args, "qi_minradius", &cfg.qi_minradius);
check_arg(args, "qi_radial_coverage", &cfg.qi_radial_coverage);
check_arg(args, "qi_angular_coverage", &cfg.qi_angular_coverage);
check_arg(args, "qi_roi_coverage", &cfg.qi_roi_coverage);
check_arg(args, "qi_angstep_factor", &cfg.qi_angstep_factor);
check_arg(args, "downsample", &cfg.downsample);
int zlutAlign=0;
check_arg(args, "zlutalign", &zlutAlign);
float pixelmax = 28 * 255;
check_arg(args, "pixelmax", &pixelmax);
std::string lutsmpfile;
check_strarg(args, "lutsmpfile", &lutsmpfile);
int cuda=1;
check_arg(args, "cuda", &cuda);
QueuedTracker* qtrk;
if (cuda) qtrk = new QueuedCUDATracker(cfg);
else qtrk = new QueuedCPUTracker(cfg);
ImageData lut;
BenchmarkLUT bmlut;
if (!fixlutfile.empty())
{
lut = ReadJPEGFile(fixlutfile.c_str());
if(!rescaledlutfile.empty()) {
// rescaling allowed
ImageData newlut;
ResampleLUT(qtrk, &lut, lut.h, &newlut, rescaledlutfile.c_str());
lut.free();
lut=newlut;
}
else if (lut.w != qtrk->cfg.zlut_radialsteps) {
lut.free();
dbgprintf("Invalid LUT size (%d). Expecting %d radialsteps\n", lut.w, qtrk->cfg.zlut_radialsteps);
delete qtrk;
return -1;
}
qtrk->SetRadialZLUT(lut.data,1,lut.h);
}
else
{
if (bmlutfile.empty()) {
delete qtrk;
dbgprintf("No lut file\n");
return -1;
}
bmlut.Load(bmlutfile.c_str());
lut = ImageData::alloc(qtrk->cfg.zlut_radialsteps, bmlut.lut_h);
bmlut.GenerateLUT(&lut);
if (!rescaledlutfile.empty())
WriteJPEGFile(rescaledlutfile.c_str(), lut);
qtrk->SetRadialZLUT(lut.data,1,lut.h);
}
if (inputPos.empty()) {
inputPos.resize(count);
for (int i=0;i<count;i++){
inputPos[i]=vector3f(cfg.width/2,cfg.height/2,lut.h/2);
}
}
if (!radialWeightsFile.empty())
{
auto rwd = ReadCSV(radialWeightsFile.c_str());
std::vector<float> rw(rwd.size());
if (rw.size() == qtrk->cfg.zlut_radialsteps)
qtrk->SetRadialWeights(&rw[0]);
else {
dbgprintf("Invalid # radial weights");
delete qtrk;
}
}
std::vector<ImageData> imgs (inputPos.size());
std::vector<vector3f> crlb(inputPos.size());
for (uint i=0;i<inputPos.size();i++) {
imgs[i]=ImageData::alloc(cfg.width, cfg.height);
//vector3f pos = centerpos + range*vector3f(rand_uniform<float>()-0.5f, rand_uniform<float>()-0.5f, rand_uniform<float>()-0.5f)*2;
auto p = inputPos[i];
if (!bmlut.lut_w) {
GenerateImageFromLUT(&imgs[i], &lut, qtrk->cfg.zlut_minradius, qtrk->cfg.zlut_maxradius, p, false);
if (!crlboutput.empty()) {
SampleFisherMatrix sfm(pixelmax);
crlb[i]=sfm.Compute(p, vector3f(1,1,1)*0.001f, lut, qtrk->cfg.width,qtrk->cfg.height, qtrk->cfg.zlut_minradius, qtrk->cfg.zlut_maxradius).Inverse().diag();
}
} else
bmlut.GenerateSample(&imgs[i], p, qtrk->cfg.zlut_minradius, qtrk->cfg.zlut_maxradius);
imgs[i].normalize();
if (pixelmax > 0) ApplyPoissonNoise(imgs[i], pixelmax, 255);
if(i==0 && !lutsmpfile.empty()) WriteJPEGFile(lutsmpfile.c_str(), imgs[i]);
}
int locMode = LT_LocalizeZ | LT_NormalizeProfile | LT_LocalizeZWeighted;
if (qtrk->cfg.qi_iterations > 0)
locMode |= LT_QI;
if (zlutAlign)
locMode |= LT_ZLUTAlign;
qtrk->SetLocalizationMode((LocMode_t)locMode);
double tstart=GetPreciseTime();
for (uint i=0;i<inputPos.size();i++)
{
LocalizationJob job(i, 0, 0, 0);
qtrk->ScheduleImageData(&imgs[i], &job);
}
WaitForFinish(qtrk, inputPos.size());
double tend = GetPreciseTime();
std::vector<vector3f> results(inputPos.size());
for (uint i=0;i<inputPos.size();i++) {
LocalizationResult r;
qtrk->FetchResults(&r,1);
results[r.job.frame]=r.pos;
}
vector3f meanErr, stdevErr;
MeanStDevError(inputPos, results, meanErr, stdevErr);
dbgprintf("Mean err X=%f,Z=%f. St deviation: X=%f,Z=%f\n", meanErr.x,meanErr.y,stdevErr.x,stdevErr.z);
if (!crlboutput.empty())
WriteTrace(crlboutput, &crlb[0], crlb.size());
WriteTrace(outputfile, &results[0], inputPos.size());
if (lut.data) lut.free();
delete qtrk;
return 0;
}
void BuildZLUT(std::string folder, outputter* output)
{
int ROISize = 100;
std::vector<BeadPos> beads = read_beadlist(SPrintf("%sbeadlist.txt",folder.c_str()));
int numImgInStack = 1218;
int numPositions = 1001; // 10nm/frame
float range = 10.0f; // total range 25.0 um -> 35.0 um
float umPerImg = range/numImgInStack;
QTrkComputedConfig cfg;
cfg.width=cfg.height = ROISize;
cfg.qi_angstep_factor = 1;
cfg.qi_iterations = 6;
cfg.qi_angular_coverage = 0.7f;
cfg.qi_roi_coverage = 1;
cfg.qi_radial_coverage = 1.5f;
cfg.qi_minradius=0;
cfg.zlut_minradius=0;
cfg.zlut_angular_coverage = 0.7f;
cfg.zlut_roi_coverage = 1;
cfg.zlut_radial_coverage = 1.5f;
cfg.zlut_minradius = 0;
cfg.qi_minradius = 0;
cfg.com_bgcorrection = 0;
cfg.xc1_profileLength = ROISize*0.8f;
cfg.xc1_profileWidth = ROISize*0.2f;
cfg.xc1_iterations = 1;
cfg.Update();
cfg.WriteToFile();
int zplanes = 50;
QueuedCUDATracker* qtrk = new QueuedCUDATracker(cfg);
//qtrk->SetLocalizationMode(LT_NormalizeProfile | LT_QI);
qtrk->SetRadialZLUT(0, beads.size(), zplanes);
qtrk->BeginLUT(0);
int pxPerBead = ROISize*ROISize;
int memSizePerBead = pxPerBead*sizeof(float);
int startFrame = 400;
for(int plane = 0; plane < zplanes; plane++){
output->outputString(SPrintf("Frame %d/%d",plane+1,zplanes),true);
int frameNum = startFrame+(int)(numImgInStack-startFrame)*((float)plane/zplanes);
std::string file = SPrintf("%s\img%05d.jpg",folder.c_str(),frameNum);
ImageData frame = ReadJPEGFile(file.c_str());
float* data = new float[beads.size()*pxPerBead];
for(uint ii = 0; ii < beads.size(); ii++){
vector2f pos;
pos.x = beads.at(ii).x - ROISize/2;
pos.y = beads.at(ii).y - ROISize/2;
ImageData crop = CropImage(frame,pos.x,pos.y,ROISize,ROISize);
//output->outputImage(crop,SPrintf("%d-%05d",ii,plane));
memcpy(data+ii*pxPerBead,crop.data,memSizePerBead);
crop.free();
}
/*
// To verify seperate frame bead stack generation
output->newFile(SPrintf("data-plane-%d",plane));
output->outputArray(data,beads.size()*pxPerBead);
ImageData allBeads = ImageData(data,ROISize,ROISize*beads.size());
output->outputImage(allBeads,SPrintf("allBeads-%05d",frameNum));//*/
qtrk->BuildLUT(data, sizeof(float)*ROISize, QTrkFloat, plane);
frame.free();
delete[] data;
}
qtrk->FinalizeLUT();
float* luts = new float[beads.size()*(zplanes*cfg.zlut_radialsteps)];
qtrk->GetRadialZLUT(luts);
for(int ii = 0; ii < beads.size(); ii++){
ImageData lut = ImageData::alloc(cfg.zlut_radialsteps, zplanes);
memcpy(lut.data, &luts[ii*cfg.zlut_radialsteps*zplanes], cfg.zlut_radialsteps*zplanes*sizeof(float));
//memcpy(lut.data,qtrk->GetZLUTByIndex(ii),cfg.zlut_radialsteps*zplanes*sizeof(float));
//output->outputImage(lut,SPrintf("lut%03d,%d",beads.at(ii).x,beads.at(ii).y));
output->outputImage(lut, SPrintf("lut%03d",ii));
lut.free();
}
qtrk->Flush();
delete qtrk;
}
int main(int argc, char *argv[])
{
//listDevices();
printf("%d, %d\n",sizeof(long),sizeof(int));
if (argc > 1)
{
return CmdLineRun(argc, argv);
}
try {
// outputter output(Files+Images);
// BuildZLUT("C:\\TestImages\\TestMovie150507_2\\images\\jpg\\Zstack\\", &output);
BasicQTrkTest();
// BasicQTrkTest_RM();
// TestBenchmarkLUT();
// testLinearArray();
// TestTextureFetch();
// TestGauss2D(true);
// MultipleLUTTest();
// TestSurfaceReadWrite();
// TestImage4D();
// TestImage4DMemory();
// TestImageLUT("../cputrack-test/lut000.jpg");
// TestRadialLUTGradientMethod();
// BenchmarkParams();
// TestTextureFetch();
// QICompare("../cputrack-test/lut000.jpg");
// TestCMOSNoiseInfluence<QueuedCUDATracker>("../cputrack-test/lut000.jpg");
// CompareAccuracy("../cputrack-test/lut000.jpg");
// QTrkCompareTest();
/*
ProfileSpeedVsROI(LT_OnlyCOM, "speeds-com.txt", false, 0);
ProfileSpeedVsROI(LT_OnlyCOM, "speeds-com-z.txt", true, 0);
ProfileSpeedVsROI(LT_XCor1D, "speeds-xcor.txt", true, 0);
for (int qi_it=1;qi_it<=4;qi_it++) {
ProfileSpeedVsROI(LT_QI, SPrintf("speeds-qi-%d-iterations.txt",qi_it).c_str(), true, qi_it);
}*/
/* auto info = SpeedCompareTest(80, false);
auto infogc = SpeedCompareTest(80, true);
dbgprintf("[gainc=false] CPU: %f, GPU: %f\n", info.speed_cpu, info.speed_gpu);
dbgprintf("[gainc=true] CPU: %f, GPU: %f\n", infogc.speed_cpu, infogc.speed_gpu);
*/
} catch (const std::exception& e) {
dbgprintf("Exception: %s\n", e.what());
}
system("pause");
return 0;
} | 3352965b61de766f100919811f38d60134e2eefb.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "std_incl.h"
#include "utils.h"
#include <cassert>
#include <cstdlib>
#include <stdio.h>
#include <windows.h>
#include <cstdarg>
#include <valarray>
#include "random_distr.h"
#include <stdint.h>
#include "gpu_utils.h"
#include "QueuedCUDATracker.h"
#include "QueuedCPUTracker.h"
#include "../cputrack-test/SharedTests.h"
#include "BenchmarkLUT.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include "FisherMatrix.h"
#include "testutils.h"
#include "ResultManager.h"
#include "ExtractBeadImages.h"
void BenchmarkParams();
std::string getPath(const char *file)
{
std::string s = file;
int pos = s.length()-1;
while (pos>0 && s[pos]!='\\' && s[pos]!= '/' )
pos--;
return s.substr(0, pos);
}
inline __device__ float2 mul_conjugate(float2 a, float2 b)
{
float2 r;
r.x = a.x*b.x + a.y*b.y;
r.y = a.y*b.x - a.x*b.y;
return r;
}
void ShowCUDAError() {
cudaError_t err = cudaGetLastError();
dbgprintf("Cuda error: %s\n", cudaGetErrorString(err));
}
__shared__ float cudaSharedMem[];
__device__ float compute(int idx, float* buf, int s)
{
// some random calcs to make the kernel unempty
float k=0.0f;
for (int x=0;x<s;x++ ){
k+=cosf(x*0.1f*idx);
buf[x]=k;
}
for (int x=0;x<s/2;x++){
buf[x]=buf[x]*buf[x];
}
float sum=0.0f;
for (int x=s-1;x>=1;x--) {
sum += buf[x-1]/(fabsf(buf[x])+0.1f);
}
return sum;
}
__global__ void testWithGlobal(int n, int s, float* result, float* buf) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
result [idx] = compute(idx, &buf [idx * s],s);
}
}
__global__ void testWithShared(int n, int s, float* result) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
result [idx] = compute(idx, &cudaSharedMem[threadIdx.x * s],s);
}
}
void TestSharedMem()
{
int n=100, s=200;
dim3 nthreads(32), nblocks( (n+nthreads.x-1)/nthreads.x);
device_vec<float> buf(n*s);
device_vec<float> result_s(n), result_g(n);
double t0 = GetPreciseTime();
testWithGlobal<<<nblocks,nthreads>>>(n,s,result_g.data,buf.data);
cudaDeviceSynchronize();
double t1 = GetPreciseTime();
testWithShared <<<nblocks,nthreads,s*sizeof(float)*nthreads.x>>>(n,s,result_s.data);
cudaDeviceSynchronize();
double t2 = GetPreciseTime();
std::vector<float> rs = result_s, rg = result_g;
for (int x=0;x<n;x++) {
dbgprintf("result_s[%d]=%f. result_g[%d]=%f\n", x,rs[x], x,rg[x]);
}
dbgprintf("Speed of shared comp: %f, speed of global comp: %f\n", n/(t2-t1), n/(t1-t0));
}
void QTrkCompareTest()
{
QTrkSettings cfg;
cfg.width = cfg.height = 40;
cfg.qi_iterations = 1;
cfg.xc1_iterations = 2;
cfg.xc1_profileLength = 64;
cfg.numThreads = -1;
cfg.com_bgcorrection = 0.0f;
bool haveZLUT = false;
#ifdef _DEBUG
cfg.numThreads = 2;
cfg.qi_iterations=1;
int total= 10;
int batchSize = 2;
haveZLUT=false;
#else
cfg.numThreads = 4;
int total = 10000;
int batchSize = 512;
#endif
QueuedCUDATracker qtrk(cfg, batchSize);
QueuedCPUTracker qtrkcpu(cfg);
ImageData img = ImageData::alloc(cfg.width,cfg.height);
bool cpucmp = true;
qtrk.EnableTextureCache(true);
srand(1);
// Generate ZLUT
int zplanes=100;
float zmin=0.5,zmax=3;
qtrk.SetRadialZLUT(0, 1, zplanes);
if (cpucmp) qtrkcpu.SetRadialZLUT(0, 1, zplanes);
if (haveZLUT) {
for (int x=0;x<zplanes;x++) {
vector2f center ( cfg.width/2, cfg.height/2 );
float s = zmin + (zmax-zmin) * x/(float)(zplanes-1);
GenerateTestImage(img, center.x, center.y, s, 0.0f);
WriteJPEGFile("qtrkzlutimg.jpg", img);
qtrk.BuildLUT(img.data,img.pitch(),QTrkFloat, 0, (vector2f*)(0));
if (cpucmp)
qtrkcpu.BuildLUT(img.data,img.pitch(),QTrkFloat, 0);
}
qtrk.FinalizeLUT();
if (cpucmp) qtrkcpu.FinalizeLUT();
// wait to finish ZLUT
while(true) {
int rc = qtrk.GetResultCount();
if (rc == zplanes) break;
Sleep(100);
dbgprintf(".");
}
if (cpucmp) {
while(qtrkcpu.GetResultCount() != zplanes);
}
}
float* zlut = new float[qtrk.cfg.zlut_radialsteps*zplanes];
qtrk.GetRadialZLUT(zlut);
if (cpucmp) {
float* zlutcpu = new float[qtrkcpu.cfg.zlut_radialsteps*zplanes];
qtrkcpu.GetRadialZLUT(zlutcpu);
WriteImageAsCSV("zlut-cpu.txt", zlutcpu, qtrkcpu.cfg.zlut_radialsteps, zplanes);
WriteImageAsCSV("zlut-gpu.txt", zlut, qtrkcpu.cfg.zlut_radialsteps, zplanes);
delete[] zlutcpu;
}
qtrk.ClearResults();
if (cpucmp) qtrkcpu.ClearResults();
FloatToJPEGFile ("qtrkzlutcuda.jpg", zlut, qtrk.cfg.zlut_radialsteps, zplanes);
delete[] zlut;
// Schedule images to localize on
dbgprintf("Benchmarking...\n", total);
GenerateTestImage(img, cfg.width/2, cfg.height/2, (zmin+zmax)/2, 0);
double tstart = GetPreciseTime();
int rc = 0, displayrc=0;
LocMode_t flags = (LocMode_t)(LT_NormalizeProfile |LT_QI| (haveZLUT ? LT_LocalizeZ : 0) );
qtrk.SetLocalizationMode(flags);
qtrkcpu.SetLocalizationMode(flags);
for (int n=0;n<total;n++) {
LocalizationJob jobInfo;
jobInfo.frame = n;
jobInfo.zlutIndex = 0;
qtrk.ScheduleLocalization((uchar*)img.data, cfg.width*sizeof(float), QTrkFloat,&jobInfo);
if (cpucmp) qtrkcpu.ScheduleLocalization((uchar*)img.data, cfg.width*sizeof(float), QTrkFloat, &jobInfo);
if (n % 10 == 0) {
rc = qtrk.GetResultCount();
while (displayrc<rc) {
if( displayrc%(total/10)==0) dbgprintf("Done: %d / %d\n", displayrc, total);
displayrc++;
}
}
}
if (cpucmp) qtrkcpu.Flush();
WaitForFinish(&qtrk, total);
// Measure speed
double tend = GetPreciseTime();
if (cpucmp) {
dbgprintf("waiting for cpu results..\n");
while (total != qtrkcpu.GetResultCount())
Sleep(10);
}
img.free();
const int NumResults = 20;
LocalizationResult results[NumResults], resultscpu[NumResults];
int rcount = std::min(NumResults,total);
for (int i=0;i<rcount;i++) {
qtrk.FetchResults(&results[i], 1);
if (cpucmp) qtrkcpu.FetchResults(&resultscpu[i], 1);
}
// if you wonder about this syntax, google C++ lambda functions
std::sort(results, results+rcount, [](LocalizationResult a, LocalizationResult b) -> bool { return a.job.frame > b.job.frame; });
if(cpucmp) std::sort(resultscpu, resultscpu+rcount, [](LocalizationResult a, LocalizationResult b) -> bool { return a.job.frame > b.job.frame; });
for (int i=0;i<rcount;i++) {
LocalizationResult& r = results[i];
dbgprintf("gpu [%d] x: %f, y: %f. z: %+g, COM: %f, %f\n", i,r.pos.x, r.pos.y, r.pos.z, r.firstGuess.x, r.firstGuess.y);
if (cpucmp) {
r = resultscpu[i];
dbgprintf("cpu [%d] x: %f, y: %f. z: %+g, COM: %f, %f\n", i,r.pos.x, r.pos.y, r.pos.z, r.firstGuess.x, r.firstGuess.y);
}
}
dbgprintf("Localization Speed: %d (img/s)\n", (int)( total/(tend-tstart) ));
}
void listDevices()
{
cudaDeviceProp prop;
int dc;
cudaGetDeviceCount(&dc);
for (int k=0;k<dc;k++) {
cudaGetDeviceProperties(&prop, k);
dbgprintf("Device[%d] = %s\n", k, prop.name);
dbgprintf("\tMax texture width: %d\n" ,prop.maxTexture2D[0]);
}
}
__global__ void SimpleKernel(int N, float* a){
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N) {
for (int x=0;x<1000;x++)
a[idx] = asin(a[idx]+x);
}
}
void TestAsync()
{
int N =100000;
int nt = 32;
pinned_array<float> a(N);
// cudaMallocHost(&a, sizeof(float)*N, 0);
device_vec<float> A(N);
cudaStream_t s0;
cudaEvent_t done;
cudaStreamCreate(&s0);
cudaEventCreate(&done,0);
for (int x=0;x<N;x++)
a[x] = cos(x*0.01f);
for (int x=0;x<1;x++) {
{ MeasureTime mt("a->A"); A.copyToDevice(a.data(), N, true); }
{ MeasureTime mt("func(A)");
SimpleKernel<<<dim3( (N+nt-1)/nt ), dim3(nt)>>>(N, A.data);
}
{ MeasureTime mt("A->a"); A.copyToHost(a.data(), true); }
}
cudaEventRecord(done);
{
MeasureTime("sync..."); while (cudaEventQuery(done) != cudaSuccess);
}
cudaStreamDestroy(s0);
cudaEventDestroy(done);
}
__global__ void emptyKernel()
{}
float SpeedTest(const QTrkSettings& cfg, QueuedTracker* qtrk, int count, bool haveZLUT, LocMode_t locType, float* scheduleTime, bool gaincorrection=false)
{
ImageData img=ImageData::alloc(cfg.width,cfg.height);
srand(1);
// Generate ZLUT
int zplanes=100;
float zmin=0.5,zmax=3;
qtrk->SetRadialZLUT(0, 1, zplanes);
if (gaincorrection) EnableGainCorrection(qtrk);
if (haveZLUT) {
for (int x=0;x<zplanes;x++) {
vector2f center( cfg.width/2, cfg.height/2 );
float s = zmin + (zmax-zmin) * x/(float)(zplanes-1);
GenerateTestImage(img, center.x, center.y, s, 0.0f);
qtrk->BuildLUT(img.data,img.pitch(),QTrkFloat, 0);
}
qtrk->FinalizeLUT();
}
qtrk->ClearResults();
// Schedule images to localize on
dbgprintf("Benchmarking...\n", count);
GenerateTestImage(img, cfg.width/2, cfg.height/2, (zmin+zmax)/2, 0);
double tstart = GetPreciseTime();
int rc = 0, displayrc=0;
double maxScheduleTime = 0.0f;
double sumScheduleTime2 = 0.0f;
double sumScheduleTime = 0.0f;
qtrk->SetLocalizationMode(locType| (haveZLUT ? LT_LocalizeZ : 0));
for (int n=0;n<count;n++) {
double t0 = GetPreciseTime();
///qtrk->ScheduleLocalization((uchar*)image, cfg.width*sizeof(float), QTrkFloat, flags, n, 0, 0, 0, 0);
ROIPosition roipos[]={ {0,0} };
LocalizationJob job(n, 0, 0,0);
qtrk->ScheduleFrame((uchar*)img.data, cfg.width*sizeof(float),cfg.width,cfg.height, roipos, 1, QTrkFloat, &job);
double dt = GetPreciseTime() - t0;
maxScheduleTime = std::max(maxScheduleTime, dt);
sumScheduleTime += dt;
sumScheduleTime2 += dt*dt;
if (n % 10 == 0) {
rc = qtrk->GetResultCount();
while (displayrc<rc) {
if( displayrc%(count/10)==0) dbgprintf("Done: %d / %d\n", displayrc, count);
displayrc++;
}
}
}
WaitForFinish(qtrk, count);
// Measure speed
double tend = GetPreciseTime();
img.free();
float mean = sumScheduleTime / count;
float stdev = sqrt(sumScheduleTime2 / count - mean * mean);
dbgprintf("Scheduletime: Avg=%f, Max=%f, Stdev=%f\n", mean*1000, maxScheduleTime*1000, stdev*1000);
*scheduleTime = mean;
return count/(tend-tstart);
}
int NearestPowerOfTwo(int v)
{
int r=1;
while (r < v)
r *= 2;
if ( fabsf(r-v) < fabsf(r/2-v) )
return r;
return r/2;
}
int SmallestPowerOfTwo(int minval)
{
int r=1;
while (r < minval)
r *= 2;
return r;
}
struct SpeedInfo {
float speed_cpu, speed_gpu;
float sched_cpu, sched_gpu;
};
SpeedInfo SpeedCompareTest(int w, LocalizeModeEnum locMode, bool haveZLUT, int qi_iterations = 5)
{
int cudaBatchSize = 1024;
int count = 60000;
#ifdef _DEBUG
count = 100;
cudaBatchSize = 32;
#endif
LocMode_t locType = (LocMode_t)( locMode|LT_NormalizeProfile );
QTrkComputedConfig cfg;
cfg.width = cfg.height = w;
cfg.qi_iterations = qi_iterations;
cfg.qi_radial_coverage = 1.5f;
cfg.qi_angstep_factor = 1.5f;
cfg.qi_angular_coverage = 0.7f;
cfg.zlut_radial_coverage = 2.0f;
//std::vector<int> devices(1); devices[0]=1;
//SetCUDADevices(devices);
cfg.cuda_device = QTrkCUDA_UseAll;
cfg.numThreads = -1;
cfg.com_bgcorrection = 0.0f;
cfg.Update();
dbgprintf("Width: %d, QI radius: %f, radialsteps: %d\n", w, cfg.qi_maxradius, cfg.qi_radialsteps);
SpeedInfo info;
QueuedCPUTracker *cputrk = new QueuedCPUTracker(cfg);
info.speed_cpu = SpeedTest(cfg, cputrk, count, haveZLUT, locType, &info.sched_cpu, false);
delete cputrk;
QueuedCUDATracker *cudatrk = new QueuedCUDATracker(cfg, cudaBatchSize);
info.speed_gpu = SpeedTest(cfg, cudatrk, count, haveZLUT, locType, &info.sched_gpu, false);
//info.speed_gpu = SpeedTest(cfg, cudatrk, count, haveZLUT, locType, &info.sched_gpu);
std::string report = cudatrk->GetProfileReport();
delete cudatrk;
dbgprintf("CPU tracking speed: %d img/s\n", (int)info.speed_cpu);
dbgprintf("GPU tracking speed: %d img/s\n", (int)info.speed_gpu);
return info;
}
void ProfileSpeedVsROI(LocalizeModeEnum locMode, const char *outputcsv, bool haveZLUT, int qi_iterations)
{
std::vector<float> values;
for (int roi=20;roi<=180;roi+=10) { // same as BenchmarkROIAccuracy()
SpeedInfo info = SpeedCompareTest(roi, locMode, haveZLUT, qi_iterations);
values.push_back( roi);
values.push_back(info.speed_cpu);
values.push_back( info.speed_gpu);
}
const char *labels[] = { "ROI", "CPU", "CUDA" };
WriteImageAsCSV(outputcsv, &values[0], 3, values.size()/3, labels);
}
void CompareAccuracy (const char *lutfile)
{
QTrkSettings cfg;
cfg.width=150;
cfg.height=150;
cfg.numThreads=1;
auto cpu = RunTracker<QueuedCPUTracker> (lutfile, &cfg, false, "cpu", LT_QI);
auto gpu = RunTracker<QueuedCUDATracker>(lutfile, &cfg, false, "gpu", LT_QI);
// auto cpugc = RunTracker<QueuedCPUTracker>(lutfile, &cfg, true, "cpugc");
// auto gpugc = RunTracker<QueuedCUDATracker>(lutfile, &cfg, true, "gpugc");
for (int i=0;i<std::min((int)cpu.output.size(),20);i++) {
dbgprintf("CPU-GPU: %f, %f\n", cpu.output[i].x-gpu.output[i].x,cpu.output[i].y-gpu.output[i].y);
}
/* dbgprintf("CPU\tGPU\tCPU(gc)\tGPU(gc)\n");
dbgprintf("St Dev. : CPU: %.2f\tGPU: %.2f\tCPU(gc)%.2f\tGPU(gc)%.2f\n", StDev(cpu).x, StDev(gpu).x, StDev(cpugc).x, StDev(gpugc).x);
dbgprintf("Mean err: CPU: %.2f\tGPU: %.2f\tCPU(gc)%.2f\tGPU(gc)%.2f\n", Mean(cpu).x, Mean(gpu).x, Mean(cpugc).x, Mean(gpugc).x);
*/
}
/*
texture<float, cudaTextureType2D, cudaReadModeElementType> test_tex(0, cudaFilterModePoint); // Un-normalized
texture<float, cudaTextureType2D, cudaReadModeElementType> test_tex_lin(0, cudaFilterModeLinear); // Un-normalized
__global__ void TestSampling(int n , cudaImageListf img, float *rtex, float *rtex2, float *rmem, float2* pts)
{
int idx = threadIdx.x+blockDim.x * blockIdx.x;
if (idx < n) {
float x = pts[idx].x;
float y = pts[idx].y;
int ii = 1;
rtex[idx] = tex2D(test_tex_lin, x+0.5f, y+0.5f+img.h*ii);
bool outside;
rtex2[idx] = img.interpolateFromTexture(test_tex, x, y, ii, outside);
rmem[idx] = img.interpolate(x,y,ii, outside);
}
}
void TestTextureFetch()
{
int w=8,h=4;
cudaImageListf img = cudaImageListf::alloc(w,h,2);
float* himg = new float[w*h*2];
int N=10;
std::vector<vector2f> pts(N);
for(int i=0;i<N;i++) {
pts[i]=vector2f( rand_uniform<float>() * (w-1), rand_uniform<float>() * (h-1) );
}
device_vec<vector2f> dpts;
dpts.copyToDevice(pts, false);
srand(1);
for (int i=0;i<w*h*2;i++)
himg[i]=i;
img.copyToDevice(himg,false);
img.bind(test_tex);
img.bind(test_tex_lin);
device_vec<float> rtex(N),rmem(N),rtex2(N);
int nt=32;
TestSampling<<< dim3( (N+nt-1)/nt ), dim3(nt) >>> (N, img, rtex.data,rtex2.data,rmem.data, (float2*)dpts.data);
img.unbind(test_tex_lin);
img.unbind(test_tex);
auto hmem = rmem.toVector();
auto htex = rtex.toVector();
auto htex2 = rtex2.toVector();
for (int x=0;x<N;x++) {
dbgprintf("[%.2f, %.2f]: %f (tex), %f(tex2), %f (mem). tex-mem: %f, tex2-mem: %f\n",
pts[x].x, pts[x].y, htex[x], htex2[x], hmem[x], htex[x]-hmem[x],htex2[x]-hmem[x]);
}
}
*/
void BasicQTrkTest()
{
QTrkComputedConfig cc;
cc.width = cc.height = 100;
cc.Update();
QueuedCUDATracker qtrk(cc);
float zmin=1,zmax=5;
ImageData img = ImageData::alloc(cc.width,cc.height);
float pos_x = cc.width/2 - 5;
float pos_y = cc.height/2 + 3;
GenerateTestImage(img, pos_x, pos_y, (zmin+zmax)/2, 0);
int N = 100000;
#ifdef _DEBUG
N = 10000;
#endif
double t = GetPreciseTime();
qtrk.SetLocalizationMode((LocMode_t)(LT_QI|LT_NormalizeProfile));
for (int i=0;i<N;i++)
{
LocalizationJob job ( i, 0, 0, 0);
qtrk.ScheduleLocalization((uchar*)img.data, sizeof(float)*cc.width, QTrkFloat, &job);
if(i%std::max(1,(int)(N*0.1))==0) dbgprintf("Queued: %d / %d\n", i, N);
}
WaitForFinish(&qtrk, N);
t = GetPreciseTime() - t;
dbgprintf("Speed: %d imgs/s (Only QI, %d iterations)\n", (int)(N / t), cc.qi_iterations);
int count = 0;
while(qtrk.GetResultCount() != 0){
LocalizationResult res;
qtrk.FetchResults(&res,1);
if( res.pos.x > pos_x + 0.01f || res.pos.x < pos_x - 0.01f || res.pos.y > pos_y + 0.01f || res.pos.y < pos_y - 0.01f ){
if(count < 100)
dbgprintf("Location frame %d: (%02f,%02f)\n",res.job.frame, res.pos.x, res.pos.y);
count++;
}
}
dbgprintf("Errors: %d/%d (%f%%)\n", count, N, (float)100*count/N);
img.free();
}
void BasicQTrkTest_RM()
{
QTrkComputedConfig cc;
//cc.qi_iterations = 10;
cc.width = cc.height = 100;
cc.Update();
QueuedCUDATracker qtrk(cc);
float zmin=1,zmax=5;
ImageData img = ImageData::alloc(cc.width,cc.height);
// Positions to set
float pos_x = cc.width/2 - 5;
float pos_y = cc.height/2 + 3;
GenerateTestImage(img, pos_x, pos_y, (zmin+zmax)/2, 0);
int N = 100000;
#ifdef _DEBUG
N = 100000;
#endif
qtrk.SetLocalizationMode((LocMode_t)(LT_QI|LT_NormalizeProfile));
ResultManagerConfig RMcfg;
RMcfg.numBeads = 1;
RMcfg.numFrameInfoColumns = 0;
RMcfg.scaling = vector3f(1.0f,1.0f,1.0f);
RMcfg.offset = vector3f(0.0f,0.0f,0.0f);
RMcfg.writeInterval = 4000;
RMcfg.maxFramesInMemory = 0;
RMcfg.binaryOutput = false;
std::vector<std::string> colnames;
for(int ii = 0;ii<RMcfg.numFrameInfoColumns;ii++){
colnames.push_back(SPrintf("%d",ii));
}
outputter output(Files+Images);
ResultManager RM(
SPrintf("%s\\RMOutput.txt",output.folder.c_str()).c_str(),
SPrintf("%s\\RMFrameInfo.txt",output.folder.c_str()).c_str(),
&RMcfg, colnames);
RM.SetTracker(&qtrk);
double t = GetPreciseTime();
for (int i=0;i<N;i++)
{
LocalizationJob job ( i, 0, 0, 0);
qtrk.ScheduleLocalization((uchar*)img.data, sizeof(float)*cc.width, QTrkFloat, &job);
//if(i%std::max(1,N/1000)==0) dbgprintf("Queued: %d / %d\n", i, N);
}
printf("\nDone queueing!\n");
// Tell the tracker to perform the localizations left in the queue regardless of batchSize
qtrk.Flush();
// Halt the test (=timer) until all localizations are done.
while(RM.GetFrameCounters().localizationsDone < N);
t = GetPreciseTime() - t;
// Tell the resultmanager to print the final available results regardless of writeInterval
RM.Flush();
while(RM.GetFrameCounters().lastSaveFrame != N);
dbgprintf("Speed: %d imgs/s (Only QI, %d iterations)\n", (int)(N / t), cc.qi_iterations);
img.free();
}
void TestGauss2D(bool calib)
{
int N=20, R=1000;
#ifdef _DEBUG
R=1;
#endif
std::vector<vector3f> rcpu = Gauss2DTest<QueuedCPUTracker>(N, R, calib);
std::vector<vector3f> rgpu = Gauss2DTest<QueuedCUDATracker>(N, R, calib);
for (int i=0;i<std::min(20,N);i++) {
dbgprintf("[%d] CPU: X:%.5f, Y:%.5f\t;\tGPU: X:%.5f, Y:%.5f. \tDiff: X:%.5f, Y:%.5f\n",
i, rcpu[i].x, rcpu[i].y, rgpu[i].x, rgpu[i].y, rcpu[i].x-rgpu[i].x, rcpu[i].y-rgpu[i].y);
}
}
void TestRadialLUTGradientMethod()
{
}
std::vector< float > cmp_cpu_qi_prof;
std::vector< float > cmp_gpu_qi_prof;
std::vector< std::complex<float> > cmp_cpu_qi_fft_out;
std::vector< std::complex<float> > cmp_gpu_qi_fft_out;
void QICompare(const char *lutfile )
{
QTrkSettings cfg;
cfg.qi_iterations=1;
cfg.width = 150;
cfg.height = 150;
cfg.numThreads=1;
QueuedCUDATracker gpu(cfg, 1);
QueuedCPUTracker cpu(cfg);
ImageData lut=ReadJPEGFile(lutfile);
ImageData img=ImageData::alloc(cfg.width,cfg.height);
srand(0);
const int N=1;
gpu.SetLocalizationMode(LT_QI);
cpu.SetLocalizationMode(LT_QI);
for (int i=0;i<N;i++) {
LocalizationJob job(i, 0, 0, 0);
vector3f pos(cfg.width/2,cfg.height/2, lut.h/2);
pos.x += rand_uniform<float>();
pos.y += rand_uniform<float>();
GenerateImageFromLUT(&img, &lut, 1, cfg.width/2, pos);
gpu.ScheduleLocalization( (uchar*)img.data, sizeof(float)*img.w, QTrkFloat, &job);
cpu.ScheduleLocalization( (uchar*)img.data, sizeof(float)*img.w, QTrkFloat, &job);
}
gpu.Flush();
cpu.Flush();
while(cpu.GetResultCount() != N || gpu.GetResultCount() != N );
ImageData dbgImg = cpu.DebugImage(0);
FloatToJPEGFile("qidbgimg.jpg", dbgImg.data, dbgImg.w, dbgImg.h);
auto rcpu = FetchResults(&cpu), rgpu = FetchResults(&gpu);
for (int i=0;i<N;i++) {
vector3f d=rcpu[i]-rgpu[i];
dbgprintf("[%d]: CPU: x=%f, y=%f. GPU: x=%f, y=%f.\tGPU-CPU: x:%f, y:%f\n", i, rcpu[i].x, rcpu[i].y, rgpu[i].x, rgpu[i].y, d.x,d.y);
}
// Profiles
for(uint i=0;i<cmp_cpu_qi_prof.size();i++) {
dbgprintf("QIPROF[%d]. CPU=%f, GPU=%f, Diff: %f\n", i, cmp_cpu_qi_prof[i], cmp_gpu_qi_prof[i], cmp_gpu_qi_prof[i]-cmp_cpu_qi_prof[i]);
}
// FFT out
for(uint i=0;i<cmp_cpu_qi_fft_out.size();i++) {
dbgprintf("fft-out[%d]. CPU=%f, GPU=%f, Diff: %f\n", i, cmp_cpu_qi_fft_out[i].real(), cmp_gpu_qi_fft_out[i].real(), cmp_gpu_qi_fft_out[i].real()-cmp_cpu_qi_fft_out[i].real());
}
img.free();
lut.free();
}
void TestBenchmarkLUT()
{
BenchmarkLUT bml("refbeadlut.jpg");
ImageData img=ImageData::alloc(120,120);
ImageData lut = ImageData::alloc(bml.lut_w, bml.lut_h);
bml.GenerateLUT(&lut);
WriteJPEGFile("refbeadlut-lutsmp.jpg", lut);
lut.free();
bml.GenerateSample(&img, vector3f(img.w/2,img.h/2,bml.lut_h/2), 0, img.w/2-5);
WriteJPEGFile("refbeadlut-bmsmp.jpg", img);
img.free();
}
template<typename T>
void check_arg(const std::vector<std::string>& args, const char *name, T *param)
{
for (uint i=0;i<args.size();i++) {
if (args[i] == name) {
*param = (T)atof(args[i+1].c_str());
return;
}
}
}
void check_strarg(const std::vector<std::string>& args, const char *name, std::string* param)
{
for (uint i=0;i<args.size();i++) {
if (args[i] == name) {
*param = args[i+1];
return;
}
}
}
int CmdLineRun(int argc, char*argv[])
{
QTrkSettings cfg;
std::vector<std::string> args(argc-1);
for (int i=0;i<argc-1;i++)
args[i]=argv[i+1];
check_arg(args, "roi", &cfg.width);
cfg.height=cfg.width;
int count=100;
check_arg(args, "count", &count);
std::string outputfile, fixlutfile, inputposfile, bmlutfile, rescaledlutfile;
std::string radialWeightsFile;
check_strarg(args, "output", &outputfile);
check_strarg(args, "fixlut", &fixlutfile);
check_strarg(args, "bmlut", &bmlutfile);
check_strarg(args, "inputpos", &inputposfile);
check_strarg(args, "regenlut", &rescaledlutfile);
check_strarg(args, "radweights", &radialWeightsFile);
std::string crlboutput;
check_strarg(args, "crlb", &crlboutput);
std::vector< vector3f > inputPos;
if (!inputposfile.empty()) {
inputPos = ReadVector3CSV(inputposfile.c_str());
count = inputPos.size();
}
check_arg(args, "zlut_minradius", &cfg.zlut_minradius);
check_arg(args, "zlut_radial_coverage", &cfg.zlut_radial_coverage);
check_arg(args, "zlut_angular_coverage", &cfg.zlut_angular_coverage);
check_arg(args, "zlut_roi_coverage", &cfg.zlut_roi_coverage);
check_arg(args, "qi_iterations", &cfg.qi_iterations);
check_arg(args, "qi_minradius", &cfg.qi_minradius);
check_arg(args, "qi_radial_coverage", &cfg.qi_radial_coverage);
check_arg(args, "qi_angular_coverage", &cfg.qi_angular_coverage);
check_arg(args, "qi_roi_coverage", &cfg.qi_roi_coverage);
check_arg(args, "qi_angstep_factor", &cfg.qi_angstep_factor);
check_arg(args, "downsample", &cfg.downsample);
int zlutAlign=0;
check_arg(args, "zlutalign", &zlutAlign);
float pixelmax = 28 * 255;
check_arg(args, "pixelmax", &pixelmax);
std::string lutsmpfile;
check_strarg(args, "lutsmpfile", &lutsmpfile);
int cuda=1;
check_arg(args, "cuda", &cuda);
QueuedTracker* qtrk;
if (cuda) qtrk = new QueuedCUDATracker(cfg);
else qtrk = new QueuedCPUTracker(cfg);
ImageData lut;
BenchmarkLUT bmlut;
if (!fixlutfile.empty())
{
lut = ReadJPEGFile(fixlutfile.c_str());
if(!rescaledlutfile.empty()) {
// rescaling allowed
ImageData newlut;
ResampleLUT(qtrk, &lut, lut.h, &newlut, rescaledlutfile.c_str());
lut.free();
lut=newlut;
}
else if (lut.w != qtrk->cfg.zlut_radialsteps) {
lut.free();
dbgprintf("Invalid LUT size (%d). Expecting %d radialsteps\n", lut.w, qtrk->cfg.zlut_radialsteps);
delete qtrk;
return -1;
}
qtrk->SetRadialZLUT(lut.data,1,lut.h);
}
else
{
if (bmlutfile.empty()) {
delete qtrk;
dbgprintf("No lut file\n");
return -1;
}
bmlut.Load(bmlutfile.c_str());
lut = ImageData::alloc(qtrk->cfg.zlut_radialsteps, bmlut.lut_h);
bmlut.GenerateLUT(&lut);
if (!rescaledlutfile.empty())
WriteJPEGFile(rescaledlutfile.c_str(), lut);
qtrk->SetRadialZLUT(lut.data,1,lut.h);
}
if (inputPos.empty()) {
inputPos.resize(count);
for (int i=0;i<count;i++){
inputPos[i]=vector3f(cfg.width/2,cfg.height/2,lut.h/2);
}
}
if (!radialWeightsFile.empty())
{
auto rwd = ReadCSV(radialWeightsFile.c_str());
std::vector<float> rw(rwd.size());
if (rw.size() == qtrk->cfg.zlut_radialsteps)
qtrk->SetRadialWeights(&rw[0]);
else {
dbgprintf("Invalid # radial weights");
delete qtrk;
}
}
std::vector<ImageData> imgs (inputPos.size());
std::vector<vector3f> crlb(inputPos.size());
for (uint i=0;i<inputPos.size();i++) {
imgs[i]=ImageData::alloc(cfg.width, cfg.height);
//vector3f pos = centerpos + range*vector3f(rand_uniform<float>()-0.5f, rand_uniform<float>()-0.5f, rand_uniform<float>()-0.5f)*2;
auto p = inputPos[i];
if (!bmlut.lut_w) {
GenerateImageFromLUT(&imgs[i], &lut, qtrk->cfg.zlut_minradius, qtrk->cfg.zlut_maxradius, p, false);
if (!crlboutput.empty()) {
SampleFisherMatrix sfm(pixelmax);
crlb[i]=sfm.Compute(p, vector3f(1,1,1)*0.001f, lut, qtrk->cfg.width,qtrk->cfg.height, qtrk->cfg.zlut_minradius, qtrk->cfg.zlut_maxradius).Inverse().diag();
}
} else
bmlut.GenerateSample(&imgs[i], p, qtrk->cfg.zlut_minradius, qtrk->cfg.zlut_maxradius);
imgs[i].normalize();
if (pixelmax > 0) ApplyPoissonNoise(imgs[i], pixelmax, 255);
if(i==0 && !lutsmpfile.empty()) WriteJPEGFile(lutsmpfile.c_str(), imgs[i]);
}
int locMode = LT_LocalizeZ | LT_NormalizeProfile | LT_LocalizeZWeighted;
if (qtrk->cfg.qi_iterations > 0)
locMode |= LT_QI;
if (zlutAlign)
locMode |= LT_ZLUTAlign;
qtrk->SetLocalizationMode((LocMode_t)locMode);
double tstart=GetPreciseTime();
for (uint i=0;i<inputPos.size();i++)
{
LocalizationJob job(i, 0, 0, 0);
qtrk->ScheduleImageData(&imgs[i], &job);
}
WaitForFinish(qtrk, inputPos.size());
double tend = GetPreciseTime();
std::vector<vector3f> results(inputPos.size());
for (uint i=0;i<inputPos.size();i++) {
LocalizationResult r;
qtrk->FetchResults(&r,1);
results[r.job.frame]=r.pos;
}
vector3f meanErr, stdevErr;
MeanStDevError(inputPos, results, meanErr, stdevErr);
dbgprintf("Mean err X=%f,Z=%f. St deviation: X=%f,Z=%f\n", meanErr.x,meanErr.y,stdevErr.x,stdevErr.z);
if (!crlboutput.empty())
WriteTrace(crlboutput, &crlb[0], crlb.size());
WriteTrace(outputfile, &results[0], inputPos.size());
if (lut.data) lut.free();
delete qtrk;
return 0;
}
void BuildZLUT(std::string folder, outputter* output)
{
int ROISize = 100;
std::vector<BeadPos> beads = read_beadlist(SPrintf("%sbeadlist.txt",folder.c_str()));
int numImgInStack = 1218;
int numPositions = 1001; // 10nm/frame
float range = 10.0f; // total range 25.0 um -> 35.0 um
float umPerImg = range/numImgInStack;
QTrkComputedConfig cfg;
cfg.width=cfg.height = ROISize;
cfg.qi_angstep_factor = 1;
cfg.qi_iterations = 6;
cfg.qi_angular_coverage = 0.7f;
cfg.qi_roi_coverage = 1;
cfg.qi_radial_coverage = 1.5f;
cfg.qi_minradius=0;
cfg.zlut_minradius=0;
cfg.zlut_angular_coverage = 0.7f;
cfg.zlut_roi_coverage = 1;
cfg.zlut_radial_coverage = 1.5f;
cfg.zlut_minradius = 0;
cfg.qi_minradius = 0;
cfg.com_bgcorrection = 0;
cfg.xc1_profileLength = ROISize*0.8f;
cfg.xc1_profileWidth = ROISize*0.2f;
cfg.xc1_iterations = 1;
cfg.Update();
cfg.WriteToFile();
int zplanes = 50;
QueuedCUDATracker* qtrk = new QueuedCUDATracker(cfg);
//qtrk->SetLocalizationMode(LT_NormalizeProfile | LT_QI);
qtrk->SetRadialZLUT(0, beads.size(), zplanes);
qtrk->BeginLUT(0);
int pxPerBead = ROISize*ROISize;
int memSizePerBead = pxPerBead*sizeof(float);
int startFrame = 400;
for(int plane = 0; plane < zplanes; plane++){
output->outputString(SPrintf("Frame %d/%d",plane+1,zplanes),true);
int frameNum = startFrame+(int)(numImgInStack-startFrame)*((float)plane/zplanes);
std::string file = SPrintf("%s\img%05d.jpg",folder.c_str(),frameNum);
ImageData frame = ReadJPEGFile(file.c_str());
float* data = new float[beads.size()*pxPerBead];
for(uint ii = 0; ii < beads.size(); ii++){
vector2f pos;
pos.x = beads.at(ii).x - ROISize/2;
pos.y = beads.at(ii).y - ROISize/2;
ImageData crop = CropImage(frame,pos.x,pos.y,ROISize,ROISize);
//output->outputImage(crop,SPrintf("%d-%05d",ii,plane));
memcpy(data+ii*pxPerBead,crop.data,memSizePerBead);
crop.free();
}
/*
// To verify seperate frame bead stack generation
output->newFile(SPrintf("data-plane-%d",plane));
output->outputArray(data,beads.size()*pxPerBead);
ImageData allBeads = ImageData(data,ROISize,ROISize*beads.size());
output->outputImage(allBeads,SPrintf("allBeads-%05d",frameNum));//*/
qtrk->BuildLUT(data, sizeof(float)*ROISize, QTrkFloat, plane);
frame.free();
delete[] data;
}
qtrk->FinalizeLUT();
float* luts = new float[beads.size()*(zplanes*cfg.zlut_radialsteps)];
qtrk->GetRadialZLUT(luts);
for(int ii = 0; ii < beads.size(); ii++){
ImageData lut = ImageData::alloc(cfg.zlut_radialsteps, zplanes);
memcpy(lut.data, &luts[ii*cfg.zlut_radialsteps*zplanes], cfg.zlut_radialsteps*zplanes*sizeof(float));
//memcpy(lut.data,qtrk->GetZLUTByIndex(ii),cfg.zlut_radialsteps*zplanes*sizeof(float));
//output->outputImage(lut,SPrintf("lut%03d,%d",beads.at(ii).x,beads.at(ii).y));
output->outputImage(lut, SPrintf("lut%03d",ii));
lut.free();
}
qtrk->Flush();
delete qtrk;
}
int main(int argc, char *argv[])
{
//listDevices();
printf("%d, %d\n",sizeof(long),sizeof(int));
if (argc > 1)
{
return CmdLineRun(argc, argv);
}
try {
// outputter output(Files+Images);
// BuildZLUT("C:\\TestImages\\TestMovie150507_2\\images\\jpg\\Zstack\\", &output);
BasicQTrkTest();
// BasicQTrkTest_RM();
// TestBenchmarkLUT();
// testLinearArray();
// TestTextureFetch();
// TestGauss2D(true);
// MultipleLUTTest();
// TestSurfaceReadWrite();
// TestImage4D();
// TestImage4DMemory();
// TestImageLUT("../cputrack-test/lut000.jpg");
// TestRadialLUTGradientMethod();
// BenchmarkParams();
// TestTextureFetch();
// QICompare("../cputrack-test/lut000.jpg");
// TestCMOSNoiseInfluence<QueuedCUDATracker>("../cputrack-test/lut000.jpg");
// CompareAccuracy("../cputrack-test/lut000.jpg");
// QTrkCompareTest();
/*
ProfileSpeedVsROI(LT_OnlyCOM, "speeds-com.txt", false, 0);
ProfileSpeedVsROI(LT_OnlyCOM, "speeds-com-z.txt", true, 0);
ProfileSpeedVsROI(LT_XCor1D, "speeds-xcor.txt", true, 0);
for (int qi_it=1;qi_it<=4;qi_it++) {
ProfileSpeedVsROI(LT_QI, SPrintf("speeds-qi-%d-iterations.txt",qi_it).c_str(), true, qi_it);
}*/
/* auto info = SpeedCompareTest(80, false);
auto infogc = SpeedCompareTest(80, true);
dbgprintf("[gainc=false] CPU: %f, GPU: %f\n", info.speed_cpu, info.speed_gpu);
dbgprintf("[gainc=true] CPU: %f, GPU: %f\n", infogc.speed_cpu, infogc.speed_gpu);
*/
} catch (const std::exception& e) {
dbgprintf("Exception: %s\n", e.what());
}
system("pause");
return 0;
} |
d33a64a1ebeca89b76f5189e1b03b1caa051414a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "cuda_utils.h"
#include "distance/distance.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace Distance {
template <typename Type>
__global__ void naiveDistanceAdjKernel(bool *dist, const Type *x, const Type *y,
int m, int n, int k, Type eps) {
int midx = threadIdx.x + blockIdx.x * blockDim.x;
int nidx = threadIdx.y + blockIdx.y * blockDim.y;
if (midx >= m || nidx >= n)
return;
Type acc = Type(0);
for (int i = 0; i < k; ++i) {
auto diff = x[i + midx * k] - y[i + nidx * k];
acc += diff * diff;
}
dist[midx * n + nidx] = acc <= eps;
}
template <typename Type>
void naiveDistanceAdj(bool *dist, const Type *x, const Type *y, int m, int n,
int k, Type eps) {
static const dim3 TPB(16, 32, 1);
dim3 nblks(ceildiv(m, (int)TPB.x), ceildiv(n, (int)TPB.y), 1);
hipLaunchKernelGGL(( naiveDistanceAdjKernel<Type>), dim3(nblks), dim3(TPB), 0, 0, dist, x, y, m, n, k, eps);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename T>
struct DistanceAdjInputs {
T eps;
int m, n, k;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const DistanceAdjInputs<T> &dims) {
return os;
}
template <typename T>
struct InParams {
T threshold;
};
template <typename T>
class DistanceAdjTest : public ::testing::TestWithParam<DistanceAdjInputs<T>> {
public:
void SetUp() override {
params = ::testing::TestWithParam<DistanceAdjInputs<T>>::GetParam();
Random::Rng r(params.seed);
int m = params.m;
int n = params.n;
int k = params.k;
allocate(x, m * k);
allocate(y, n * k);
allocate(dist_ref, m * n);
allocate(dist, m * n);
r.uniform(x, m * k, T(-1.0), T(1.0));
r.uniform(y, n * k, T(-1.0), T(1.0));
InParams<T> in_params = {params.eps};
naiveDistanceAdj(dist_ref, x, y, m, n, k, params.eps);
char *workspace = nullptr;
size_t worksize = 0;
typedef cutlass::Shape<8, 128, 128> OutputTile_t;
distance<T, T, bool, InParams<T>, OutputTile_t>(
x, y, dist, m, n, k, in_params, EucExpandedL2, nullptr, worksize);
if (worksize != 0) {
allocate(workspace, worksize);
}
auto fin_op = [] __device__(T d_val, int g_d_idx,
const InParams<T> &in_params) {
return d_val <= in_params.threshold;
};
distance<T, T, bool, InParams<T>, OutputTile_t>(
x, y, dist, m, n, k, in_params, EucExpandedL2, workspace, worksize,
fin_op);
CUDA_CHECK(hipFree(workspace));
}
void TearDown() override {
CUDA_CHECK(hipFree(x));
CUDA_CHECK(hipFree(y));
CUDA_CHECK(hipFree(dist_ref));
CUDA_CHECK(hipFree(dist));
}
protected:
DistanceAdjInputs<T> params;
T *x, *y;
bool *dist_ref, *dist;
};
const std::vector<DistanceAdjInputs<float>> inputsf = {
{0.01f, 1024, 1024, 32, 1234ULL},
{0.1f, 1024, 1024, 32, 1234ULL},
{1.0f, 1024, 1024, 32, 1234ULL},
{10.0f, 1024, 1024, 32, 1234ULL}};
typedef DistanceAdjTest<float> DistanceAdjTestF;
TEST_P(DistanceAdjTestF, Result) {
ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, params.n, Compare<bool>()));
}
INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestF,
::testing::ValuesIn(inputsf));
const std::vector<DistanceAdjInputs<double>> inputsd = {
{0.01, 1024, 1024, 32, 1234ULL},
{0.1, 1024, 1024, 32, 1234ULL},
{1.0, 1024, 1024, 32, 1234ULL},
{10.0, 1024, 1024, 32, 1234ULL}};
typedef DistanceAdjTest<double> DistanceAdjTestD;
TEST_P(DistanceAdjTestD, Result) {
ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, params.n, Compare<bool>()));
}
INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestD,
::testing::ValuesIn(inputsd));
} // end namespace DistanceAdj
} // end namespace MLCommon
| d33a64a1ebeca89b76f5189e1b03b1caa051414a.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "cuda_utils.h"
#include "distance/distance.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace Distance {
template <typename Type>
__global__ void naiveDistanceAdjKernel(bool *dist, const Type *x, const Type *y,
int m, int n, int k, Type eps) {
int midx = threadIdx.x + blockIdx.x * blockDim.x;
int nidx = threadIdx.y + blockIdx.y * blockDim.y;
if (midx >= m || nidx >= n)
return;
Type acc = Type(0);
for (int i = 0; i < k; ++i) {
auto diff = x[i + midx * k] - y[i + nidx * k];
acc += diff * diff;
}
dist[midx * n + nidx] = acc <= eps;
}
template <typename Type>
void naiveDistanceAdj(bool *dist, const Type *x, const Type *y, int m, int n,
int k, Type eps) {
static const dim3 TPB(16, 32, 1);
dim3 nblks(ceildiv(m, (int)TPB.x), ceildiv(n, (int)TPB.y), 1);
naiveDistanceAdjKernel<Type><<<nblks, TPB>>>(dist, x, y, m, n, k, eps);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename T>
struct DistanceAdjInputs {
T eps;
int m, n, k;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const DistanceAdjInputs<T> &dims) {
return os;
}
template <typename T>
struct InParams {
T threshold;
};
template <typename T>
class DistanceAdjTest : public ::testing::TestWithParam<DistanceAdjInputs<T>> {
public:
void SetUp() override {
params = ::testing::TestWithParam<DistanceAdjInputs<T>>::GetParam();
Random::Rng r(params.seed);
int m = params.m;
int n = params.n;
int k = params.k;
allocate(x, m * k);
allocate(y, n * k);
allocate(dist_ref, m * n);
allocate(dist, m * n);
r.uniform(x, m * k, T(-1.0), T(1.0));
r.uniform(y, n * k, T(-1.0), T(1.0));
InParams<T> in_params = {params.eps};
naiveDistanceAdj(dist_ref, x, y, m, n, k, params.eps);
char *workspace = nullptr;
size_t worksize = 0;
typedef cutlass::Shape<8, 128, 128> OutputTile_t;
distance<T, T, bool, InParams<T>, OutputTile_t>(
x, y, dist, m, n, k, in_params, EucExpandedL2, nullptr, worksize);
if (worksize != 0) {
allocate(workspace, worksize);
}
auto fin_op = [] __device__(T d_val, int g_d_idx,
const InParams<T> &in_params) {
return d_val <= in_params.threshold;
};
distance<T, T, bool, InParams<T>, OutputTile_t>(
x, y, dist, m, n, k, in_params, EucExpandedL2, workspace, worksize,
fin_op);
CUDA_CHECK(cudaFree(workspace));
}
void TearDown() override {
CUDA_CHECK(cudaFree(x));
CUDA_CHECK(cudaFree(y));
CUDA_CHECK(cudaFree(dist_ref));
CUDA_CHECK(cudaFree(dist));
}
protected:
DistanceAdjInputs<T> params;
T *x, *y;
bool *dist_ref, *dist;
};
const std::vector<DistanceAdjInputs<float>> inputsf = {
{0.01f, 1024, 1024, 32, 1234ULL},
{0.1f, 1024, 1024, 32, 1234ULL},
{1.0f, 1024, 1024, 32, 1234ULL},
{10.0f, 1024, 1024, 32, 1234ULL}};
typedef DistanceAdjTest<float> DistanceAdjTestF;
TEST_P(DistanceAdjTestF, Result) {
ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, params.n, Compare<bool>()));
}
INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestF,
::testing::ValuesIn(inputsf));
const std::vector<DistanceAdjInputs<double>> inputsd = {
{0.01, 1024, 1024, 32, 1234ULL},
{0.1, 1024, 1024, 32, 1234ULL},
{1.0, 1024, 1024, 32, 1234ULL},
{10.0, 1024, 1024, 32, 1234ULL}};
typedef DistanceAdjTest<double> DistanceAdjTestD;
TEST_P(DistanceAdjTestD, Result) {
ASSERT_TRUE(devArrMatch(dist_ref, dist, params.m, params.n, Compare<bool>()));
}
INSTANTIATE_TEST_CASE_P(DistanceAdjTests, DistanceAdjTestD,
::testing::ValuesIn(inputsd));
} // end namespace DistanceAdj
} // end namespace MLCommon
|
93674c51794fc03090bd1037f159de488321dc0c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2011, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "gpujpeg_preprocessor.h"
#include <libgpujpeg/gpujpeg_util.h>
#include "gpujpeg_colorspace.h"
#define RGB_8BIT_THREADS 256
/**
* Preprocessor data for component
*/
struct gpujpeg_preprocessor_data_component
{
uint8_t* d_data;
int data_width;
struct gpujpeg_component_sampling_factor sampling_factor;
};
/**
* Preprocessor data
*/
struct gpujpeg_preprocessor_data
{
struct gpujpeg_preprocessor_data_component comp[3];
};
/** Value that means that sampling factor has dynamic value */
#define GPUJPEG_DYNAMIC 16
/** Sampling factor for all components */
typedef int gpujpeg_preprocessor_sampling_factor_t;
/**
* Prepares fixed divisor for dividing unsigned integers up to 2^31
* with unsigned integers up to 2^31.
* Source: http://www.hackersdelight.org/HDcode/magic.c.txt
* Modified for positive numbers only.
*/
static void
gpujpeg_const_div_prepare(const uint32_t d, uint32_t & pre_div_mul, uint32_t & pre_div_shift) {
if(d > 1) {
uint32_t delta;
const uint32_t two31 = 0x80000000; // 2**31.
const uint32_t anc = two31 - 1 - two31 % d; // Absolute value of nc.
int p = 31; // Init. p.
uint32_t q1 = two31 / anc; // Init. q1 = 2**p/|nc|.
uint32_t r1 = two31 - q1 * anc; // Init. r1 = rem(2**p, |nc|).
uint32_t q2 = two31 / d; // Init. q2 = 2**p/|d|.
uint32_t r2 = two31 - q2 * d; // Init. r2 = rem(2**p, |d|).
do {
p = p + 1;
q1 = 2 * q1; // Update q1 = 2**p/|nc|.
r1 = 2 * r1; // Update r1 = rem(2**p, |nc|).
if (r1 >= anc) { // (Must be an unsigned
q1 = q1 + 1; // comparison here).
r1 = r1 - anc;
}
q2 = 2 * q2; // Update q2 = 2**p/|d|.
r2 = 2 * r2; // Update r2 = rem(2**p, |d|).
if (r2 >= d) { // (Must be an unsigned
q2 = q2 + 1; // comparison here).
r2 = r2 - d;
}
delta = d - r2;
} while (q1 < delta || (q1 == delta && r1 == 0));
pre_div_mul = q2 + 1;
pre_div_shift = p - 32; // shift amount to return.
} else {
pre_div_mul = 0; // special case for d = 1
pre_div_shift = 0;
}
}
/**
* Divides unsigned numerator (up to 2^31) by precomputed constant denominator.
*/
__device__ static uint32_t
gpujpeg_const_div_divide(const uint32_t numerator, const uint32_t pre_div_mul, const uint32_t pre_div_shift) {
return pre_div_mul ? __umulhi(numerator, pre_div_mul) >> pre_div_shift : numerator;
}
/**
* Compose sampling factor for all components to single type
*
* @return integer that contains all sampling factors
*/
inline gpujpeg_preprocessor_sampling_factor_t
gpujpeg_preprocessor_make_sampling_factor(int comp1_h, int comp1_v, int comp2_h, int comp2_v, int comp3_h, int comp3_v)
{
gpujpeg_preprocessor_sampling_factor_t sampling_factor = 0;
sampling_factor |= ((comp1_h << 4) | comp1_v) << 16;
sampling_factor |= ((comp2_h << 4) | comp2_v) << 8;
sampling_factor |= ((comp3_h << 4) | comp3_v) << 0;
return sampling_factor;
}
/**
* Store value to component data buffer in specified position by buffer size and subsampling
*/
template<
unsigned int s_samp_factor_h,
unsigned int s_samp_factor_v
>
static __device__ void
gpujpeg_preprocessor_raw_to_comp_store(uint8_t value, unsigned int position_x, unsigned int position_y, struct gpujpeg_preprocessor_data_component & comp)
{
const unsigned int samp_factor_h = ( s_samp_factor_h == GPUJPEG_DYNAMIC ) ? comp.sampling_factor.horizontal : s_samp_factor_h;
const unsigned int samp_factor_v = ( s_samp_factor_v == GPUJPEG_DYNAMIC ) ? comp.sampling_factor.vertical : s_samp_factor_v;
if ( (position_x % samp_factor_h) || (position_y % samp_factor_v) )
return;
position_x = position_x / samp_factor_h;
position_y = position_y / samp_factor_v;
const unsigned int data_position = position_y * comp.data_width + position_x;
comp.d_data[data_position] = value;
}
/**
* Kernel - Copy raw image source data into three separated component buffers
*/
typedef void (*gpujpeg_preprocessor_encode_kernel)(struct gpujpeg_preprocessor_data data, const uint8_t* d_data_raw, const uint8_t* d_data_raw_end, int image_width, int image_height, uint32_t width_div_mul, uint32_t width_div_shift);
/** Specialization [sampling factor is 4:4:4] */
template<
enum gpujpeg_color_space color_space_internal,
enum gpujpeg_color_space color_space,
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v
>
__global__ void
gpujpeg_preprocessor_raw_to_comp_kernel_4_4_4(struct gpujpeg_preprocessor_data data, const uint8_t* d_data_raw, const uint8_t* d_data_raw_end, int image_width, int image_height, uint32_t width_div_mul, uint32_t width_div_shift)
{
int x = threadIdx.x;
int gX = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x;
// Load to shared
__shared__ unsigned char s_data[RGB_8BIT_THREADS * 3];
if ( (x * 4) < RGB_8BIT_THREADS * 3 ) {
uint32_t* s = (uint32_t *) d_data_raw + ((gX * 3) >> 2) + x;
uint32_t* d = (uint32_t *) s_data + x;
if ((uint8_t *) s < d_data_raw_end) {
*d = *s;
}
}
__syncthreads();
// Load
int offset = x * 3;
uint8_t r1 = s_data[offset];
uint8_t r2 = s_data[offset + 1];
uint8_t r3 = s_data[offset + 2];
// Load Order
gpujpeg_color_order<color_space>::perform_load(r1, r2, r3);
// Color transform
gpujpeg_color_transform<color_space, color_space_internal>::perform(r1, r2, r3);
// Position
int image_position = gX + x;
int image_position_y = gpujpeg_const_div_divide(image_position, width_div_mul, width_div_shift);
int image_position_x = image_position - (image_position_y * image_width);
// Store
if ( image_position < (image_width * image_height) ) {
gpujpeg_preprocessor_raw_to_comp_store<s_comp1_samp_factor_h, s_comp1_samp_factor_v>(r1, image_position_x, image_position_y, data.comp[0]);
gpujpeg_preprocessor_raw_to_comp_store<s_comp2_samp_factor_h, s_comp2_samp_factor_v>(r2, image_position_x, image_position_y, data.comp[1]);
gpujpeg_preprocessor_raw_to_comp_store<s_comp3_samp_factor_h, s_comp3_samp_factor_v>(r3, image_position_x, image_position_y, data.comp[2]);
}
}
/** Specialization [sampling factor is 4:2:2] */
template<
enum gpujpeg_color_space color_space_internal,
enum gpujpeg_color_space color_space,
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v
>
__global__ void
gpujpeg_preprocessor_raw_to_comp_kernel_4_2_2(struct gpujpeg_preprocessor_data data, const uint8_t* d_data_raw, const uint8_t* d_data_raw_end, int image_width, int image_height, uint32_t width_div_mul, uint32_t width_div_shift)
{
int x = threadIdx.x;
int gX = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x;
// Load to shared
__shared__ unsigned char s_data[RGB_8BIT_THREADS * 2];
if ( (x * 4) < RGB_8BIT_THREADS * 2 ) {
uint32_t* s = (uint32_t *) d_data_raw + ((gX * 2) >> 2) + x;
uint32_t* d = (uint32_t *) s_data + x;
if ((uint8_t *) s < d_data_raw_end) {
*d = *s;
}
}
__syncthreads();
// Load
const unsigned int offset = x * 2;
uint8_t r1;
uint8_t r2 = s_data[offset + 1];
uint8_t r3;
if ( (gX + x) % 2 == 0 ) {
r1 = s_data[offset];
r3 = s_data[offset + 2];
} else {
r1 = s_data[offset - 2];
r3 = s_data[offset];
}
// Load Order
gpujpeg_color_order<color_space>::perform_load(r1, r2, r3);
// Color transform
gpujpeg_color_transform<color_space, color_space_internal>::perform(r1, r2, r3);
// Position
int image_position = gX + x;
int image_position_y = gpujpeg_const_div_divide(image_position, width_div_mul, width_div_shift);
int image_position_x = image_position - (image_position_y * image_width);
// Store
if ( image_position < (image_width * image_height) ) {
gpujpeg_preprocessor_raw_to_comp_store<s_comp1_samp_factor_h, s_comp1_samp_factor_v>(r1, image_position_x, image_position_y, data.comp[0]);
gpujpeg_preprocessor_raw_to_comp_store<s_comp2_samp_factor_h, s_comp2_samp_factor_v>(r2, image_position_x, image_position_y, data.comp[1]);
gpujpeg_preprocessor_raw_to_comp_store<s_comp3_samp_factor_h, s_comp3_samp_factor_v>(r3, image_position_x, image_position_y, data.comp[2]);
}
}
/**
* Select preprocessor encode kernel
*
* @param encoder
* @return kernel
*/
template<enum gpujpeg_color_space color_space_internal>
gpujpeg_preprocessor_encode_kernel
gpujpeg_preprocessor_select_encode_kernel(struct gpujpeg_coder* coder)
{
gpujpeg_preprocessor_sampling_factor_t sampling_factor = gpujpeg_preprocessor_make_sampling_factor(
coder->sampling_factor.horizontal / coder->component[0].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[0].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[1].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[1].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[2].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[2].sampling_factor.vertical
);
#define RETURN_KERNEL_IF(KERNEL, COLOR, P1, P2, P3, P4, P5, P6) \
if ( sampling_factor == gpujpeg_preprocessor_make_sampling_factor(P1, P2, P3, P4, P5, P6) ) { \
int max_h = max(P1, max(P3, P5)); \
int max_v = max(P2, max(P4, P6)); \
if ( coder->param.verbose ) { \
printf("Using faster kernel for preprocessor (precompiled %dx%d, %dx%d, %dx%d).\n", max_h / P1, max_v / P2, max_h / P3, max_v / P4, max_h / P5, max_v / P6); \
} \
return &KERNEL<color_space_internal, COLOR, P1, P2, P3, P4, P5, P6>; \
}
#define RETURN_KERNEL(KERNEL, COLOR) \
RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 1, 1, 1, 1) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 2, 2, 2, 2) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 1, 2, 1, 2) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 2, 1, 2, 1) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 4, 4, 4, 4) \
else { \
if ( coder->param.verbose ) { \
printf("Using slower kernel for preprocessor (dynamic %dx%d, %dx%d, %dx%d).\n", coder->component[0].sampling_factor.horizontal, coder->component[0].sampling_factor.vertical, coder->component[1].sampling_factor.horizontal, coder->component[1].sampling_factor.vertical, coder->component[2].sampling_factor.horizontal, coder->component[2].sampling_factor.vertical); \
} \
return &KERNEL<color_space_internal, COLOR, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} \
// None color space
if ( coder->param_image.color_space == GPUJPEG_NONE ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_4_4, GPUJPEG_NONE);
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_2_2, GPUJPEG_NONE);
} else {
assert(false);
}
}
// RGB color space
else if ( coder->param_image.color_space == GPUJPEG_RGB ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_4_4, GPUJPEG_RGB);
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_2_2, GPUJPEG_RGB);
} else {
assert(false);
}
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601 ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_4_4, GPUJPEG_YCBCR_BT601);
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_2_2, GPUJPEG_YCBCR_BT601);
} else {
assert(false);
}
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601_256LVLS ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_4_4, GPUJPEG_YCBCR_BT601_256LVLS);
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_2_2, GPUJPEG_YCBCR_BT601_256LVLS);
} else {
assert(false);
}
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT709 ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_4_4, GPUJPEG_YCBCR_BT709);
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_2_2, GPUJPEG_YCBCR_BT709);
} else {
assert(false);
}
}
// YUV color space
else if ( coder->param_image.color_space == GPUJPEG_YUV ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_4_4, GPUJPEG_YUV);
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_2_2, GPUJPEG_YUV);
} else {
assert(false);
}
}
// Unknown color space
else {
assert(false);
}
#undef RETURN_KERNEL_IF
#undef RETURN_KERNEL
return NULL;
}
/** Documented at declaration */
int
gpujpeg_preprocessor_encoder_init(struct gpujpeg_coder* coder)
{
if ( coder->param_image.comp_count == 1 ) {
return 0;
}
assert(coder->param_image.comp_count == 3);
switch (coder->param_image.pixel_format) {
case GPUJPEG_444_U8_P012:
case GPUJPEG_422_U8_P1020:
{
coder->preprocessor = NULL;
if (coder->param.color_space_internal == GPUJPEG_NONE) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_encode_kernel<GPUJPEG_NONE>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_RGB) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_encode_kernel<GPUJPEG_RGB>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT601_256LVLS) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_encode_kernel<GPUJPEG_YCBCR_BT601_256LVLS>(coder);
}
else {
assert(false);
}
if ( coder->preprocessor == NULL ) {
return -1;
}
break;
}
default:
{
coder->preprocessor = NULL;
break;
}
}
return 0;
}
int
gpujpeg_preprocessor_encode_interlaced(struct gpujpeg_encoder * encoder)
{
struct gpujpeg_coder* coder = &encoder->coder;
hipMemsetAsync(coder->d_data, 0, coder->data_size * sizeof(uint8_t), *(encoder->stream));
gpujpeg_cuda_check_error("Preprocessor memset failed", return -1);
// Select kernel
gpujpeg_preprocessor_encode_kernel kernel = (gpujpeg_preprocessor_encode_kernel) coder->preprocessor;
assert(kernel != NULL);
int image_width = coder->param_image.width;
int image_height = coder->param_image.height;
// When loading 4:2:2 data of odd width, the data in fact has even width, so round it
// (at least imagemagick convert tool generates data stream in this way)
if (coder->param_image.pixel_format == GPUJPEG_422_U8_P1020) {
image_width = (coder->param_image.width + 1) & ~1;
}
// Prepare unit size
assert(coder->param_image.pixel_format == GPUJPEG_444_U8_P012 || coder->param_image.pixel_format == GPUJPEG_422_U8_P1020);
int unitSize = (coder->param_image.pixel_format >= GPUJPEG_444_U8_P012 && coder->param_image.pixel_format <= GPUJPEG_444_U8_P0P1P2) ? 3 : 2;
// Prepare kernel
int alignedSize = gpujpeg_div_and_round_up(image_width * image_height, RGB_8BIT_THREADS) * RGB_8BIT_THREADS * unitSize;
dim3 threads (RGB_8BIT_THREADS);
dim3 grid (alignedSize / (RGB_8BIT_THREADS * unitSize));
assert(alignedSize % (RGB_8BIT_THREADS * unitSize) == 0);
while ( grid.x > GPUJPEG_CUDA_MAXIMUM_GRID_SIZE ) {
grid.y *= 2;
grid.x = gpujpeg_div_and_round_up(grid.x, 2);
}
// Decompose input image width for faster division using multiply-high and right shift
uint32_t width_div_mul, width_div_shift;
gpujpeg_const_div_prepare(image_width, width_div_mul, width_div_shift);
// Run kernel
struct gpujpeg_preprocessor_data data;
for ( int comp = 0; comp < 3; comp++ ) {
assert(coder->sampling_factor.horizontal % coder->component[comp].sampling_factor.horizontal == 0);
assert(coder->sampling_factor.vertical % coder->component[comp].sampling_factor.vertical == 0);
data.comp[comp].d_data = coder->component[comp].d_data;
data.comp[comp].sampling_factor.horizontal = coder->sampling_factor.horizontal / coder->component[comp].sampling_factor.horizontal;
data.comp[comp].sampling_factor.vertical = coder->sampling_factor.vertical / coder->component[comp].sampling_factor.vertical;
data.comp[comp].data_width = coder->component[comp].data_width;
}
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, *(encoder->stream),
data,
coder->d_data_raw,
coder->d_data_raw + coder->data_raw_size,
image_width,
image_height,
width_div_mul,
width_div_shift
);
gpujpeg_cuda_check_error("Preprocessor encoding failed", return -1);
return 0;
}
/** Documented at declaration */
int
gpujpeg_preprocessor_encode(struct gpujpeg_encoder * encoder)
{
struct gpujpeg_coder * coder = &encoder->coder;
switch (coder->param_image.pixel_format) {
case GPUJPEG_U8:
{
assert(coder->param_image.comp_count == 1);
hipMemcpyAsync(coder->d_data, coder->d_data_raw, coder->data_raw_size * sizeof(uint8_t), hipMemcpyDeviceToDevice, *(encoder->stream));
return 0;
}
case GPUJPEG_444_U8_P012:
case GPUJPEG_422_U8_P1020:
{
assert(coder->param_image.comp_count == 3);
return gpujpeg_preprocessor_encode_interlaced(encoder);
}
case GPUJPEG_444_U8_P0P1P2:
{
if (coder->component[0].sampling_factor.horizontal != 1 || coder->component[0].sampling_factor.vertical != 1
|| coder->component[1].sampling_factor.horizontal != 1 || coder->component[1].sampling_factor.vertical != 1
|| coder->component[2].sampling_factor.horizontal != 1 || coder->component[2].sampling_factor.vertical != 1) {
fprintf(stderr, "Encoding JPEG from pixel format 444-u8-p0p1p2 is supported only when 4:4:4 subsampling inside JPEG is used.");
return -1;
}
if (coder->param_image.color_space != GPUJPEG_NONE) {
fprintf(stderr, "Encoding JPEG from pixel format 444-u8-p0p1p2 is supported only when no color transformation is required.");
return -1;
}
size_t data_raw_offset = 0;
size_t component_size = coder->param_image.width * coder->param_image.height;
hipMemcpyAsync(coder->component[0].d_data, coder->d_data_raw + data_raw_offset, component_size, hipMemcpyDeviceToDevice, *(encoder->stream));
data_raw_offset += component_size;
hipMemcpyAsync(coder->component[1].d_data, coder->d_data_raw + data_raw_offset, component_size, hipMemcpyDeviceToDevice, *(encoder->stream));
data_raw_offset += component_size;
hipMemcpyAsync(coder->component[2].d_data, coder->d_data_raw + data_raw_offset, component_size, hipMemcpyDeviceToDevice, *(encoder->stream));
gpujpeg_cuda_check_error("Preprocessor copy failed", return -1);
return 0;
}
case GPUJPEG_422_U8_P0P1P2:
{
if (coder->component[0].sampling_factor.horizontal != 2 || coder->component[0].sampling_factor.vertical != 1
|| coder->component[1].sampling_factor.horizontal != 1 || coder->component[1].sampling_factor.vertical != 1
|| coder->component[2].sampling_factor.horizontal != 1 || coder->component[2].sampling_factor.vertical != 1) {
fprintf(stderr, "Encoding JPEG from pixel format 422-u8-p0p1p2 is supported only to 4:2:2 subsampling inside JPEG is used.");
return -1;
}
if (coder->param_image.color_space != GPUJPEG_NONE) {
fprintf(stderr, "Encoding JPEG from pixel format 420-u8-p0p1p2 is supported only when no color transformation is required.");
return -1;
}
size_t data_raw_offset = 0;
size_t component_size_y = coder->component[0].data_width * coder->component[0].data_height;
size_t component_size_uv = coder->component[1].data_width * coder->component[1].data_height;
assert(coder->component[1].data_width == coder->component[2].data_width);
assert(coder->component[1].data_height == coder->component[2].data_height);
hipMemcpyAsync(coder->component[0].d_data, coder->d_data_raw + data_raw_offset, component_size_y, hipMemcpyDeviceToDevice, *(encoder->stream));
data_raw_offset += component_size_y;
hipMemcpyAsync(coder->component[1].d_data, coder->d_data_raw + data_raw_offset, component_size_uv, hipMemcpyDeviceToDevice, *(encoder->stream));
data_raw_offset += component_size_uv;
hipMemcpyAsync(coder->component[2].d_data, coder->d_data_raw + data_raw_offset, component_size_uv, hipMemcpyDeviceToDevice, *(encoder->stream));
gpujpeg_cuda_check_error("Preprocessor copy failed", return -1);
return 0;
}
case GPUJPEG_420_U8_P0P1P2:
{
if (coder->component[0].sampling_factor.horizontal != 2 || coder->component[0].sampling_factor.vertical != 2
|| coder->component[1].sampling_factor.horizontal != 1 || coder->component[1].sampling_factor.vertical != 1
|| coder->component[2].sampling_factor.horizontal != 1 || coder->component[2].sampling_factor.vertical != 1) {
fprintf(stderr, "Encoding JPEG from pixel format 420-u8-p0p1p2 is supported only to 4:2:0 subsampling inside JPEG is used.");
return -1;
}
if (coder->param_image.color_space != GPUJPEG_NONE) {
fprintf(stderr, "Encoding JPEG from pixel format 420-u8-p0p1p2 is supported only when no color transformation is required.");
return -1;
}
size_t data_raw_offset = 0;
size_t component_size_y = coder->component[0].data_width * coder->component[0].data_height;
size_t component_size_uv = coder->component[1].data_width * coder->component[1].data_height;
assert(coder->component[1].data_width == coder->component[2].data_width);
assert(coder->component[1].data_height == coder->component[2].data_height);
hipMemcpyAsync(coder->component[0].d_data, coder->d_data_raw + data_raw_offset, component_size_y, hipMemcpyDeviceToDevice, *(encoder->stream));
data_raw_offset += component_size_y;
hipMemcpyAsync(coder->component[1].d_data, coder->d_data_raw + data_raw_offset, component_size_uv, hipMemcpyDeviceToDevice, *(encoder->stream));
data_raw_offset += component_size_uv;
hipMemcpyAsync(coder->component[2].d_data, coder->d_data_raw + data_raw_offset, component_size_uv, hipMemcpyDeviceToDevice, *(encoder->stream));
gpujpeg_cuda_check_error("Preprocessor copy failed", return -1);
return 0;
}
default:
{
fprintf(stderr, "Unknown pixel format %d to be preprocessed.", coder->param_image.pixel_format);
return -1;
}
}
}
/**
* Store value to component data buffer in specified position by buffer size and subsampling
*
* @param value
* @param position_x
* @param position_y
* @param comp
*/
template<
uint8_t s_samp_factor_h = GPUJPEG_DYNAMIC,
uint8_t s_samp_factor_v = GPUJPEG_DYNAMIC
>
struct gpujpeg_preprocessor_comp_to_raw_load
{
static __device__ void
perform(uint8_t & value, int position_x, int position_y, struct gpujpeg_preprocessor_data_component & comp)
{
uint8_t samp_factor_h = s_samp_factor_h;
if ( samp_factor_h == GPUJPEG_DYNAMIC ) {
samp_factor_h = comp.sampling_factor.horizontal;
}
uint8_t samp_factor_v = s_samp_factor_v;
if ( samp_factor_v == GPUJPEG_DYNAMIC ) {
samp_factor_v = comp.sampling_factor.vertical;
}
position_x = position_x / samp_factor_h;
position_y = position_y / samp_factor_v;
int data_position = position_y * comp.data_width + position_x;
value = comp.d_data[data_position];
}
};
template<>
struct gpujpeg_preprocessor_comp_to_raw_load<1, 1>
{
static __device__ void
perform(uint8_t & value, int position_x, int position_y, struct gpujpeg_preprocessor_data_component & comp)
{
int data_position = position_y * comp.data_width + position_x;
value = comp.d_data[data_position];
}
};
/**
* Kernel - Copy three separated component buffers into target image data
*
* @param d_c1 First component buffer
* @param d_c2 Second component buffer
* @param d_c3 Third component buffer
* @param d_target Image target data
* @param pixel_count Number of pixels to copy
* @return void
*/
typedef void (*gpujpeg_preprocessor_decode_kernel)(struct gpujpeg_preprocessor_data data, uint8_t* d_data_raw, int image_width, int image_height);
/** Specialization [sampling factor is 4:4:4] */
template<
enum gpujpeg_color_space color_space_internal,
enum gpujpeg_color_space color_space,
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v
>
__global__ void
gpujpeg_preprocessor_comp_to_raw_kernel_4_4_4(struct gpujpeg_preprocessor_data data, uint8_t* d_data_raw, int image_width, int image_height)
{
int x = threadIdx.x;
int gX = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x;
int image_position = gX + x;
if ( image_position >= (image_width * image_height) )
return;
int image_position_x = image_position % image_width;
int image_position_y = image_position / image_width;
// Load
uint8_t r1;
uint8_t r2;
uint8_t r3;
gpujpeg_preprocessor_comp_to_raw_load<s_comp1_samp_factor_h, s_comp1_samp_factor_v>::perform(r1, image_position_x, image_position_y, data.comp[0]);
gpujpeg_preprocessor_comp_to_raw_load<s_comp2_samp_factor_h, s_comp2_samp_factor_v>::perform(r2, image_position_x, image_position_y, data.comp[1]);
gpujpeg_preprocessor_comp_to_raw_load<s_comp3_samp_factor_h, s_comp3_samp_factor_v>::perform(r3, image_position_x, image_position_y, data.comp[2]);
// Color transform
gpujpeg_color_transform<color_space_internal, color_space>::perform(r1, r2, r3);
// Store Order
gpujpeg_color_order<color_space>::perform_store(r1, r2, r3);
// Save
image_position = image_position * 3;
d_data_raw[image_position + 0] = r1;
d_data_raw[image_position + 1] = r2;
d_data_raw[image_position + 2] = r3;
}
/** Specialization [sampling factor is 4:2:2] */
template<
enum gpujpeg_color_space color_space_internal,
enum gpujpeg_color_space color_space,
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v
>
__global__ void
gpujpeg_preprocessor_comp_to_raw_kernel_4_2_2(struct gpujpeg_preprocessor_data data, uint8_t* d_data_raw, int image_width, int image_height)
{
int x = threadIdx.x;
int gX = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x;
int image_position = gX + x;
if ( image_position >= (image_width * image_height) )
return;
int image_position_x = image_position % image_width;
int image_position_y = image_position / image_width;
// Load
uint8_t r1;
uint8_t r2;
uint8_t r3;
gpujpeg_preprocessor_comp_to_raw_load<s_comp1_samp_factor_h, s_comp1_samp_factor_v>::perform(r1, image_position_x, image_position_y, data.comp[0]);
gpujpeg_preprocessor_comp_to_raw_load<s_comp2_samp_factor_h, s_comp2_samp_factor_v>::perform(r2, image_position_x, image_position_y, data.comp[1]);
gpujpeg_preprocessor_comp_to_raw_load<s_comp3_samp_factor_h, s_comp3_samp_factor_v>::perform(r3, image_position_x, image_position_y, data.comp[2]);
// Color transform
gpujpeg_color_transform<color_space_internal, color_space>::perform(r1, r2, r3);
// Store Order
gpujpeg_color_order<color_space>::perform_store(r1, r2, r3);
// Save
image_position = image_position * 2;
d_data_raw[image_position + 1] = r2;
if ( (image_position_x % 2) == 0 )
d_data_raw[image_position + 0] = r1;
else
d_data_raw[image_position + 0] = r3;
}
/**
* Select preprocessor decode kernel
*
* @param decoder
* @return kernel
*/
template<enum gpujpeg_color_space color_space_internal>
gpujpeg_preprocessor_decode_kernel
gpujpeg_preprocessor_select_decode_kernel(struct gpujpeg_coder* coder)
{
gpujpeg_preprocessor_sampling_factor_t sampling_factor = gpujpeg_preprocessor_make_sampling_factor(
coder->sampling_factor.horizontal / coder->component[0].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[0].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[1].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[1].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[2].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[2].sampling_factor.vertical
);
#define RETURN_KERNEL_IF(KERNEL, COLOR, P1, P2, P3, P4, P5, P6) \
if ( sampling_factor == gpujpeg_preprocessor_make_sampling_factor(P1, P2, P3, P4, P5, P6) ) { \
int max_h = max(P1, max(P3, P5)); \
int max_v = max(P2, max(P4, P6)); \
if ( coder->param.verbose ) { \
printf("Using faster kernel for postprocessor (precompiled %dx%d, %dx%d, %dx%d).\n", max_h / P1, max_v / P2, max_h / P3, max_v / P4, max_h / P5, max_v / P6); \
} \
return &KERNEL<color_space_internal, COLOR, P1, P2, P3, P4, P5, P6>; \
}
#define RETURN_KERNEL(KERNEL, COLOR) \
RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 1, 1, 1, 1) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 2, 2, 2, 2) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 1, 2, 1, 2) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 2, 1, 2, 1) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 4, 4, 4, 4) \
else { \
if ( coder->param.verbose ) { \
printf("Using slower kernel for postprocessor (dynamic %dx%d, %dx%d, %dx%d).\n", coder->component[0].sampling_factor.horizontal, coder->component[0].sampling_factor.vertical, coder->component[1].sampling_factor.horizontal, coder->component[1].sampling_factor.vertical, coder->component[2].sampling_factor.horizontal, coder->component[2].sampling_factor.vertical); \
} \
return &KERNEL<color_space_internal, COLOR, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} \
// None color space
if ( coder->param_image.color_space == GPUJPEG_NONE ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_4_4, GPUJPEG_NONE)
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_2_2, GPUJPEG_NONE)
} else {
assert(false);
}
}
// RGB color space
else if ( coder->param_image.color_space == GPUJPEG_RGB ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_4_4, GPUJPEG_RGB)
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_2_2, GPUJPEG_RGB)
} else {
assert(false);
}
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601 ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_4_4, GPUJPEG_YCBCR_BT601)
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_2_2, GPUJPEG_YCBCR_BT601)
} else {
assert(false);
}
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601_256LVLS ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_4_4, GPUJPEG_YCBCR_BT601_256LVLS)
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_2_2, GPUJPEG_YCBCR_BT601_256LVLS)
} else {
assert(false);
}
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT709 ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_4_4, GPUJPEG_YCBCR_BT709)
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_2_2, GPUJPEG_YCBCR_BT709)
} else {
assert(false);
}
}
// YUV color space
else if ( coder->param_image.color_space == GPUJPEG_YUV ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_4_4, GPUJPEG_YUV)
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_2_2, GPUJPEG_YUV)
} else {
assert(false);
}
}
// Unknown color space
else {
assert(false);
}
#undef RETURN_KERNEL_IF
#undef RETURN_KERNEL
return NULL;
}
/** Documented at declaration */
int
gpujpeg_preprocessor_decoder_init(struct gpujpeg_coder* coder)
{
if (coder->param_image.comp_count == 1) {
return 0;
}
assert(coder->param_image.comp_count == 3);
if (coder->param.color_space_internal == GPUJPEG_NONE) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_NONE>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_RGB) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_RGB>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT601_256LVLS) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_YCBCR_BT601_256LVLS>(coder);
}
else {
assert(false);
}
if (coder->preprocessor == NULL) {
return -1;
}
return 0;
}
/** Documented at declaration */
int
gpujpeg_preprocessor_decode(struct gpujpeg_coder* coder, hipStream_t stream)
{
if (coder->param_image.comp_count == 1) {
hipMemcpyAsync(coder->d_data_raw, coder->d_data, coder->data_raw_size * sizeof(uint8_t), hipMemcpyDeviceToDevice, stream);
return 0;
}
assert(coder->param_image.comp_count == 3);
hipMemsetAsync(coder->d_data_raw, 0, coder->data_raw_size * sizeof(uint8_t), stream);
// Select kernel
gpujpeg_preprocessor_decode_kernel kernel = (gpujpeg_preprocessor_decode_kernel)coder->preprocessor;
assert(kernel != NULL);
int image_width = coder->param_image.width;
int image_height = coder->param_image.height;
// When saving 4:2:2 data of odd width, the data should have even width, so round it
if (coder->param_image.pixel_format == GPUJPEG_422_U8_P1020) {
image_width = gpujpeg_div_and_round_up(coder->param_image.width, 2) * 2;
}
// Prepare unit size
int unitSize = (coder->param_image.pixel_format >= GPUJPEG_444_U8_P012 && coder->param_image.pixel_format <= GPUJPEG_444_U8_P0P1P2) ? 3 : 2;
// Prepare kernel
int alignedSize = gpujpeg_div_and_round_up(image_width * image_height, RGB_8BIT_THREADS) * RGB_8BIT_THREADS * unitSize;
dim3 threads (RGB_8BIT_THREADS);
dim3 grid (alignedSize / (RGB_8BIT_THREADS * unitSize));
assert(alignedSize % (RGB_8BIT_THREADS * unitSize) == 0);
if ( grid.x > GPUJPEG_CUDA_MAXIMUM_GRID_SIZE ) {
grid.y = gpujpeg_div_and_round_up(grid.x, GPUJPEG_CUDA_MAXIMUM_GRID_SIZE);
grid.x = GPUJPEG_CUDA_MAXIMUM_GRID_SIZE;
}
// Run kernel
struct gpujpeg_preprocessor_data data;
for ( int comp = 0; comp < 3; comp++ ) {
assert(coder->sampling_factor.horizontal % coder->component[comp].sampling_factor.horizontal == 0);
assert(coder->sampling_factor.vertical % coder->component[comp].sampling_factor.vertical == 0);
data.comp[comp].d_data = coder->component[comp].d_data;
data.comp[comp].sampling_factor.horizontal = coder->sampling_factor.horizontal / coder->component[comp].sampling_factor.horizontal;
data.comp[comp].sampling_factor.vertical = coder->sampling_factor.vertical / coder->component[comp].sampling_factor.vertical;
data.comp[comp].data_width = coder->component[comp].data_width;
}
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(threads), 0, stream,
data,
coder->d_data_raw,
image_width,
image_height
);
gpujpeg_cuda_check_error("Preprocessor encoding failed", return -1);
return 0;
}
| 93674c51794fc03090bd1037f159de488321dc0c.cu | /**
* Copyright (c) 2011, CESNET z.s.p.o
* Copyright (c) 2011, Silicon Genome, LLC.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "gpujpeg_preprocessor.h"
#include <libgpujpeg/gpujpeg_util.h>
#include "gpujpeg_colorspace.h"
#define RGB_8BIT_THREADS 256
/**
* Preprocessor data for component
*/
struct gpujpeg_preprocessor_data_component
{
uint8_t* d_data;
int data_width;
struct gpujpeg_component_sampling_factor sampling_factor;
};
/**
* Preprocessor data
*/
struct gpujpeg_preprocessor_data
{
struct gpujpeg_preprocessor_data_component comp[3];
};
/** Value that means that sampling factor has dynamic value */
#define GPUJPEG_DYNAMIC 16
/** Sampling factor for all components */
typedef int gpujpeg_preprocessor_sampling_factor_t;
/**
* Prepares fixed divisor for dividing unsigned integers up to 2^31
* with unsigned integers up to 2^31.
* Source: http://www.hackersdelight.org/HDcode/magic.c.txt
* Modified for positive numbers only.
*/
static void
gpujpeg_const_div_prepare(const uint32_t d, uint32_t & pre_div_mul, uint32_t & pre_div_shift) {
if(d > 1) {
uint32_t delta;
const uint32_t two31 = 0x80000000; // 2**31.
const uint32_t anc = two31 - 1 - two31 % d; // Absolute value of nc.
int p = 31; // Init. p.
uint32_t q1 = two31 / anc; // Init. q1 = 2**p/|nc|.
uint32_t r1 = two31 - q1 * anc; // Init. r1 = rem(2**p, |nc|).
uint32_t q2 = two31 / d; // Init. q2 = 2**p/|d|.
uint32_t r2 = two31 - q2 * d; // Init. r2 = rem(2**p, |d|).
do {
p = p + 1;
q1 = 2 * q1; // Update q1 = 2**p/|nc|.
r1 = 2 * r1; // Update r1 = rem(2**p, |nc|).
if (r1 >= anc) { // (Must be an unsigned
q1 = q1 + 1; // comparison here).
r1 = r1 - anc;
}
q2 = 2 * q2; // Update q2 = 2**p/|d|.
r2 = 2 * r2; // Update r2 = rem(2**p, |d|).
if (r2 >= d) { // (Must be an unsigned
q2 = q2 + 1; // comparison here).
r2 = r2 - d;
}
delta = d - r2;
} while (q1 < delta || (q1 == delta && r1 == 0));
pre_div_mul = q2 + 1;
pre_div_shift = p - 32; // shift amount to return.
} else {
pre_div_mul = 0; // special case for d = 1
pre_div_shift = 0;
}
}
/**
* Divides unsigned numerator (up to 2^31) by precomputed constant denominator.
*/
__device__ static uint32_t
gpujpeg_const_div_divide(const uint32_t numerator, const uint32_t pre_div_mul, const uint32_t pre_div_shift) {
return pre_div_mul ? __umulhi(numerator, pre_div_mul) >> pre_div_shift : numerator;
}
/**
* Compose sampling factor for all components to single type
*
* @return integer that contains all sampling factors
*/
inline gpujpeg_preprocessor_sampling_factor_t
gpujpeg_preprocessor_make_sampling_factor(int comp1_h, int comp1_v, int comp2_h, int comp2_v, int comp3_h, int comp3_v)
{
gpujpeg_preprocessor_sampling_factor_t sampling_factor = 0;
sampling_factor |= ((comp1_h << 4) | comp1_v) << 16;
sampling_factor |= ((comp2_h << 4) | comp2_v) << 8;
sampling_factor |= ((comp3_h << 4) | comp3_v) << 0;
return sampling_factor;
}
/**
* Store value to component data buffer in specified position by buffer size and subsampling
*/
template<
unsigned int s_samp_factor_h,
unsigned int s_samp_factor_v
>
static __device__ void
gpujpeg_preprocessor_raw_to_comp_store(uint8_t value, unsigned int position_x, unsigned int position_y, struct gpujpeg_preprocessor_data_component & comp)
{
const unsigned int samp_factor_h = ( s_samp_factor_h == GPUJPEG_DYNAMIC ) ? comp.sampling_factor.horizontal : s_samp_factor_h;
const unsigned int samp_factor_v = ( s_samp_factor_v == GPUJPEG_DYNAMIC ) ? comp.sampling_factor.vertical : s_samp_factor_v;
if ( (position_x % samp_factor_h) || (position_y % samp_factor_v) )
return;
position_x = position_x / samp_factor_h;
position_y = position_y / samp_factor_v;
const unsigned int data_position = position_y * comp.data_width + position_x;
comp.d_data[data_position] = value;
}
/**
* Kernel - Copy raw image source data into three separated component buffers
*/
typedef void (*gpujpeg_preprocessor_encode_kernel)(struct gpujpeg_preprocessor_data data, const uint8_t* d_data_raw, const uint8_t* d_data_raw_end, int image_width, int image_height, uint32_t width_div_mul, uint32_t width_div_shift);
/** Specialization [sampling factor is 4:4:4] */
template<
enum gpujpeg_color_space color_space_internal,
enum gpujpeg_color_space color_space,
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v
>
__global__ void
gpujpeg_preprocessor_raw_to_comp_kernel_4_4_4(struct gpujpeg_preprocessor_data data, const uint8_t* d_data_raw, const uint8_t* d_data_raw_end, int image_width, int image_height, uint32_t width_div_mul, uint32_t width_div_shift)
{
int x = threadIdx.x;
int gX = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x;
// Load to shared
__shared__ unsigned char s_data[RGB_8BIT_THREADS * 3];
if ( (x * 4) < RGB_8BIT_THREADS * 3 ) {
uint32_t* s = (uint32_t *) d_data_raw + ((gX * 3) >> 2) + x;
uint32_t* d = (uint32_t *) s_data + x;
if ((uint8_t *) s < d_data_raw_end) {
*d = *s;
}
}
__syncthreads();
// Load
int offset = x * 3;
uint8_t r1 = s_data[offset];
uint8_t r2 = s_data[offset + 1];
uint8_t r3 = s_data[offset + 2];
// Load Order
gpujpeg_color_order<color_space>::perform_load(r1, r2, r3);
// Color transform
gpujpeg_color_transform<color_space, color_space_internal>::perform(r1, r2, r3);
// Position
int image_position = gX + x;
int image_position_y = gpujpeg_const_div_divide(image_position, width_div_mul, width_div_shift);
int image_position_x = image_position - (image_position_y * image_width);
// Store
if ( image_position < (image_width * image_height) ) {
gpujpeg_preprocessor_raw_to_comp_store<s_comp1_samp_factor_h, s_comp1_samp_factor_v>(r1, image_position_x, image_position_y, data.comp[0]);
gpujpeg_preprocessor_raw_to_comp_store<s_comp2_samp_factor_h, s_comp2_samp_factor_v>(r2, image_position_x, image_position_y, data.comp[1]);
gpujpeg_preprocessor_raw_to_comp_store<s_comp3_samp_factor_h, s_comp3_samp_factor_v>(r3, image_position_x, image_position_y, data.comp[2]);
}
}
/** Specialization [sampling factor is 4:2:2] */
template<
enum gpujpeg_color_space color_space_internal,
enum gpujpeg_color_space color_space,
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v
>
__global__ void
gpujpeg_preprocessor_raw_to_comp_kernel_4_2_2(struct gpujpeg_preprocessor_data data, const uint8_t* d_data_raw, const uint8_t* d_data_raw_end, int image_width, int image_height, uint32_t width_div_mul, uint32_t width_div_shift)
{
int x = threadIdx.x;
int gX = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x;
// Load to shared
__shared__ unsigned char s_data[RGB_8BIT_THREADS * 2];
if ( (x * 4) < RGB_8BIT_THREADS * 2 ) {
uint32_t* s = (uint32_t *) d_data_raw + ((gX * 2) >> 2) + x;
uint32_t* d = (uint32_t *) s_data + x;
if ((uint8_t *) s < d_data_raw_end) {
*d = *s;
}
}
__syncthreads();
// Load
const unsigned int offset = x * 2;
uint8_t r1;
uint8_t r2 = s_data[offset + 1];
uint8_t r3;
if ( (gX + x) % 2 == 0 ) {
r1 = s_data[offset];
r3 = s_data[offset + 2];
} else {
r1 = s_data[offset - 2];
r3 = s_data[offset];
}
// Load Order
gpujpeg_color_order<color_space>::perform_load(r1, r2, r3);
// Color transform
gpujpeg_color_transform<color_space, color_space_internal>::perform(r1, r2, r3);
// Position
int image_position = gX + x;
int image_position_y = gpujpeg_const_div_divide(image_position, width_div_mul, width_div_shift);
int image_position_x = image_position - (image_position_y * image_width);
// Store
if ( image_position < (image_width * image_height) ) {
gpujpeg_preprocessor_raw_to_comp_store<s_comp1_samp_factor_h, s_comp1_samp_factor_v>(r1, image_position_x, image_position_y, data.comp[0]);
gpujpeg_preprocessor_raw_to_comp_store<s_comp2_samp_factor_h, s_comp2_samp_factor_v>(r2, image_position_x, image_position_y, data.comp[1]);
gpujpeg_preprocessor_raw_to_comp_store<s_comp3_samp_factor_h, s_comp3_samp_factor_v>(r3, image_position_x, image_position_y, data.comp[2]);
}
}
/**
* Select preprocessor encode kernel
*
* @param encoder
* @return kernel
*/
template<enum gpujpeg_color_space color_space_internal>
gpujpeg_preprocessor_encode_kernel
gpujpeg_preprocessor_select_encode_kernel(struct gpujpeg_coder* coder)
{
gpujpeg_preprocessor_sampling_factor_t sampling_factor = gpujpeg_preprocessor_make_sampling_factor(
coder->sampling_factor.horizontal / coder->component[0].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[0].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[1].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[1].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[2].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[2].sampling_factor.vertical
);
#define RETURN_KERNEL_IF(KERNEL, COLOR, P1, P2, P3, P4, P5, P6) \
if ( sampling_factor == gpujpeg_preprocessor_make_sampling_factor(P1, P2, P3, P4, P5, P6) ) { \
int max_h = max(P1, max(P3, P5)); \
int max_v = max(P2, max(P4, P6)); \
if ( coder->param.verbose ) { \
printf("Using faster kernel for preprocessor (precompiled %dx%d, %dx%d, %dx%d).\n", max_h / P1, max_v / P2, max_h / P3, max_v / P4, max_h / P5, max_v / P6); \
} \
return &KERNEL<color_space_internal, COLOR, P1, P2, P3, P4, P5, P6>; \
}
#define RETURN_KERNEL(KERNEL, COLOR) \
RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 1, 1, 1, 1) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 2, 2, 2, 2) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 1, 2, 1, 2) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 2, 1, 2, 1) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 4, 4, 4, 4) \
else { \
if ( coder->param.verbose ) { \
printf("Using slower kernel for preprocessor (dynamic %dx%d, %dx%d, %dx%d).\n", coder->component[0].sampling_factor.horizontal, coder->component[0].sampling_factor.vertical, coder->component[1].sampling_factor.horizontal, coder->component[1].sampling_factor.vertical, coder->component[2].sampling_factor.horizontal, coder->component[2].sampling_factor.vertical); \
} \
return &KERNEL<color_space_internal, COLOR, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} \
// None color space
if ( coder->param_image.color_space == GPUJPEG_NONE ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_4_4, GPUJPEG_NONE);
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_2_2, GPUJPEG_NONE);
} else {
assert(false);
}
}
// RGB color space
else if ( coder->param_image.color_space == GPUJPEG_RGB ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_4_4, GPUJPEG_RGB);
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_2_2, GPUJPEG_RGB);
} else {
assert(false);
}
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601 ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_4_4, GPUJPEG_YCBCR_BT601);
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_2_2, GPUJPEG_YCBCR_BT601);
} else {
assert(false);
}
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601_256LVLS ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_4_4, GPUJPEG_YCBCR_BT601_256LVLS);
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_2_2, GPUJPEG_YCBCR_BT601_256LVLS);
} else {
assert(false);
}
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT709 ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_4_4, GPUJPEG_YCBCR_BT709);
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_2_2, GPUJPEG_YCBCR_BT709);
} else {
assert(false);
}
}
// YUV color space
else if ( coder->param_image.color_space == GPUJPEG_YUV ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_4_4, GPUJPEG_YUV);
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_raw_to_comp_kernel_4_2_2, GPUJPEG_YUV);
} else {
assert(false);
}
}
// Unknown color space
else {
assert(false);
}
#undef RETURN_KERNEL_IF
#undef RETURN_KERNEL
return NULL;
}
/** Documented at declaration */
int
gpujpeg_preprocessor_encoder_init(struct gpujpeg_coder* coder)
{
if ( coder->param_image.comp_count == 1 ) {
return 0;
}
assert(coder->param_image.comp_count == 3);
switch (coder->param_image.pixel_format) {
case GPUJPEG_444_U8_P012:
case GPUJPEG_422_U8_P1020:
{
coder->preprocessor = NULL;
if (coder->param.color_space_internal == GPUJPEG_NONE) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_encode_kernel<GPUJPEG_NONE>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_RGB) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_encode_kernel<GPUJPEG_RGB>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT601_256LVLS) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_encode_kernel<GPUJPEG_YCBCR_BT601_256LVLS>(coder);
}
else {
assert(false);
}
if ( coder->preprocessor == NULL ) {
return -1;
}
break;
}
default:
{
coder->preprocessor = NULL;
break;
}
}
return 0;
}
int
gpujpeg_preprocessor_encode_interlaced(struct gpujpeg_encoder * encoder)
{
struct gpujpeg_coder* coder = &encoder->coder;
cudaMemsetAsync(coder->d_data, 0, coder->data_size * sizeof(uint8_t), *(encoder->stream));
gpujpeg_cuda_check_error("Preprocessor memset failed", return -1);
// Select kernel
gpujpeg_preprocessor_encode_kernel kernel = (gpujpeg_preprocessor_encode_kernel) coder->preprocessor;
assert(kernel != NULL);
int image_width = coder->param_image.width;
int image_height = coder->param_image.height;
// When loading 4:2:2 data of odd width, the data in fact has even width, so round it
// (at least imagemagick convert tool generates data stream in this way)
if (coder->param_image.pixel_format == GPUJPEG_422_U8_P1020) {
image_width = (coder->param_image.width + 1) & ~1;
}
// Prepare unit size
assert(coder->param_image.pixel_format == GPUJPEG_444_U8_P012 || coder->param_image.pixel_format == GPUJPEG_422_U8_P1020);
int unitSize = (coder->param_image.pixel_format >= GPUJPEG_444_U8_P012 && coder->param_image.pixel_format <= GPUJPEG_444_U8_P0P1P2) ? 3 : 2;
// Prepare kernel
int alignedSize = gpujpeg_div_and_round_up(image_width * image_height, RGB_8BIT_THREADS) * RGB_8BIT_THREADS * unitSize;
dim3 threads (RGB_8BIT_THREADS);
dim3 grid (alignedSize / (RGB_8BIT_THREADS * unitSize));
assert(alignedSize % (RGB_8BIT_THREADS * unitSize) == 0);
while ( grid.x > GPUJPEG_CUDA_MAXIMUM_GRID_SIZE ) {
grid.y *= 2;
grid.x = gpujpeg_div_and_round_up(grid.x, 2);
}
// Decompose input image width for faster division using multiply-high and right shift
uint32_t width_div_mul, width_div_shift;
gpujpeg_const_div_prepare(image_width, width_div_mul, width_div_shift);
// Run kernel
struct gpujpeg_preprocessor_data data;
for ( int comp = 0; comp < 3; comp++ ) {
assert(coder->sampling_factor.horizontal % coder->component[comp].sampling_factor.horizontal == 0);
assert(coder->sampling_factor.vertical % coder->component[comp].sampling_factor.vertical == 0);
data.comp[comp].d_data = coder->component[comp].d_data;
data.comp[comp].sampling_factor.horizontal = coder->sampling_factor.horizontal / coder->component[comp].sampling_factor.horizontal;
data.comp[comp].sampling_factor.vertical = coder->sampling_factor.vertical / coder->component[comp].sampling_factor.vertical;
data.comp[comp].data_width = coder->component[comp].data_width;
}
kernel<<<grid, threads, 0, *(encoder->stream)>>>(
data,
coder->d_data_raw,
coder->d_data_raw + coder->data_raw_size,
image_width,
image_height,
width_div_mul,
width_div_shift
);
gpujpeg_cuda_check_error("Preprocessor encoding failed", return -1);
return 0;
}
/** Documented at declaration */
int
gpujpeg_preprocessor_encode(struct gpujpeg_encoder * encoder)
{
struct gpujpeg_coder * coder = &encoder->coder;
switch (coder->param_image.pixel_format) {
case GPUJPEG_U8:
{
assert(coder->param_image.comp_count == 1);
cudaMemcpyAsync(coder->d_data, coder->d_data_raw, coder->data_raw_size * sizeof(uint8_t), cudaMemcpyDeviceToDevice, *(encoder->stream));
return 0;
}
case GPUJPEG_444_U8_P012:
case GPUJPEG_422_U8_P1020:
{
assert(coder->param_image.comp_count == 3);
return gpujpeg_preprocessor_encode_interlaced(encoder);
}
case GPUJPEG_444_U8_P0P1P2:
{
if (coder->component[0].sampling_factor.horizontal != 1 || coder->component[0].sampling_factor.vertical != 1
|| coder->component[1].sampling_factor.horizontal != 1 || coder->component[1].sampling_factor.vertical != 1
|| coder->component[2].sampling_factor.horizontal != 1 || coder->component[2].sampling_factor.vertical != 1) {
fprintf(stderr, "Encoding JPEG from pixel format 444-u8-p0p1p2 is supported only when 4:4:4 subsampling inside JPEG is used.");
return -1;
}
if (coder->param_image.color_space != GPUJPEG_NONE) {
fprintf(stderr, "Encoding JPEG from pixel format 444-u8-p0p1p2 is supported only when no color transformation is required.");
return -1;
}
size_t data_raw_offset = 0;
size_t component_size = coder->param_image.width * coder->param_image.height;
cudaMemcpyAsync(coder->component[0].d_data, coder->d_data_raw + data_raw_offset, component_size, cudaMemcpyDeviceToDevice, *(encoder->stream));
data_raw_offset += component_size;
cudaMemcpyAsync(coder->component[1].d_data, coder->d_data_raw + data_raw_offset, component_size, cudaMemcpyDeviceToDevice, *(encoder->stream));
data_raw_offset += component_size;
cudaMemcpyAsync(coder->component[2].d_data, coder->d_data_raw + data_raw_offset, component_size, cudaMemcpyDeviceToDevice, *(encoder->stream));
gpujpeg_cuda_check_error("Preprocessor copy failed", return -1);
return 0;
}
case GPUJPEG_422_U8_P0P1P2:
{
if (coder->component[0].sampling_factor.horizontal != 2 || coder->component[0].sampling_factor.vertical != 1
|| coder->component[1].sampling_factor.horizontal != 1 || coder->component[1].sampling_factor.vertical != 1
|| coder->component[2].sampling_factor.horizontal != 1 || coder->component[2].sampling_factor.vertical != 1) {
fprintf(stderr, "Encoding JPEG from pixel format 422-u8-p0p1p2 is supported only to 4:2:2 subsampling inside JPEG is used.");
return -1;
}
if (coder->param_image.color_space != GPUJPEG_NONE) {
fprintf(stderr, "Encoding JPEG from pixel format 420-u8-p0p1p2 is supported only when no color transformation is required.");
return -1;
}
size_t data_raw_offset = 0;
size_t component_size_y = coder->component[0].data_width * coder->component[0].data_height;
size_t component_size_uv = coder->component[1].data_width * coder->component[1].data_height;
assert(coder->component[1].data_width == coder->component[2].data_width);
assert(coder->component[1].data_height == coder->component[2].data_height);
cudaMemcpyAsync(coder->component[0].d_data, coder->d_data_raw + data_raw_offset, component_size_y, cudaMemcpyDeviceToDevice, *(encoder->stream));
data_raw_offset += component_size_y;
cudaMemcpyAsync(coder->component[1].d_data, coder->d_data_raw + data_raw_offset, component_size_uv, cudaMemcpyDeviceToDevice, *(encoder->stream));
data_raw_offset += component_size_uv;
cudaMemcpyAsync(coder->component[2].d_data, coder->d_data_raw + data_raw_offset, component_size_uv, cudaMemcpyDeviceToDevice, *(encoder->stream));
gpujpeg_cuda_check_error("Preprocessor copy failed", return -1);
return 0;
}
case GPUJPEG_420_U8_P0P1P2:
{
if (coder->component[0].sampling_factor.horizontal != 2 || coder->component[0].sampling_factor.vertical != 2
|| coder->component[1].sampling_factor.horizontal != 1 || coder->component[1].sampling_factor.vertical != 1
|| coder->component[2].sampling_factor.horizontal != 1 || coder->component[2].sampling_factor.vertical != 1) {
fprintf(stderr, "Encoding JPEG from pixel format 420-u8-p0p1p2 is supported only to 4:2:0 subsampling inside JPEG is used.");
return -1;
}
if (coder->param_image.color_space != GPUJPEG_NONE) {
fprintf(stderr, "Encoding JPEG from pixel format 420-u8-p0p1p2 is supported only when no color transformation is required.");
return -1;
}
size_t data_raw_offset = 0;
size_t component_size_y = coder->component[0].data_width * coder->component[0].data_height;
size_t component_size_uv = coder->component[1].data_width * coder->component[1].data_height;
assert(coder->component[1].data_width == coder->component[2].data_width);
assert(coder->component[1].data_height == coder->component[2].data_height);
cudaMemcpyAsync(coder->component[0].d_data, coder->d_data_raw + data_raw_offset, component_size_y, cudaMemcpyDeviceToDevice, *(encoder->stream));
data_raw_offset += component_size_y;
cudaMemcpyAsync(coder->component[1].d_data, coder->d_data_raw + data_raw_offset, component_size_uv, cudaMemcpyDeviceToDevice, *(encoder->stream));
data_raw_offset += component_size_uv;
cudaMemcpyAsync(coder->component[2].d_data, coder->d_data_raw + data_raw_offset, component_size_uv, cudaMemcpyDeviceToDevice, *(encoder->stream));
gpujpeg_cuda_check_error("Preprocessor copy failed", return -1);
return 0;
}
default:
{
fprintf(stderr, "Unknown pixel format %d to be preprocessed.", coder->param_image.pixel_format);
return -1;
}
}
}
/**
* Store value to component data buffer in specified position by buffer size and subsampling
*
* @param value
* @param position_x
* @param position_y
* @param comp
*/
template<
uint8_t s_samp_factor_h = GPUJPEG_DYNAMIC,
uint8_t s_samp_factor_v = GPUJPEG_DYNAMIC
>
struct gpujpeg_preprocessor_comp_to_raw_load
{
static __device__ void
perform(uint8_t & value, int position_x, int position_y, struct gpujpeg_preprocessor_data_component & comp)
{
uint8_t samp_factor_h = s_samp_factor_h;
if ( samp_factor_h == GPUJPEG_DYNAMIC ) {
samp_factor_h = comp.sampling_factor.horizontal;
}
uint8_t samp_factor_v = s_samp_factor_v;
if ( samp_factor_v == GPUJPEG_DYNAMIC ) {
samp_factor_v = comp.sampling_factor.vertical;
}
position_x = position_x / samp_factor_h;
position_y = position_y / samp_factor_v;
int data_position = position_y * comp.data_width + position_x;
value = comp.d_data[data_position];
}
};
template<>
struct gpujpeg_preprocessor_comp_to_raw_load<1, 1>
{
static __device__ void
perform(uint8_t & value, int position_x, int position_y, struct gpujpeg_preprocessor_data_component & comp)
{
int data_position = position_y * comp.data_width + position_x;
value = comp.d_data[data_position];
}
};
/**
* Kernel - Copy three separated component buffers into target image data
*
* @param d_c1 First component buffer
* @param d_c2 Second component buffer
* @param d_c3 Third component buffer
* @param d_target Image target data
* @param pixel_count Number of pixels to copy
* @return void
*/
typedef void (*gpujpeg_preprocessor_decode_kernel)(struct gpujpeg_preprocessor_data data, uint8_t* d_data_raw, int image_width, int image_height);
/** Specialization [sampling factor is 4:4:4] */
template<
enum gpujpeg_color_space color_space_internal,
enum gpujpeg_color_space color_space,
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v
>
__global__ void
gpujpeg_preprocessor_comp_to_raw_kernel_4_4_4(struct gpujpeg_preprocessor_data data, uint8_t* d_data_raw, int image_width, int image_height)
{
int x = threadIdx.x;
int gX = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x;
int image_position = gX + x;
if ( image_position >= (image_width * image_height) )
return;
int image_position_x = image_position % image_width;
int image_position_y = image_position / image_width;
// Load
uint8_t r1;
uint8_t r2;
uint8_t r3;
gpujpeg_preprocessor_comp_to_raw_load<s_comp1_samp_factor_h, s_comp1_samp_factor_v>::perform(r1, image_position_x, image_position_y, data.comp[0]);
gpujpeg_preprocessor_comp_to_raw_load<s_comp2_samp_factor_h, s_comp2_samp_factor_v>::perform(r2, image_position_x, image_position_y, data.comp[1]);
gpujpeg_preprocessor_comp_to_raw_load<s_comp3_samp_factor_h, s_comp3_samp_factor_v>::perform(r3, image_position_x, image_position_y, data.comp[2]);
// Color transform
gpujpeg_color_transform<color_space_internal, color_space>::perform(r1, r2, r3);
// Store Order
gpujpeg_color_order<color_space>::perform_store(r1, r2, r3);
// Save
image_position = image_position * 3;
d_data_raw[image_position + 0] = r1;
d_data_raw[image_position + 1] = r2;
d_data_raw[image_position + 2] = r3;
}
/** Specialization [sampling factor is 4:2:2] */
template<
enum gpujpeg_color_space color_space_internal,
enum gpujpeg_color_space color_space,
uint8_t s_comp1_samp_factor_h, uint8_t s_comp1_samp_factor_v,
uint8_t s_comp2_samp_factor_h, uint8_t s_comp2_samp_factor_v,
uint8_t s_comp3_samp_factor_h, uint8_t s_comp3_samp_factor_v
>
__global__ void
gpujpeg_preprocessor_comp_to_raw_kernel_4_2_2(struct gpujpeg_preprocessor_data data, uint8_t* d_data_raw, int image_width, int image_height)
{
int x = threadIdx.x;
int gX = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x;
int image_position = gX + x;
if ( image_position >= (image_width * image_height) )
return;
int image_position_x = image_position % image_width;
int image_position_y = image_position / image_width;
// Load
uint8_t r1;
uint8_t r2;
uint8_t r3;
gpujpeg_preprocessor_comp_to_raw_load<s_comp1_samp_factor_h, s_comp1_samp_factor_v>::perform(r1, image_position_x, image_position_y, data.comp[0]);
gpujpeg_preprocessor_comp_to_raw_load<s_comp2_samp_factor_h, s_comp2_samp_factor_v>::perform(r2, image_position_x, image_position_y, data.comp[1]);
gpujpeg_preprocessor_comp_to_raw_load<s_comp3_samp_factor_h, s_comp3_samp_factor_v>::perform(r3, image_position_x, image_position_y, data.comp[2]);
// Color transform
gpujpeg_color_transform<color_space_internal, color_space>::perform(r1, r2, r3);
// Store Order
gpujpeg_color_order<color_space>::perform_store(r1, r2, r3);
// Save
image_position = image_position * 2;
d_data_raw[image_position + 1] = r2;
if ( (image_position_x % 2) == 0 )
d_data_raw[image_position + 0] = r1;
else
d_data_raw[image_position + 0] = r3;
}
/**
* Select preprocessor decode kernel
*
* @param decoder
* @return kernel
*/
template<enum gpujpeg_color_space color_space_internal>
gpujpeg_preprocessor_decode_kernel
gpujpeg_preprocessor_select_decode_kernel(struct gpujpeg_coder* coder)
{
gpujpeg_preprocessor_sampling_factor_t sampling_factor = gpujpeg_preprocessor_make_sampling_factor(
coder->sampling_factor.horizontal / coder->component[0].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[0].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[1].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[1].sampling_factor.vertical,
coder->sampling_factor.horizontal / coder->component[2].sampling_factor.horizontal,
coder->sampling_factor.vertical / coder->component[2].sampling_factor.vertical
);
#define RETURN_KERNEL_IF(KERNEL, COLOR, P1, P2, P3, P4, P5, P6) \
if ( sampling_factor == gpujpeg_preprocessor_make_sampling_factor(P1, P2, P3, P4, P5, P6) ) { \
int max_h = max(P1, max(P3, P5)); \
int max_v = max(P2, max(P4, P6)); \
if ( coder->param.verbose ) { \
printf("Using faster kernel for postprocessor (precompiled %dx%d, %dx%d, %dx%d).\n", max_h / P1, max_v / P2, max_h / P3, max_v / P4, max_h / P5, max_v / P6); \
} \
return &KERNEL<color_space_internal, COLOR, P1, P2, P3, P4, P5, P6>; \
}
#define RETURN_KERNEL(KERNEL, COLOR) \
RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 1, 1, 1, 1) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 2, 2, 2, 2) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 1, 2, 1, 2) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 2, 1, 2, 1) \
else RETURN_KERNEL_IF(KERNEL, COLOR, 1, 1, 4, 4, 4, 4) \
else { \
if ( coder->param.verbose ) { \
printf("Using slower kernel for postprocessor (dynamic %dx%d, %dx%d, %dx%d).\n", coder->component[0].sampling_factor.horizontal, coder->component[0].sampling_factor.vertical, coder->component[1].sampling_factor.horizontal, coder->component[1].sampling_factor.vertical, coder->component[2].sampling_factor.horizontal, coder->component[2].sampling_factor.vertical); \
} \
return &KERNEL<color_space_internal, COLOR, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC, GPUJPEG_DYNAMIC>; \
} \
// None color space
if ( coder->param_image.color_space == GPUJPEG_NONE ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_4_4, GPUJPEG_NONE)
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_2_2, GPUJPEG_NONE)
} else {
assert(false);
}
}
// RGB color space
else if ( coder->param_image.color_space == GPUJPEG_RGB ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_4_4, GPUJPEG_RGB)
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_2_2, GPUJPEG_RGB)
} else {
assert(false);
}
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601 ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_4_4, GPUJPEG_YCBCR_BT601)
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_2_2, GPUJPEG_YCBCR_BT601)
} else {
assert(false);
}
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT601_256LVLS ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_4_4, GPUJPEG_YCBCR_BT601_256LVLS)
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_2_2, GPUJPEG_YCBCR_BT601_256LVLS)
} else {
assert(false);
}
}
// YCbCr color space
else if ( coder->param_image.color_space == GPUJPEG_YCBCR_BT709 ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_4_4, GPUJPEG_YCBCR_BT709)
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_2_2, GPUJPEG_YCBCR_BT709)
} else {
assert(false);
}
}
// YUV color space
else if ( coder->param_image.color_space == GPUJPEG_YUV ) {
if ( coder->param_image.pixel_format == GPUJPEG_444_U8_P012 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_4_4, GPUJPEG_YUV)
} else if ( coder->param_image.pixel_format == GPUJPEG_422_U8_P1020 ) {
RETURN_KERNEL(gpujpeg_preprocessor_comp_to_raw_kernel_4_2_2, GPUJPEG_YUV)
} else {
assert(false);
}
}
// Unknown color space
else {
assert(false);
}
#undef RETURN_KERNEL_IF
#undef RETURN_KERNEL
return NULL;
}
/** Documented at declaration */
int
gpujpeg_preprocessor_decoder_init(struct gpujpeg_coder* coder)
{
if (coder->param_image.comp_count == 1) {
return 0;
}
assert(coder->param_image.comp_count == 3);
if (coder->param.color_space_internal == GPUJPEG_NONE) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_NONE>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_RGB) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_RGB>(coder);
}
else if (coder->param.color_space_internal == GPUJPEG_YCBCR_BT601_256LVLS) {
coder->preprocessor = (void*)gpujpeg_preprocessor_select_decode_kernel<GPUJPEG_YCBCR_BT601_256LVLS>(coder);
}
else {
assert(false);
}
if (coder->preprocessor == NULL) {
return -1;
}
return 0;
}
/** Documented at declaration */
int
gpujpeg_preprocessor_decode(struct gpujpeg_coder* coder, cudaStream_t stream)
{
if (coder->param_image.comp_count == 1) {
cudaMemcpyAsync(coder->d_data_raw, coder->d_data, coder->data_raw_size * sizeof(uint8_t), cudaMemcpyDeviceToDevice, stream);
return 0;
}
assert(coder->param_image.comp_count == 3);
cudaMemsetAsync(coder->d_data_raw, 0, coder->data_raw_size * sizeof(uint8_t), stream);
// Select kernel
gpujpeg_preprocessor_decode_kernel kernel = (gpujpeg_preprocessor_decode_kernel)coder->preprocessor;
assert(kernel != NULL);
int image_width = coder->param_image.width;
int image_height = coder->param_image.height;
// When saving 4:2:2 data of odd width, the data should have even width, so round it
if (coder->param_image.pixel_format == GPUJPEG_422_U8_P1020) {
image_width = gpujpeg_div_and_round_up(coder->param_image.width, 2) * 2;
}
// Prepare unit size
int unitSize = (coder->param_image.pixel_format >= GPUJPEG_444_U8_P012 && coder->param_image.pixel_format <= GPUJPEG_444_U8_P0P1P2) ? 3 : 2;
// Prepare kernel
int alignedSize = gpujpeg_div_and_round_up(image_width * image_height, RGB_8BIT_THREADS) * RGB_8BIT_THREADS * unitSize;
dim3 threads (RGB_8BIT_THREADS);
dim3 grid (alignedSize / (RGB_8BIT_THREADS * unitSize));
assert(alignedSize % (RGB_8BIT_THREADS * unitSize) == 0);
if ( grid.x > GPUJPEG_CUDA_MAXIMUM_GRID_SIZE ) {
grid.y = gpujpeg_div_and_round_up(grid.x, GPUJPEG_CUDA_MAXIMUM_GRID_SIZE);
grid.x = GPUJPEG_CUDA_MAXIMUM_GRID_SIZE;
}
// Run kernel
struct gpujpeg_preprocessor_data data;
for ( int comp = 0; comp < 3; comp++ ) {
assert(coder->sampling_factor.horizontal % coder->component[comp].sampling_factor.horizontal == 0);
assert(coder->sampling_factor.vertical % coder->component[comp].sampling_factor.vertical == 0);
data.comp[comp].d_data = coder->component[comp].d_data;
data.comp[comp].sampling_factor.horizontal = coder->sampling_factor.horizontal / coder->component[comp].sampling_factor.horizontal;
data.comp[comp].sampling_factor.vertical = coder->sampling_factor.vertical / coder->component[comp].sampling_factor.vertical;
data.comp[comp].data_width = coder->component[comp].data_width;
}
kernel<<<grid, threads, 0, stream>>>(
data,
coder->d_data_raw,
image_width,
image_height
);
gpujpeg_cuda_check_error("Preprocessor encoding failed", return -1);
return 0;
}
|
a4fc68861a81a7f09b8692661f3234172280ac7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void cuda_hello() {
printf("Hello World from GPU!\n");
}
int main() {
hipLaunchKernelGGL(( cuda_hello), dim3(1),dim3(1), 0, 0, );
hipDeviceSynchronize();
return 0;
} | a4fc68861a81a7f09b8692661f3234172280ac7a.cu | #include <stdio.h>
__global__ void cuda_hello() {
printf("Hello World from GPU!\n");
}
int main() {
cuda_hello<<<1,1>>>();
cudaDeviceSynchronize();
return 0;
} |
736d8d81ff300e4fcd85da5997e4da0bc6a3dbbf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define MAKE_NAME(prefix, fun, T) prefix ## _ ## fun ## _ ## T
#define MAP_FUN_1(fun, T) \
extern "C" \
__global__ void MAKE_NAME(map, fun, T) (int rows, int cols,\
T *out, int outMajorStride,\
const T *in, int inMajorStride) {\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\
for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\
out[col * outMajorStride + row] = fun(in[col * inMajorStride + row]);\
}\
}\
}
MAP_FUN_1(acos, TYPE)
MAP_FUN_1(acosh, TYPE)
MAP_FUN_1(asin, TYPE)
MAP_FUN_1(asinh, TYPE)
MAP_FUN_1(atan, TYPE)
MAP_FUN_1(atanh, TYPE)
MAP_FUN_1(cbrt, TYPE)
MAP_FUN_1(ceil, TYPE)
MAP_FUN_1(cos, TYPE)
MAP_FUN_1(cosh, TYPE)
MAP_FUN_1(cospi, TYPE)
MAP_FUN_1(erfc, TYPE)
MAP_FUN_1(erfcinv, TYPE)
MAP_FUN_1(erfcx, TYPE)
MAP_FUN_1(erf, TYPE)
MAP_FUN_1(erfinv, TYPE)
MAP_FUN_1(exp10, TYPE)
MAP_FUN_1(exp2, TYPE)
MAP_FUN_1(exp, TYPE)
MAP_FUN_1(expm1, TYPE)
MAP_FUN_1(fabs, TYPE)
MAP_FUN_1(floor, TYPE)
MAP_FUN_1(j0, TYPE)
MAP_FUN_1(j1, TYPE)
MAP_FUN_1(lgamma, TYPE)
MAP_FUN_1(log10, TYPE)
MAP_FUN_1(log1p, TYPE)
MAP_FUN_1(log2, TYPE)
MAP_FUN_1(logb, TYPE)
MAP_FUN_1(log, TYPE)
MAP_FUN_1(nearbyint, TYPE)
MAP_FUN_1(normcdf, TYPE)
MAP_FUN_1(normcdfinv, TYPE)
MAP_FUN_1(rcbrt, TYPE)
MAP_FUN_1(rint, TYPE)
MAP_FUN_1(round, TYPE)
MAP_FUN_1(rsqrt, TYPE)
MAP_FUN_1(sin, TYPE)
MAP_FUN_1(sinh, TYPE)
MAP_FUN_1(sinpi, TYPE)
MAP_FUN_1(sqrt, TYPE)
MAP_FUN_1(tan, TYPE)
MAP_FUN_1(tanh, TYPE)
MAP_FUN_1(tgamma, TYPE)
MAP_FUN_1(trunc, TYPE)
MAP_FUN_1(y0, TYPE)
MAP_FUN_1(y1, TYPE)
#define MAP_FUN_2(fun, T) \
extern "C" \
__global__ void MAKE_NAME(map2, fun, T) (int rows, int cols,\
T *out, int outMajorStride,\
const T *a, int aMajorStride,\
const T *b, int bMajorStride) {\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\
for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\
out[col * outMajorStride + row] = fun(a[col * aMajorStride + row], b[col * bMajorStride + row]);\
}\
}\
}\
\
extern "C" \
__global__ void MAKE_NAME(map2_v_s, fun, T) (int rows, int cols,\
T *out, int outMajorStride,\
const T *a, int aMajorStride,\
const T b) {\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\
for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\
out[col * outMajorStride + row] = fun(a[col * aMajorStride + row], b);\
}\
}\
}\
\
extern "C" \
__global__ void MAKE_NAME(map2_s_v, fun, T) (int rows, int cols,\
T *out, int outMajorStride,\
const T a,\
const T *b, int bMajorStride) {\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\
for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\
out[col * outMajorStride + row] = fun(a, b[col * bMajorStride + row]);\
}\
}\
}\
__device__ inline TYPE add(TYPE a, TYPE b) { return a + b; }
__device__ inline TYPE sub(TYPE a, TYPE b) { return a - b; }
__device__ inline TYPE mul(TYPE a, TYPE b) { return a * b; }
__device__ inline TYPE div(TYPE a, TYPE b) { return a / b; }
__device__ inline TYPE mod(TYPE a, TYPE b) { return fmod(a, b); }
MAP_FUN_2(add, TYPE)
MAP_FUN_2(sub, TYPE)
MAP_FUN_2(mul, TYPE)
MAP_FUN_2(div, TYPE)
MAP_FUN_2(mod, TYPE)
MAP_FUN_2(pow, TYPE)
MAP_FUN_2(max, TYPE)
MAP_FUN_2(min, TYPE)
// TODO: add back in set
//=== Vector arithmetic ======================================================
extern "C"
__global__ void vec_addf (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] + y[id];
}
}
extern "C"
__global__ void vec_subf (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y[id];
}
}
extern "C"
__global__ void vec_mulf (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y[id];
}
}
extern "C"
__global__ void vec_divf (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y[id];
}
}
extern "C"
__global__ void vec_negatef (size_t n, TYPE *result, TYPE *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = -x[id];
}
}
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
__global__ void vec_addScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] + y;
}
}
extern "C"
__global__ void vec_subScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y;
}
}
extern "C"
__global__ void vec_mulScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y;
}
}
extern "C"
__global__ void vec_divScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y;
}
}
extern "C"
__global__ void vec_scalarAddf (size_t n, TYPE *result, TYPE x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x + y[id];
}
}
extern "C"
__global__ void vec_scalarSubf (size_t n, TYPE *result, TYPE x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x - y[id];
}
}
extern "C"
__global__ void vec_scalarMulf (size_t n, TYPE *result, TYPE x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x * y[id];
}
}
extern "C"
__global__ void vec_scalarDivf (size_t n, TYPE *result, TYPE x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x / y[id];
}
}
//=== Vector comparison ======================================================
extern "C"
__global__ void vec_ltf (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_ltef (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_eqf (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gtef (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gtf (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_nef (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y[id])?1.0f:0.0f;
}
}
//=== Vector-and-scalar comparison ===========================================
extern "C"
__global__ void vec_ltScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_lteScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_eqScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gteScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gtScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_neScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y)?1.0f:0.0f;
}
}
| 736d8d81ff300e4fcd85da5997e4da0bc6a3dbbf.cu | #include <stdio.h>
#define MAKE_NAME(prefix, fun, T) prefix ## _ ## fun ## _ ## T
#define MAP_FUN_1(fun, T) \
extern "C" \
__global__ void MAKE_NAME(map, fun, T) (int rows, int cols,\
T *out, int outMajorStride,\
const T *in, int inMajorStride) {\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\
for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\
out[col * outMajorStride + row] = fun(in[col * inMajorStride + row]);\
}\
}\
}
MAP_FUN_1(acos, TYPE)
MAP_FUN_1(acosh, TYPE)
MAP_FUN_1(asin, TYPE)
MAP_FUN_1(asinh, TYPE)
MAP_FUN_1(atan, TYPE)
MAP_FUN_1(atanh, TYPE)
MAP_FUN_1(cbrt, TYPE)
MAP_FUN_1(ceil, TYPE)
MAP_FUN_1(cos, TYPE)
MAP_FUN_1(cosh, TYPE)
MAP_FUN_1(cospi, TYPE)
MAP_FUN_1(erfc, TYPE)
MAP_FUN_1(erfcinv, TYPE)
MAP_FUN_1(erfcx, TYPE)
MAP_FUN_1(erf, TYPE)
MAP_FUN_1(erfinv, TYPE)
MAP_FUN_1(exp10, TYPE)
MAP_FUN_1(exp2, TYPE)
MAP_FUN_1(exp, TYPE)
MAP_FUN_1(expm1, TYPE)
MAP_FUN_1(fabs, TYPE)
MAP_FUN_1(floor, TYPE)
MAP_FUN_1(j0, TYPE)
MAP_FUN_1(j1, TYPE)
MAP_FUN_1(lgamma, TYPE)
MAP_FUN_1(log10, TYPE)
MAP_FUN_1(log1p, TYPE)
MAP_FUN_1(log2, TYPE)
MAP_FUN_1(logb, TYPE)
MAP_FUN_1(log, TYPE)
MAP_FUN_1(nearbyint, TYPE)
MAP_FUN_1(normcdf, TYPE)
MAP_FUN_1(normcdfinv, TYPE)
MAP_FUN_1(rcbrt, TYPE)
MAP_FUN_1(rint, TYPE)
MAP_FUN_1(round, TYPE)
MAP_FUN_1(rsqrt, TYPE)
MAP_FUN_1(sin, TYPE)
MAP_FUN_1(sinh, TYPE)
MAP_FUN_1(sinpi, TYPE)
MAP_FUN_1(sqrt, TYPE)
MAP_FUN_1(tan, TYPE)
MAP_FUN_1(tanh, TYPE)
MAP_FUN_1(tgamma, TYPE)
MAP_FUN_1(trunc, TYPE)
MAP_FUN_1(y0, TYPE)
MAP_FUN_1(y1, TYPE)
#define MAP_FUN_2(fun, T) \
extern "C" \
__global__ void MAKE_NAME(map2, fun, T) (int rows, int cols,\
T *out, int outMajorStride,\
const T *a, int aMajorStride,\
const T *b, int bMajorStride) {\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\
for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\
out[col * outMajorStride + row] = fun(a[col * aMajorStride + row], b[col * bMajorStride + row]);\
}\
}\
}\
\
extern "C" \
__global__ void MAKE_NAME(map2_v_s, fun, T) (int rows, int cols,\
T *out, int outMajorStride,\
const T *a, int aMajorStride,\
const T b) {\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\
for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\
out[col * outMajorStride + row] = fun(a[col * aMajorStride + row], b);\
}\
}\
}\
\
extern "C" \
__global__ void MAKE_NAME(map2_s_v, fun, T) (int rows, int cols,\
T *out, int outMajorStride,\
const T a,\
const T *b, int bMajorStride) {\
for(int col = threadIdx.x + blockIdx.x * blockDim.x; col < cols; col += blockDim.x * gridDim.x) {\
for(int row = threadIdx.y + blockIdx.y * blockDim.y; row < rows; row += blockDim.y * gridDim.y) {\
out[col * outMajorStride + row] = fun(a, b[col * bMajorStride + row]);\
}\
}\
}\
__device__ inline TYPE add(TYPE a, TYPE b) { return a + b; }
__device__ inline TYPE sub(TYPE a, TYPE b) { return a - b; }
__device__ inline TYPE mul(TYPE a, TYPE b) { return a * b; }
__device__ inline TYPE div(TYPE a, TYPE b) { return a / b; }
__device__ inline TYPE mod(TYPE a, TYPE b) { return fmod(a, b); }
MAP_FUN_2(add, TYPE)
MAP_FUN_2(sub, TYPE)
MAP_FUN_2(mul, TYPE)
MAP_FUN_2(div, TYPE)
MAP_FUN_2(mod, TYPE)
MAP_FUN_2(pow, TYPE)
MAP_FUN_2(max, TYPE)
MAP_FUN_2(min, TYPE)
// TODO: add back in set
//=== Vector arithmetic ======================================================
extern "C"
__global__ void vec_addf (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] + y[id];
}
}
extern "C"
__global__ void vec_subf (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y[id];
}
}
extern "C"
__global__ void vec_mulf (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y[id];
}
}
extern "C"
__global__ void vec_divf (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y[id];
}
}
extern "C"
__global__ void vec_negatef (size_t n, TYPE *result, TYPE *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = -x[id];
}
}
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
__global__ void vec_addScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] + y;
}
}
extern "C"
__global__ void vec_subScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y;
}
}
extern "C"
__global__ void vec_mulScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y;
}
}
extern "C"
__global__ void vec_divScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y;
}
}
extern "C"
__global__ void vec_scalarAddf (size_t n, TYPE *result, TYPE x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x + y[id];
}
}
extern "C"
__global__ void vec_scalarSubf (size_t n, TYPE *result, TYPE x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x - y[id];
}
}
extern "C"
__global__ void vec_scalarMulf (size_t n, TYPE *result, TYPE x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x * y[id];
}
}
extern "C"
__global__ void vec_scalarDivf (size_t n, TYPE *result, TYPE x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x / y[id];
}
}
//=== Vector comparison ======================================================
extern "C"
__global__ void vec_ltf (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_ltef (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_eqf (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gtef (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gtf (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_nef (size_t n, TYPE *result, TYPE *x, TYPE *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y[id])?1.0f:0.0f;
}
}
//=== Vector-and-scalar comparison ===========================================
extern "C"
__global__ void vec_ltScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_lteScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_eqScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gteScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gtScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_neScalarf (size_t n, TYPE *result, TYPE *x, TYPE y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y)?1.0f:0.0f;
}
}
|
3eaa7c4b7904cd85598ef60fe7a440c3be9c9c94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define N 22
__global__ void MatAdd(int A[][N], int B[][N], int C[][N]){
int i = threadIdx.x;
int j = threadIdx.y;
C[i][j] = A[i][j] + B[i][j];
}
void randmatfunc(int newmat[N][N]){
int i, j, k;
for(i=0;i<N;i++){
for(j=0;j<N;j++){
k = rand() % 100 + 1;;
printf("%d ", k);
newmat[i][j] =k;
}
printf("\n");
}
printf("\n--------------------------------------\n");
}
int main(){
int A[N][N];
randmatfunc(A);
int B[N][N];
randmatfunc(B);
int C[N][N];
int (*d_A)[N], (*d_B)[N], (*d_C)[N];
//Allocate memories for device copies of A,B,C
hipMalloc((void**)&d_A, (N*N)*sizeof(int));
hipMalloc((void**)&d_B, (N*N)*sizeof(int));
hipMalloc((void**)&d_C, (N*N)*sizeof(int));
//Alloc space for host copies of A,B,C and setup input values
hipMemcpy(d_A, A, (N*N)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, (N*N)*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_C, C, (N*N)*sizeof(int), hipMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N,N);
//Launch MatAdd() kernel on GPU
hipLaunchKernelGGL(( MatAdd), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_A,d_B,d_C);
//copy results back to host
hipMemcpy(C, d_C, (N*N)*sizeof(int), hipMemcpyDeviceToHost);
int i, j; printf("C = \n");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%d ", C[i][j]);
}
printf("\n");
}
//cleanup
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
printf("\n");
return 0;
}
| 3eaa7c4b7904cd85598ef60fe7a440c3be9c9c94.cu | #include <stdio.h>
#include <stdlib.h>
#define N 22
__global__ void MatAdd(int A[][N], int B[][N], int C[][N]){
int i = threadIdx.x;
int j = threadIdx.y;
C[i][j] = A[i][j] + B[i][j];
}
void randmatfunc(int newmat[N][N]){
int i, j, k;
for(i=0;i<N;i++){
for(j=0;j<N;j++){
k = rand() % 100 + 1;;
printf("%d ", k);
newmat[i][j] =k;
}
printf("\n");
}
printf("\n--------------------------------------\n");
}
int main(){
int A[N][N];
randmatfunc(A);
int B[N][N];
randmatfunc(B);
int C[N][N];
int (*d_A)[N], (*d_B)[N], (*d_C)[N];
//Allocate memories for device copies of A,B,C
cudaMalloc((void**)&d_A, (N*N)*sizeof(int));
cudaMalloc((void**)&d_B, (N*N)*sizeof(int));
cudaMalloc((void**)&d_C, (N*N)*sizeof(int));
//Alloc space for host copies of A,B,C and setup input values
cudaMemcpy(d_A, A, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_C, C, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
int numBlocks = 1;
dim3 threadsPerBlock(N,N);
//Launch MatAdd() kernel on GPU
MatAdd<<<numBlocks,threadsPerBlock>>>(d_A,d_B,d_C);
//copy results back to host
cudaMemcpy(C, d_C, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
int i, j; printf("C = \n");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
printf("%d ", C[i][j]);
}
printf("\n");
}
//cleanup
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
printf("\n");
return 0;
}
|
2ba47af551515111a6b050bfd36b2274ad7844b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "ATen/hip/HIPHalf.cuh"
#include "ATen/Half.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
namespace at {
#if TORCH_HIP_VERSION < 9000 && !defined(__HIP_PLATFORM_HCC__)
template <> AT_CUDA_API
half convert(Half aten_half) {
return half{aten_half.x};
}
template <> AT_CUDA_API
half convert(double value) {
return half{Half(value).x};
}
template <> AT_CUDA_API
Half convert(half cuda_half) {
return Half(cuda_half.x, Half::from_bits);
}
#else
template <> AT_CUDA_API
half convert(Half aten_half) {
__half_raw x_raw;
x_raw.x = aten_half.x;
return half(x_raw);
}
template <> AT_CUDA_API
Half convert(half cuda_half) {
__half_raw raw(cuda_half);
return Half(raw.x, Half::from_bits);
}
template <> AT_CUDA_API
half convert(double value) {
__half_raw raw;
raw.x = Half(value).x;
return half {raw};
}
template <> __half HalfFix(Half h) {
__half_raw raw;
raw.x = h.x;
return __half{raw};
}
template <> Half HalfFix(__half h) {
__half_raw raw(h);
return Half(raw.x, Half::from_bits);
}
#endif
} // namespace at
| 2ba47af551515111a6b050bfd36b2274ad7844b7.cu | #include "ATen/cuda/CUDAHalf.cuh"
#include "ATen/Half.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_fp16.h>
namespace at {
#if CUDA_VERSION < 9000 && !defined(__HIP_PLATFORM_HCC__)
template <> AT_CUDA_API
half convert(Half aten_half) {
return half{aten_half.x};
}
template <> AT_CUDA_API
half convert(double value) {
return half{Half(value).x};
}
template <> AT_CUDA_API
Half convert(half cuda_half) {
return Half(cuda_half.x, Half::from_bits);
}
#else
template <> AT_CUDA_API
half convert(Half aten_half) {
__half_raw x_raw;
x_raw.x = aten_half.x;
return half(x_raw);
}
template <> AT_CUDA_API
Half convert(half cuda_half) {
__half_raw raw(cuda_half);
return Half(raw.x, Half::from_bits);
}
template <> AT_CUDA_API
half convert(double value) {
__half_raw raw;
raw.x = Half(value).x;
return half {raw};
}
template <> __half HalfFix(Half h) {
__half_raw raw;
raw.x = h.x;
return __half{raw};
}
template <> Half HalfFix(__half h) {
__half_raw raw(h);
return Half(raw.x, Half::from_bits);
}
#endif
} // namespace at
|
cc27bdbea0ff9df0f4d22478ff36e550a259d596.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define SIZEOFINT sizeof(int)
#define BLOCK_DIM 64
#define TH_DIM 32
const int INF = ((1 << 30) - 1);
int n, m, padding_n, pitch_k, Dist_row_size_in_byte;
size_t pitch;
int up_part_size_in_block = 0, bottom_part_size_in_block = 0, up_part_height = 0, bottom_part_height = 0;
int *Dist, *Dist_s;
int *Dist_cuda, *Dist_cuda0, *Dist_cuda1;
void show_mat(int *start_p, int vertex_num){
for(int i = 0; i < vertex_num; i++){
for(int j = 0; j < vertex_num; j++){
if(start_p[i * vertex_num + j] == INF){
printf("INF\t ");
}else{
printf("%d\t ", start_p[i * vertex_num + j]);
}
}
printf("\n");
}
}
void show_mat_cuda(int *start_p, int vertex_num, int padding_n, size_t pitch, int device_id){
int *temp = (int*)malloc(SIZEOFINT * padding_n * padding_n);
hipSetDevice(device_id);
// hipMemcpy(temp, start_p, (SIZEOFINT * vertex_num * vertex_num), hipMemcpyDeviceToHost);
hipMemcpy2D(temp, SIZEOFINT * padding_n, start_p, pitch, SIZEOFINT * padding_n, padding_n, hipMemcpyDeviceToHost);
printf("---\n");
for(int i = 0; i < vertex_num; i++){
for(int j = 0; j < vertex_num; j++){
if(temp[i * vertex_num + j] == INF){
printf("INF\t ");
}else{
printf("%d\t ", temp[i * vertex_num + j]);
}
}
printf("\n");
}
printf("---\n");
}
void malloc_Dist(){
hipHostMalloc(&Dist, SIZEOFINT * padding_n * padding_n, hipHostMallocPortable);
// Dist = (int*)malloc(SIZEOFINT * padding_n * padding_n);
Dist_s = (int*)malloc(SIZEOFINT * n * n);
}
int getDist(int i, int j){return Dist[i * padding_n + j];}
int *getDistAddr(int i, int j){return &(Dist[i * padding_n + j]);}
void setDist(int i, int j, int val){Dist[i * padding_n + j] = val;}
void setup_DistCuda(){
// hipMalloc((void **)&Dist_cuda, SIZEOFINT * padding_n * padding_n);
// hipMemcpy(Dist_cuda, Dist, (padding_n * padding_n * SIZEOFINT), hipMemcpyHostToDevice);
// hipMallocPitch(&Dist_cuda, &pitch, SIZEOFINT * padding_n, padding_n);
// hipMemcpy2D(Dist_cuda, pitch, Dist, SIZEOFINT * padding_n, SIZEOFINT * padding_n, padding_n, hipMemcpyHostToDevice);
// pitch_k = ((int)pitch) / SIZEOFINT;
hipStream_t stream;
hipStreamCreate(&stream);
hipSetDevice(0);
hipDeviceEnablePeerAccess(0, 0);
hipMallocPitch(&Dist_cuda0, &pitch, SIZEOFINT * padding_n, padding_n);
hipMemcpy2DAsync(Dist_cuda0, pitch, Dist, SIZEOFINT * padding_n, SIZEOFINT * padding_n, padding_n, hipMemcpyHostToDevice, stream);
pitch_k = ((int)pitch) / SIZEOFINT;
hipSetDevice(1);
hipDeviceEnablePeerAccess(1, 0);
hipMallocPitch(&Dist_cuda1, &pitch, SIZEOFINT * padding_n, padding_n);
hipMemcpy2D(Dist_cuda1, pitch, Dist, SIZEOFINT * padding_n, SIZEOFINT * padding_n, padding_n, hipMemcpyHostToDevice);
hipStreamDestroy(stream);
}
void back_DistCuda(){
// hipMemcpy(Dist, Dist_cuda, (padding_n * padding_n * SIZEOFINT), hipMemcpyDeviceToHost);
// hipMemcpy2D(Dist, SIZEOFINT * padding_n, Dist_cuda, pitch, SIZEOFINT * padding_n, padding_n, hipMemcpyDeviceToHost);
hipStream_t stream;
hipStreamCreate(&stream);
hipSetDevice(0);
hipMemcpy2DAsync(Dist, SIZEOFINT * padding_n, Dist_cuda0, pitch, SIZEOFINT * padding_n, padding_n, hipMemcpyDeviceToHost, stream);
hipSetDevice(1);
hipMemcpy2D(&(Dist[up_part_height * padding_n]), SIZEOFINT * padding_n, &(Dist_cuda1[up_part_height * pitch_k]), pitch, SIZEOFINT * padding_n, (bottom_part_height), hipMemcpyDeviceToHost);
hipStreamDestroy(stream);
}
void input(char* infile) {
FILE* file = fopen(infile, "rb");
fread(&n, sizeof(int), 1, file);
fread(&m, sizeof(int), 1, file);
padding_n = ((n + BLOCK_DIM - 1) / BLOCK_DIM) * BLOCK_DIM;
Dist_row_size_in_byte = SIZEOFINT * padding_n;
malloc_Dist();
for (int i = 0; i < padding_n; i++) {
for (int j = 0; j < padding_n; j++) {
if (i == j) {
setDist(i, j, 0);
// Dist[i][j] = 0;
} else {
setDist(i, j, INF);
// Dist[i][j] = INF;
}
}
}
int pair[3];
int *edges_buf = (int*)malloc(3 * m * SIZEOFINT);
fread(edges_buf, sizeof(int), 3 * m, file);
for (int i = 0; i < m; i++) {
// fread(pair, sizeof(int), 3, file);
setDist(edges_buf[3 * i], edges_buf[3 * i + 1], edges_buf[3 * i + 2]);
}
free(edges_buf);
fclose(file);
}
void output(char* outFileName) {
FILE* outfile = fopen(outFileName, "w");
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
// if (Dist[i][j] >= INF) Dist[i][j] = INF;
if (getDist(i, j) >= INF) setDist(i, j, INF);
Dist_s[i * n + j] = getDist(i, j);
}
// fwrite(Dist[i], sizeof(int), n, outfile);
// fwrite(getDistAddr(i, 0), SIZEOFINT, n, outfile);
}
fwrite(Dist_s, sizeof(int), n * n, outfile);
fclose(outfile);
}
__forceinline__
__device__ void block_calc(int* C, int* A, int* B, int bj, int bi) {
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = A[bi*BLOCK_DIM + k] + B[k*BLOCK_DIM + bj];
// int sum1 = A[(bi + TH_DIM)*BLOCK_DIM + k] + B[k*BLOCK_DIM + bj];
// int sum2 = A[bi*BLOCK_DIM + k] + B[k*BLOCK_DIM + (bj + TH_DIM)];
// int sum3 = A[(bi + TH_DIM)*BLOCK_DIM + k] + B[k*BLOCK_DIM + (bj + TH_DIM)];
C[bi*BLOCK_DIM + bj] = min(C[bi*BLOCK_DIM + bj], sum0);
// C[(bi + TH_DIM)*BLOCK_DIM + bj] = min(C[(bi + TH_DIM)*BLOCK_DIM + bj], sum1);
// C[bi*BLOCK_DIM + (bj + TH_DIM)] = min(C[bi*BLOCK_DIM + (bj + TH_DIM)], sum2);
// C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = min(C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)], sum3);
__syncthreads();
}
}
__forceinline__
__device__ void block_calc_rev_async(int* C, int* A, int* B, int bj, int bi) {
#pragma unroll
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = A[k*BLOCK_DIM + bi] + B[k*BLOCK_DIM + bj];
// int sum1 = A[k*BLOCK_DIM + (bi + TH_DIM)] + B[k*BLOCK_DIM + bj];
// int sum2 = A[k*BLOCK_DIM + bi] + B[k*BLOCK_DIM + (bj + TH_DIM)];
// int sum3 = A[k*BLOCK_DIM + (bi + TH_DIM)] + B[k*BLOCK_DIM + (bj + TH_DIM)];
C[bi*BLOCK_DIM + bj] = min(C[bi*BLOCK_DIM + bj], sum0);
// C[(bi + TH_DIM)*BLOCK_DIM + bj] = min(C[(bi + TH_DIM)*BLOCK_DIM + bj], sum1);
// C[bi*BLOCK_DIM + (bj + TH_DIM)] = min(C[bi*BLOCK_DIM + (bj + TH_DIM)], sum2);
// C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = min(C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)], sum3);
}
}
__global__ void floyd_warshall_block_kernel_phase1(int n, int k, int* graph) {
const unsigned int bi = threadIdx.y;
const unsigned int bj = threadIdx.x;
__shared__ int C[BLOCK_DIM][BLOCK_DIM];
// Transfer to temp shared arrays
C[bi][bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)];
C[(bi + TH_DIM)][bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)];
C[bi][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))];
C[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))];
__syncthreads();
// block_calc(C, C, C, bi, bj);
#pragma unroll
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = C[bi][k] + C[k][bj];
int sum1 = C[(bi + TH_DIM)][k] + C[k][bj];
int sum2 = C[bi][k] + C[k][(bj + TH_DIM)];
int sum3 = C[(bi + TH_DIM)][k] + C[k][(bj + TH_DIM)];
C[bi][bj] = min(C[bi][bj], sum0);
C[(bi + TH_DIM)][bj] = min(C[(bi + TH_DIM)][bj], sum1);
C[bi][(bj + TH_DIM)] = min(C[bi][(bj + TH_DIM)], sum2);
C[(bi + TH_DIM)][(bj + TH_DIM)] = min(C[(bi + TH_DIM)][(bj + TH_DIM)], sum3);
__syncthreads();
}
// Transfer back to graph
graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)] = C[bi][bj];
graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)] = C[(bi + TH_DIM)][bj];
graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[bi][(bj + TH_DIM)];
graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)][(bj + TH_DIM)];
}
__global__ void floyd_warshall_block_kernel_phase2(int n, int k, int* graph) {
// BlockDim is one dimensional (Straight along diagonal)
// Blocks themselves are two dimensional
// Phase 2 1/2
const unsigned int i = blockIdx.x;
const unsigned int bi = threadIdx.y;
const unsigned int bj = threadIdx.x;
// __shared__ int A[BLOCK_DIM * BLOCK_DIM];
__shared__ int B[BLOCK_DIM][BLOCK_DIM];
__shared__ int C[BLOCK_DIM][BLOCK_DIM];
C[bi][bj] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)];
C[(bi + TH_DIM)][bj] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)];
C[bi][(bj + TH_DIM)] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))];
C[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))];
B[bi][bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)];
B[(bi + TH_DIM)][bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)];
B[bi][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))];
B[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))];
__syncthreads();
// block_calc(C, C, B, bi, bj);
#pragma unroll
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = C[bi][k] + B[k][bj];
int sum1 = C[(bi + TH_DIM)][k] + B[k][bj];
int sum2 = C[bi][k] + B[k][(bj + TH_DIM)];
int sum3 = C[(bi + TH_DIM)][k] + B[k][(bj + TH_DIM)];
C[bi][bj] = min(C[bi][bj], sum0);
C[(bi + TH_DIM)][bj] = min(C[(bi + TH_DIM)][bj], sum1);
C[bi][(bj + TH_DIM)] = min(C[bi][(bj + TH_DIM)], sum2);
C[(bi + TH_DIM)][(bj + TH_DIM)] = min(C[(bi + TH_DIM)][(bj + TH_DIM)], sum3);
__syncthreads();
}
graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)] = C[bi][bj];
graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)] = C[(bi + TH_DIM)][bj];
graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[bi][(bj + TH_DIM)];
graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)][(bj + TH_DIM)];
// Phase 2 2/2
C[bi][bj] = graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + bj)];
C[(bi + TH_DIM)][bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)];
C[bi][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))];
C[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))];
__syncthreads();
// block_calc(C, B, C, bi, bj);
#pragma unroll
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = B[bi][k] + C[k][bj];
int sum1 = B[(bi + TH_DIM)][k] + C[k][bj];
int sum2 = B[bi][k] + C[k][(bj + TH_DIM)];
int sum3 = B[(bi + TH_DIM)][k] + C[k][(bj + TH_DIM)];
C[bi][bj] = min(C[bi][bj], sum0);
C[(bi + TH_DIM)][bj] = min(C[(bi + TH_DIM)][bj], sum1);
C[bi][(bj + TH_DIM)] = min(C[bi][(bj + TH_DIM)], sum2);
C[(bi + TH_DIM)][(bj + TH_DIM)] = min(C[(bi + TH_DIM)][(bj + TH_DIM)], sum3);
__syncthreads();
}
// Block C is the only one that could be changed
graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + bj)] = C[bi][bj];
graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)] = C[(bi + TH_DIM)][bj];
graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[bi][(bj + TH_DIM)];
graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)][(bj + TH_DIM)];
}
__global__ void floyd_warshall_block_kernel_phase3(int n, int k, int* graph, int start_x, int start_y) {
// BlockDim is one dimensional (Straight along diagonal)
// Blocks themselves are two dimensional
const unsigned int j = start_x + blockIdx.x;
const unsigned int i = start_y + blockIdx.y;
const unsigned int bi = threadIdx.y;
const unsigned int bj = threadIdx.x;
__shared__ int A[BLOCK_DIM][BLOCK_DIM];
__shared__ int B[BLOCK_DIM][BLOCK_DIM];
__shared__ int C[BLOCK_DIM][BLOCK_DIM];
C[bi][bj] = graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + bj)];
C[(bi + TH_DIM)][bj] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + bj)];
C[bi][(bj + TH_DIM)] = graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + (bj + TH_DIM))];
C[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + (bj + TH_DIM))];
A[bj][bi] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)];
A[bj][(bi + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)];
A[(bj + TH_DIM)][bi] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))];
A[(bj + TH_DIM)][(bi + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))];
B[bi][bj] = graph[(k*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + bj)];
B[(bi + TH_DIM)][bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + bj)];
B[bi][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + (bj + TH_DIM))];
B[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + (bj + TH_DIM))];
__syncthreads();
// block_calc_rev_async(C, A, B, bi, bj);
#pragma unroll
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = A[k][bi] + B[k][bj];
int sum1 = A[k][(bi + TH_DIM)] + B[k][bj];
int sum2 = A[k][bi] + B[k][(bj + TH_DIM)];
int sum3 = A[k][(bi + TH_DIM)] + B[k][(bj + TH_DIM)];
C[bi][bj] = min(C[bi][bj], sum0);
C[(bi + TH_DIM)][bj] = min(C[(bi + TH_DIM)][bj], sum1);
C[bi][(bj + TH_DIM)] = min(C[bi][(bj + TH_DIM)], sum2);
C[(bi + TH_DIM)][(bj + TH_DIM)] = min(C[(bi + TH_DIM)][(bj + TH_DIM)], sum3);
}
// __syncthreads();
graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + bj)] = C[bi][bj];
graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + bj)] = C[(bi + TH_DIM)][bj];
graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + (bj + TH_DIM))] = C[bi][(bj + TH_DIM)];
graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)][(bj + TH_DIM)];
}
__global__ void floyd_warshall_block_kernel_phase21(int n, int k, int* graph, int start) {
// BlockDim is one dimensional (Straight along diagonal)
// Blocks themselves are two dimensional
// Phase 2 1/2, update column
// const unsigned int i = blockIdx.x;
const unsigned int i = start + blockIdx.x;
const unsigned int bi = threadIdx.y;
const unsigned int bj = threadIdx.x;
__shared__ int A[BLOCK_DIM][BLOCK_DIM];
__shared__ int B[BLOCK_DIM][BLOCK_DIM];
__shared__ int C[BLOCK_DIM][BLOCK_DIM];
C[bi][bj] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)];
C[(bi + TH_DIM)][bj] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)];
C[bi][(bj + TH_DIM)] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))];
C[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))];
B[bi][bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)];
B[(bi + TH_DIM)][bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)];
B[bi][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))];
B[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))];
__syncthreads();
// block_calc(C, C, B, bi, bj);
#pragma unroll
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = C[bi][k] + B[k][bj];
int sum1 = C[(bi + TH_DIM)][k] + B[k][bj];
int sum2 = C[bi][k] + B[k][(bj + TH_DIM)];
int sum3 = C[(bi + TH_DIM)][k] + B[k][(bj + TH_DIM)];
C[bi][bj] = min(C[bi][bj], sum0);
C[(bi + TH_DIM)][bj] = min(C[(bi + TH_DIM)][bj], sum1);
C[bi][(bj + TH_DIM)] = min(C[bi][(bj + TH_DIM)], sum2);
C[(bi + TH_DIM)][(bj + TH_DIM)] = min(C[(bi + TH_DIM)][(bj + TH_DIM)], sum3);
__syncthreads();
}
graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)] = C[bi][bj];
graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)] = C[(bi + TH_DIM)][bj];
graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[bi][(bj + TH_DIM)];
graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)][(bj + TH_DIM)];
}
__global__ void floyd_warshall_block_kernel_phase22(int n, int k, int* graph, int start) {
// BlockDim is one dimensional (Straight along diagonal)
// Blocks themselves are two dimensional
// Phase 2 2/2, update row
// const unsigned int i = blockIdx.x;
const unsigned int i = start + blockIdx.x;
const unsigned int bi = threadIdx.y;
const unsigned int bj = threadIdx.x;
__shared__ int A[BLOCK_DIM][BLOCK_DIM];
__shared__ int B[BLOCK_DIM][BLOCK_DIM];
__shared__ int C[BLOCK_DIM][BLOCK_DIM];
B[bi][bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)];
B[(bi + TH_DIM)][bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)];
B[bi][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))];
B[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))];
C[bi][bj] = graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + bj)];
C[(bi + TH_DIM)][bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)];
C[bi][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))];
C[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))];
__syncthreads();
// block_calc(C, B, C, bi, bj);
#pragma unroll
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = B[bi][k] + C[k][bj];
int sum1 = B[(bi + TH_DIM)][k] + C[k][bj];
int sum2 = B[bi][k] + C[k][(bj + TH_DIM)];
int sum3 = B[(bi + TH_DIM)][k] + C[k][(bj + TH_DIM)];
C[bi][bj] = min(C[bi][bj], sum0);
C[(bi + TH_DIM)][bj] = min(C[(bi + TH_DIM)][bj], sum1);
C[bi][(bj + TH_DIM)] = min(C[bi][(bj + TH_DIM)], sum2);
C[(bi + TH_DIM)][(bj + TH_DIM)] = min(C[(bi + TH_DIM)][(bj + TH_DIM)], sum3);
__syncthreads();
}
// Block C is the only one that could be changed
graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + bj)] = C[bi][bj];
graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)] = C[(bi + TH_DIM)][bj];
graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[bi][(bj + TH_DIM)];
graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)][(bj + TH_DIM)];
}
void block_FW_cuda() {
// int round = padding_n / B;
const int blocks = padding_n / BLOCK_DIM;
dim3 block_dim(TH_DIM, TH_DIM, 1);
dim3 phase3_grid(blocks, blocks, 1);
// for (int k = 0; k < blocks; k++) {
// hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase1), dim3(1), dim3(block_dim), 0, 0, pitch_k, k, Dist_cuda);
// floyd_warshall_block_kernel_phase2<<<blocks, block_dim>>>(pitch_k, k, Dist_cuda);
// floyd_warshall_block_kernel_phase3<<<phase3_grid, block_dim>>>(pitch_k, k, Dist_cuda, 0, 0);
// }
const int row_size_pitchk = BLOCK_DIM * pitch_k;
up_part_size_in_block = (blocks+1)/2;
bottom_part_size_in_block = blocks/2;
up_part_height = BLOCK_DIM * up_part_size_in_block;
bottom_part_height = BLOCK_DIM * bottom_part_size_in_block;
dim3 phase31_grid(blocks, up_part_size_in_block, 1);
dim3 phase32_grid(blocks, bottom_part_size_in_block, 1);
// printf("Up Blocks: %d, Bottom Blocks: %d\n", up_part_size_in_block, bottom_part_size_in_block);
for (int k = 0; k < blocks; k++) {
int next_k = k + 1;
// Phase 1
hipSetDevice(0);
hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase1), dim3(1), dim3(block_dim), 0, 0, pitch_k, k, Dist_cuda0);
hipSetDevice(1);
hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase1), dim3(1), dim3(block_dim), 0, 0, pitch_k, k, Dist_cuda1);
// Phase 2
hipStream_t stream;
hipStreamCreate(&stream);
hipSetDevice(0);
// floyd_warshall_block_kernel_phase2<<<blocks, block_dim>>>(pitch_k, k, Dist_cuda0);
hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase21), dim3(up_part_size_in_block), dim3(block_dim), 0, 0, pitch_k, k, Dist_cuda0, 0);
hipSetDevice(1);
// floyd_warshall_block_kernel_phase2<<<blocks, block_dim>>>(pitch_k, k, Dist_cuda1);
hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase21), dim3(bottom_part_size_in_block), dim3(block_dim), 0, 0, pitch_k, k, Dist_cuda1, up_part_size_in_block);
// Calculate rows of phase 2
if(k < up_part_size_in_block){
hipSetDevice(0);
hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase22), dim3(blocks), dim3(block_dim), 0, stream, pitch_k, k, Dist_cuda0, 0);
}else{
hipSetDevice(1);
hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase22), dim3(blocks), dim3(block_dim), 0, stream, pitch_k, k, Dist_cuda1, 0);
}
hipStreamDestroy(stream);
// Phase 3
hipSetDevice(0);
hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase3), dim3(phase31_grid), dim3(block_dim), 0, 0, pitch_k, k, Dist_cuda0, 0, 0);
hipSetDevice(1);
hipLaunchKernelGGL(( floyd_warshall_block_kernel_phase3), dim3(phase32_grid), dim3(block_dim), 0, 0, pitch_k, k, Dist_cuda1, 0, up_part_size_in_block);
// Transfer data to another GPU
if(next_k < up_part_size_in_block){
// printf("Up K: %d, Next_K: %d, Blocks: %d\n", k, next_k, blocks);
hipMemcpyPeer(&(Dist_cuda1[next_k * row_size_pitchk]), 1, &(Dist_cuda0[next_k * row_size_pitchk]), 0, SIZEOFINT * row_size_pitchk);
}else if(next_k < blocks){
// printf("Down K: %d, Next_K: %d, Blocks: %d\n", k, next_k, blocks);
hipMemcpyPeer(&(Dist_cuda0[next_k * row_size_pitchk]), 0, &(Dist_cuda1[next_k * row_size_pitchk]), 1, SIZEOFINT * row_size_pitchk);
}
}
}
int main(int argc, char* argv[]) {
input(argv[1]);
// show_mat(getDistAddr(0, 0), n);
setup_DistCuda();
// printf("Vertice: %d, Edge: %d, B: %d, Padding: %d\n", n, m, BLOCK_DIM, padding_n);
block_FW_cuda();
back_DistCuda();
// show_mat(getDistAddr(0, 0), n);
output(argv[2]);
// show_mat(getDistAddr(0, 0), n);
return 0;
} | cc27bdbea0ff9df0f4d22478ff36e550a259d596.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cuda.h>
#define SIZEOFINT sizeof(int)
#define BLOCK_DIM 64
#define TH_DIM 32
const int INF = ((1 << 30) - 1);
int n, m, padding_n, pitch_k, Dist_row_size_in_byte;
size_t pitch;
int up_part_size_in_block = 0, bottom_part_size_in_block = 0, up_part_height = 0, bottom_part_height = 0;
int *Dist, *Dist_s;
int *Dist_cuda, *Dist_cuda0, *Dist_cuda1;
void show_mat(int *start_p, int vertex_num){
for(int i = 0; i < vertex_num; i++){
for(int j = 0; j < vertex_num; j++){
if(start_p[i * vertex_num + j] == INF){
printf("INF\t ");
}else{
printf("%d\t ", start_p[i * vertex_num + j]);
}
}
printf("\n");
}
}
void show_mat_cuda(int *start_p, int vertex_num, int padding_n, size_t pitch, int device_id){
int *temp = (int*)malloc(SIZEOFINT * padding_n * padding_n);
cudaSetDevice(device_id);
// cudaMemcpy(temp, start_p, (SIZEOFINT * vertex_num * vertex_num), cudaMemcpyDeviceToHost);
cudaMemcpy2D(temp, SIZEOFINT * padding_n, start_p, pitch, SIZEOFINT * padding_n, padding_n, cudaMemcpyDeviceToHost);
printf("---\n");
for(int i = 0; i < vertex_num; i++){
for(int j = 0; j < vertex_num; j++){
if(temp[i * vertex_num + j] == INF){
printf("INF\t ");
}else{
printf("%d\t ", temp[i * vertex_num + j]);
}
}
printf("\n");
}
printf("---\n");
}
void malloc_Dist(){
cudaHostAlloc(&Dist, SIZEOFINT * padding_n * padding_n, cudaHostAllocPortable);
// Dist = (int*)malloc(SIZEOFINT * padding_n * padding_n);
Dist_s = (int*)malloc(SIZEOFINT * n * n);
}
int getDist(int i, int j){return Dist[i * padding_n + j];}
int *getDistAddr(int i, int j){return &(Dist[i * padding_n + j]);}
void setDist(int i, int j, int val){Dist[i * padding_n + j] = val;}
void setup_DistCuda(){
// cudaMalloc((void **)&Dist_cuda, SIZEOFINT * padding_n * padding_n);
// cudaMemcpy(Dist_cuda, Dist, (padding_n * padding_n * SIZEOFINT), cudaMemcpyHostToDevice);
// cudaMallocPitch(&Dist_cuda, &pitch, SIZEOFINT * padding_n, padding_n);
// cudaMemcpy2D(Dist_cuda, pitch, Dist, SIZEOFINT * padding_n, SIZEOFINT * padding_n, padding_n, cudaMemcpyHostToDevice);
// pitch_k = ((int)pitch) / SIZEOFINT;
cudaStream_t stream;
cudaStreamCreate(&stream);
cudaSetDevice(0);
cudaDeviceEnablePeerAccess(0, 0);
cudaMallocPitch(&Dist_cuda0, &pitch, SIZEOFINT * padding_n, padding_n);
cudaMemcpy2DAsync(Dist_cuda0, pitch, Dist, SIZEOFINT * padding_n, SIZEOFINT * padding_n, padding_n, cudaMemcpyHostToDevice, stream);
pitch_k = ((int)pitch) / SIZEOFINT;
cudaSetDevice(1);
cudaDeviceEnablePeerAccess(1, 0);
cudaMallocPitch(&Dist_cuda1, &pitch, SIZEOFINT * padding_n, padding_n);
cudaMemcpy2D(Dist_cuda1, pitch, Dist, SIZEOFINT * padding_n, SIZEOFINT * padding_n, padding_n, cudaMemcpyHostToDevice);
cudaStreamDestroy(stream);
}
void back_DistCuda(){
// cudaMemcpy(Dist, Dist_cuda, (padding_n * padding_n * SIZEOFINT), cudaMemcpyDeviceToHost);
// cudaMemcpy2D(Dist, SIZEOFINT * padding_n, Dist_cuda, pitch, SIZEOFINT * padding_n, padding_n, cudaMemcpyDeviceToHost);
cudaStream_t stream;
cudaStreamCreate(&stream);
cudaSetDevice(0);
cudaMemcpy2DAsync(Dist, SIZEOFINT * padding_n, Dist_cuda0, pitch, SIZEOFINT * padding_n, padding_n, cudaMemcpyDeviceToHost, stream);
cudaSetDevice(1);
cudaMemcpy2D(&(Dist[up_part_height * padding_n]), SIZEOFINT * padding_n, &(Dist_cuda1[up_part_height * pitch_k]), pitch, SIZEOFINT * padding_n, (bottom_part_height), cudaMemcpyDeviceToHost);
cudaStreamDestroy(stream);
}
void input(char* infile) {
FILE* file = fopen(infile, "rb");
fread(&n, sizeof(int), 1, file);
fread(&m, sizeof(int), 1, file);
padding_n = ((n + BLOCK_DIM - 1) / BLOCK_DIM) * BLOCK_DIM;
Dist_row_size_in_byte = SIZEOFINT * padding_n;
malloc_Dist();
for (int i = 0; i < padding_n; i++) {
for (int j = 0; j < padding_n; j++) {
if (i == j) {
setDist(i, j, 0);
// Dist[i][j] = 0;
} else {
setDist(i, j, INF);
// Dist[i][j] = INF;
}
}
}
int pair[3];
int *edges_buf = (int*)malloc(3 * m * SIZEOFINT);
fread(edges_buf, sizeof(int), 3 * m, file);
for (int i = 0; i < m; i++) {
// fread(pair, sizeof(int), 3, file);
setDist(edges_buf[3 * i], edges_buf[3 * i + 1], edges_buf[3 * i + 2]);
}
free(edges_buf);
fclose(file);
}
void output(char* outFileName) {
FILE* outfile = fopen(outFileName, "w");
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
// if (Dist[i][j] >= INF) Dist[i][j] = INF;
if (getDist(i, j) >= INF) setDist(i, j, INF);
Dist_s[i * n + j] = getDist(i, j);
}
// fwrite(Dist[i], sizeof(int), n, outfile);
// fwrite(getDistAddr(i, 0), SIZEOFINT, n, outfile);
}
fwrite(Dist_s, sizeof(int), n * n, outfile);
fclose(outfile);
}
__forceinline__
__device__ void block_calc(int* C, int* A, int* B, int bj, int bi) {
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = A[bi*BLOCK_DIM + k] + B[k*BLOCK_DIM + bj];
// int sum1 = A[(bi + TH_DIM)*BLOCK_DIM + k] + B[k*BLOCK_DIM + bj];
// int sum2 = A[bi*BLOCK_DIM + k] + B[k*BLOCK_DIM + (bj + TH_DIM)];
// int sum3 = A[(bi + TH_DIM)*BLOCK_DIM + k] + B[k*BLOCK_DIM + (bj + TH_DIM)];
C[bi*BLOCK_DIM + bj] = min(C[bi*BLOCK_DIM + bj], sum0);
// C[(bi + TH_DIM)*BLOCK_DIM + bj] = min(C[(bi + TH_DIM)*BLOCK_DIM + bj], sum1);
// C[bi*BLOCK_DIM + (bj + TH_DIM)] = min(C[bi*BLOCK_DIM + (bj + TH_DIM)], sum2);
// C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = min(C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)], sum3);
__syncthreads();
}
}
__forceinline__
__device__ void block_calc_rev_async(int* C, int* A, int* B, int bj, int bi) {
#pragma unroll
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = A[k*BLOCK_DIM + bi] + B[k*BLOCK_DIM + bj];
// int sum1 = A[k*BLOCK_DIM + (bi + TH_DIM)] + B[k*BLOCK_DIM + bj];
// int sum2 = A[k*BLOCK_DIM + bi] + B[k*BLOCK_DIM + (bj + TH_DIM)];
// int sum3 = A[k*BLOCK_DIM + (bi + TH_DIM)] + B[k*BLOCK_DIM + (bj + TH_DIM)];
C[bi*BLOCK_DIM + bj] = min(C[bi*BLOCK_DIM + bj], sum0);
// C[(bi + TH_DIM)*BLOCK_DIM + bj] = min(C[(bi + TH_DIM)*BLOCK_DIM + bj], sum1);
// C[bi*BLOCK_DIM + (bj + TH_DIM)] = min(C[bi*BLOCK_DIM + (bj + TH_DIM)], sum2);
// C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)] = min(C[(bi + TH_DIM)*BLOCK_DIM + (bj + TH_DIM)], sum3);
}
}
__global__ void floyd_warshall_block_kernel_phase1(int n, int k, int* graph) {
const unsigned int bi = threadIdx.y;
const unsigned int bj = threadIdx.x;
__shared__ int C[BLOCK_DIM][BLOCK_DIM];
// Transfer to temp shared arrays
C[bi][bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)];
C[(bi + TH_DIM)][bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)];
C[bi][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))];
C[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))];
__syncthreads();
// block_calc(C, C, C, bi, bj);
#pragma unroll
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = C[bi][k] + C[k][bj];
int sum1 = C[(bi + TH_DIM)][k] + C[k][bj];
int sum2 = C[bi][k] + C[k][(bj + TH_DIM)];
int sum3 = C[(bi + TH_DIM)][k] + C[k][(bj + TH_DIM)];
C[bi][bj] = min(C[bi][bj], sum0);
C[(bi + TH_DIM)][bj] = min(C[(bi + TH_DIM)][bj], sum1);
C[bi][(bj + TH_DIM)] = min(C[bi][(bj + TH_DIM)], sum2);
C[(bi + TH_DIM)][(bj + TH_DIM)] = min(C[(bi + TH_DIM)][(bj + TH_DIM)], sum3);
__syncthreads();
}
// Transfer back to graph
graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)] = C[bi][bj];
graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)] = C[(bi + TH_DIM)][bj];
graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[bi][(bj + TH_DIM)];
graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)][(bj + TH_DIM)];
}
__global__ void floyd_warshall_block_kernel_phase2(int n, int k, int* graph) {
// BlockDim is one dimensional (Straight along diagonal)
// Blocks themselves are two dimensional
// Phase 2 1/2
const unsigned int i = blockIdx.x;
const unsigned int bi = threadIdx.y;
const unsigned int bj = threadIdx.x;
// __shared__ int A[BLOCK_DIM * BLOCK_DIM];
__shared__ int B[BLOCK_DIM][BLOCK_DIM];
__shared__ int C[BLOCK_DIM][BLOCK_DIM];
C[bi][bj] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)];
C[(bi + TH_DIM)][bj] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)];
C[bi][(bj + TH_DIM)] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))];
C[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))];
B[bi][bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)];
B[(bi + TH_DIM)][bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)];
B[bi][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))];
B[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))];
__syncthreads();
// block_calc(C, C, B, bi, bj);
#pragma unroll
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = C[bi][k] + B[k][bj];
int sum1 = C[(bi + TH_DIM)][k] + B[k][bj];
int sum2 = C[bi][k] + B[k][(bj + TH_DIM)];
int sum3 = C[(bi + TH_DIM)][k] + B[k][(bj + TH_DIM)];
C[bi][bj] = min(C[bi][bj], sum0);
C[(bi + TH_DIM)][bj] = min(C[(bi + TH_DIM)][bj], sum1);
C[bi][(bj + TH_DIM)] = min(C[bi][(bj + TH_DIM)], sum2);
C[(bi + TH_DIM)][(bj + TH_DIM)] = min(C[(bi + TH_DIM)][(bj + TH_DIM)], sum3);
__syncthreads();
}
graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)] = C[bi][bj];
graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)] = C[(bi + TH_DIM)][bj];
graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[bi][(bj + TH_DIM)];
graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)][(bj + TH_DIM)];
// Phase 2 2/2
C[bi][bj] = graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + bj)];
C[(bi + TH_DIM)][bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)];
C[bi][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))];
C[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))];
__syncthreads();
// block_calc(C, B, C, bi, bj);
#pragma unroll
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = B[bi][k] + C[k][bj];
int sum1 = B[(bi + TH_DIM)][k] + C[k][bj];
int sum2 = B[bi][k] + C[k][(bj + TH_DIM)];
int sum3 = B[(bi + TH_DIM)][k] + C[k][(bj + TH_DIM)];
C[bi][bj] = min(C[bi][bj], sum0);
C[(bi + TH_DIM)][bj] = min(C[(bi + TH_DIM)][bj], sum1);
C[bi][(bj + TH_DIM)] = min(C[bi][(bj + TH_DIM)], sum2);
C[(bi + TH_DIM)][(bj + TH_DIM)] = min(C[(bi + TH_DIM)][(bj + TH_DIM)], sum3);
__syncthreads();
}
// Block C is the only one that could be changed
graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + bj)] = C[bi][bj];
graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)] = C[(bi + TH_DIM)][bj];
graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[bi][(bj + TH_DIM)];
graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)][(bj + TH_DIM)];
}
__global__ void floyd_warshall_block_kernel_phase3(int n, int k, int* graph, int start_x, int start_y) {
// BlockDim is one dimensional (Straight along diagonal)
// Blocks themselves are two dimensional
const unsigned int j = start_x + blockIdx.x;
const unsigned int i = start_y + blockIdx.y;
const unsigned int bi = threadIdx.y;
const unsigned int bj = threadIdx.x;
__shared__ int A[BLOCK_DIM][BLOCK_DIM];
__shared__ int B[BLOCK_DIM][BLOCK_DIM];
__shared__ int C[BLOCK_DIM][BLOCK_DIM];
C[bi][bj] = graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + bj)];
C[(bi + TH_DIM)][bj] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + bj)];
C[bi][(bj + TH_DIM)] = graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + (bj + TH_DIM))];
C[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + (bj + TH_DIM))];
A[bj][bi] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)];
A[bj][(bi + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)];
A[(bj + TH_DIM)][bi] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))];
A[(bj + TH_DIM)][(bi + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))];
B[bi][bj] = graph[(k*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + bj)];
B[(bi + TH_DIM)][bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + bj)];
B[bi][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + (bj + TH_DIM))];
B[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + (bj + TH_DIM))];
__syncthreads();
// block_calc_rev_async(C, A, B, bi, bj);
#pragma unroll
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = A[k][bi] + B[k][bj];
int sum1 = A[k][(bi + TH_DIM)] + B[k][bj];
int sum2 = A[k][bi] + B[k][(bj + TH_DIM)];
int sum3 = A[k][(bi + TH_DIM)] + B[k][(bj + TH_DIM)];
C[bi][bj] = min(C[bi][bj], sum0);
C[(bi + TH_DIM)][bj] = min(C[(bi + TH_DIM)][bj], sum1);
C[bi][(bj + TH_DIM)] = min(C[bi][(bj + TH_DIM)], sum2);
C[(bi + TH_DIM)][(bj + TH_DIM)] = min(C[(bi + TH_DIM)][(bj + TH_DIM)], sum3);
}
// __syncthreads();
graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + bj)] = C[bi][bj];
graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + bj)] = C[(bi + TH_DIM)][bj];
graph[(i*BLOCK_DIM + bi)*n + (j*BLOCK_DIM + (bj + TH_DIM))] = C[bi][(bj + TH_DIM)];
graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (j*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)][(bj + TH_DIM)];
}
__global__ void floyd_warshall_block_kernel_phase21(int n, int k, int* graph, int start) {
// BlockDim is one dimensional (Straight along diagonal)
// Blocks themselves are two dimensional
// Phase 2 1/2, update column
// const unsigned int i = blockIdx.x;
const unsigned int i = start + blockIdx.x;
const unsigned int bi = threadIdx.y;
const unsigned int bj = threadIdx.x;
__shared__ int A[BLOCK_DIM][BLOCK_DIM];
__shared__ int B[BLOCK_DIM][BLOCK_DIM];
__shared__ int C[BLOCK_DIM][BLOCK_DIM];
C[bi][bj] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)];
C[(bi + TH_DIM)][bj] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)];
C[bi][(bj + TH_DIM)] = graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))];
C[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))];
B[bi][bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)];
B[(bi + TH_DIM)][bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)];
B[bi][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))];
B[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))];
__syncthreads();
// block_calc(C, C, B, bi, bj);
#pragma unroll
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = C[bi][k] + B[k][bj];
int sum1 = C[(bi + TH_DIM)][k] + B[k][bj];
int sum2 = C[bi][k] + B[k][(bj + TH_DIM)];
int sum3 = C[(bi + TH_DIM)][k] + B[k][(bj + TH_DIM)];
C[bi][bj] = min(C[bi][bj], sum0);
C[(bi + TH_DIM)][bj] = min(C[(bi + TH_DIM)][bj], sum1);
C[bi][(bj + TH_DIM)] = min(C[bi][(bj + TH_DIM)], sum2);
C[(bi + TH_DIM)][(bj + TH_DIM)] = min(C[(bi + TH_DIM)][(bj + TH_DIM)], sum3);
__syncthreads();
}
graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)] = C[bi][bj];
graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)] = C[(bi + TH_DIM)][bj];
graph[(i*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[bi][(bj + TH_DIM)];
graph[(i*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)][(bj + TH_DIM)];
}
__global__ void floyd_warshall_block_kernel_phase22(int n, int k, int* graph, int start) {
// BlockDim is one dimensional (Straight along diagonal)
// Blocks themselves are two dimensional
// Phase 2 2/2, update row
// const unsigned int i = blockIdx.x;
const unsigned int i = start + blockIdx.x;
const unsigned int bi = threadIdx.y;
const unsigned int bj = threadIdx.x;
__shared__ int A[BLOCK_DIM][BLOCK_DIM];
__shared__ int B[BLOCK_DIM][BLOCK_DIM];
__shared__ int C[BLOCK_DIM][BLOCK_DIM];
B[bi][bj] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + bj)];
B[(bi + TH_DIM)][bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + bj)];
B[bi][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (k*BLOCK_DIM + (bj + TH_DIM))];
B[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (k*BLOCK_DIM + (bj + TH_DIM))];
C[bi][bj] = graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + bj)];
C[(bi + TH_DIM)][bj] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)];
C[bi][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))];
C[(bi + TH_DIM)][(bj + TH_DIM)] = graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))];
__syncthreads();
// block_calc(C, B, C, bi, bj);
#pragma unroll
for (int k = 0; k < BLOCK_DIM; k++) {
int sum0 = B[bi][k] + C[k][bj];
int sum1 = B[(bi + TH_DIM)][k] + C[k][bj];
int sum2 = B[bi][k] + C[k][(bj + TH_DIM)];
int sum3 = B[(bi + TH_DIM)][k] + C[k][(bj + TH_DIM)];
C[bi][bj] = min(C[bi][bj], sum0);
C[(bi + TH_DIM)][bj] = min(C[(bi + TH_DIM)][bj], sum1);
C[bi][(bj + TH_DIM)] = min(C[bi][(bj + TH_DIM)], sum2);
C[(bi + TH_DIM)][(bj + TH_DIM)] = min(C[(bi + TH_DIM)][(bj + TH_DIM)], sum3);
__syncthreads();
}
// Block C is the only one that could be changed
graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + bj)] = C[bi][bj];
graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + bj)] = C[(bi + TH_DIM)][bj];
graph[(k*BLOCK_DIM + bi)*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[bi][(bj + TH_DIM)];
graph[(k*BLOCK_DIM + (bi + TH_DIM))*n + (i*BLOCK_DIM + (bj + TH_DIM))] = C[(bi + TH_DIM)][(bj + TH_DIM)];
}
void block_FW_cuda() {
// int round = padding_n / B;
const int blocks = padding_n / BLOCK_DIM;
dim3 block_dim(TH_DIM, TH_DIM, 1);
dim3 phase3_grid(blocks, blocks, 1);
// for (int k = 0; k < blocks; k++) {
// floyd_warshall_block_kernel_phase1<<<1, block_dim>>>(pitch_k, k, Dist_cuda);
// floyd_warshall_block_kernel_phase2<<<blocks, block_dim>>>(pitch_k, k, Dist_cuda);
// floyd_warshall_block_kernel_phase3<<<phase3_grid, block_dim>>>(pitch_k, k, Dist_cuda, 0, 0);
// }
const int row_size_pitchk = BLOCK_DIM * pitch_k;
up_part_size_in_block = (blocks+1)/2;
bottom_part_size_in_block = blocks/2;
up_part_height = BLOCK_DIM * up_part_size_in_block;
bottom_part_height = BLOCK_DIM * bottom_part_size_in_block;
dim3 phase31_grid(blocks, up_part_size_in_block, 1);
dim3 phase32_grid(blocks, bottom_part_size_in_block, 1);
// printf("Up Blocks: %d, Bottom Blocks: %d\n", up_part_size_in_block, bottom_part_size_in_block);
for (int k = 0; k < blocks; k++) {
int next_k = k + 1;
// Phase 1
cudaSetDevice(0);
floyd_warshall_block_kernel_phase1<<<1, block_dim>>>(pitch_k, k, Dist_cuda0);
cudaSetDevice(1);
floyd_warshall_block_kernel_phase1<<<1, block_dim>>>(pitch_k, k, Dist_cuda1);
// Phase 2
cudaStream_t stream;
cudaStreamCreate(&stream);
cudaSetDevice(0);
// floyd_warshall_block_kernel_phase2<<<blocks, block_dim>>>(pitch_k, k, Dist_cuda0);
floyd_warshall_block_kernel_phase21<<<up_part_size_in_block, block_dim, 0>>>(pitch_k, k, Dist_cuda0, 0);
cudaSetDevice(1);
// floyd_warshall_block_kernel_phase2<<<blocks, block_dim>>>(pitch_k, k, Dist_cuda1);
floyd_warshall_block_kernel_phase21<<<bottom_part_size_in_block, block_dim, 0>>>(pitch_k, k, Dist_cuda1, up_part_size_in_block);
// Calculate rows of phase 2
if(k < up_part_size_in_block){
cudaSetDevice(0);
floyd_warshall_block_kernel_phase22<<<blocks, block_dim, 0, stream>>>(pitch_k, k, Dist_cuda0, 0);
}else{
cudaSetDevice(1);
floyd_warshall_block_kernel_phase22<<<blocks, block_dim, 0, stream>>>(pitch_k, k, Dist_cuda1, 0);
}
cudaStreamDestroy(stream);
// Phase 3
cudaSetDevice(0);
floyd_warshall_block_kernel_phase3<<<phase31_grid, block_dim>>>(pitch_k, k, Dist_cuda0, 0, 0);
cudaSetDevice(1);
floyd_warshall_block_kernel_phase3<<<phase32_grid, block_dim>>>(pitch_k, k, Dist_cuda1, 0, up_part_size_in_block);
// Transfer data to another GPU
if(next_k < up_part_size_in_block){
// printf("Up K: %d, Next_K: %d, Blocks: %d\n", k, next_k, blocks);
cudaMemcpyPeer(&(Dist_cuda1[next_k * row_size_pitchk]), 1, &(Dist_cuda0[next_k * row_size_pitchk]), 0, SIZEOFINT * row_size_pitchk);
}else if(next_k < blocks){
// printf("Down K: %d, Next_K: %d, Blocks: %d\n", k, next_k, blocks);
cudaMemcpyPeer(&(Dist_cuda0[next_k * row_size_pitchk]), 0, &(Dist_cuda1[next_k * row_size_pitchk]), 1, SIZEOFINT * row_size_pitchk);
}
}
}
int main(int argc, char* argv[]) {
input(argv[1]);
// show_mat(getDistAddr(0, 0), n);
setup_DistCuda();
// printf("Vertice: %d, Edge: %d, B: %d, Padding: %d\n", n, m, BLOCK_DIM, padding_n);
block_FW_cuda();
back_DistCuda();
// show_mat(getDistAddr(0, 0), n);
output(argv[2]);
// show_mat(getDistAddr(0, 0), n);
return 0;
} |
49c23ef7bb5f14da85105fc544ad90e413fadff8.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgecscsyncfreetrsm.cu, normal z -> s, Wed Jan 2 14:18:54 2019
@author Weifeng Liu
*/
// CSC Sync-Free SpTRSM kernel
// see paper by W. Liu, A. Li, J. D. Hogg, I. S. Duff, and B. Vinter. (2016).
// "A Synchronization-Free Algorithm for Parallel Sparse Triangular Solves".
// 22nd International European Conference on Parallel and Distributed Computing
// (Euro-Par '16). pp. 617-630.
#include "magmasparse_internal.h"
#include "atomicopsfloat.h"
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#define MAGMA_CSC_SYNCFREE_WARP_SIZE 32
#define MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD 0
#define MAGMA_CSC_SYNCFREE_SUBSTITUTION_BACKWARD 1
#define MAGMA_CSC_SYNCFREE_OPT_WARP_NNZ 1
#define MAGMA_CSC_SYNCFREE_OPT_WARP_RHS 2
#define MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO 3
__global__
void sptrsv_syncfree_analyser(magmaIndex_ptr d_cscRowIdx,
magmaFloat_ptr d_cscVal,
magma_int_t m,
magma_int_t nnz,
magmaIndex_ptr d_graphInDegree)
{
const int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < nnz)
{
atomicAdd(&d_graphInDegree[d_cscRowIdx[global_id]], 1);
}
}
__global__
void sptrsm_syncfree_executor(magmaIndex_ptr d_cscColPtr,
magmaIndex_ptr d_cscRowIdx,
magmaFloat_ptr d_cscVal,
magmaIndex_ptr d_graphInDegree,
magma_int_t m,
magma_int_t substitution,
magma_int_t rhs,
magma_int_t opt,
magmaFloat_ptr d_b,
magmaFloat_ptr d_x)
{
const int global_id = blockIdx.x * blockDim.x + threadIdx.x;
int global_x_id = global_id / MAGMA_CSC_SYNCFREE_WARP_SIZE;
if (global_x_id >= m) return;
// substitution is forward or backward
global_x_id = substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
global_x_id : m - 1 - global_x_id;
// Initialize
const int lane_id = (MAGMA_CSC_SYNCFREE_WARP_SIZE - 1) & threadIdx.x;
// Prefetch
const int pos = substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id] : d_cscColPtr[global_x_id+1]-1;
const float one = MAGMA_S_MAKE( 1.0, 0.0);
const float coef = one / d_cscVal[pos];
/*
// clock_t start;
// Consumer
do {
start = clock();
}
while (1 != d_graphInDegree[global_x_id]);
// Consumer
int graphInDegree;
do {
//bypass Tex cache and avoid other mem optimization by nvcc/ptxas
asm("ld.global.u32 %0, [%1];" : "=r"(graphInDegree),"=r"(d_graphInDegree[global_x_id]) :: "memory");
}
while (1 != graphInDegree );
*/
for (int k = lane_id; k < rhs; k += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const int pos = global_x_id * rhs + k;
d_x[pos] = (d_b[pos] - d_x[pos]) * coef;
}
// Producer
const magma_index_t start_ptr =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id]+1 : d_cscColPtr[global_x_id];
const magma_index_t stop_ptr =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id+1] : d_cscColPtr[global_x_id+1]-1;
if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_NNZ)
{
for (magma_index_t jj = start_ptr + lane_id;
jj < stop_ptr; jj += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = 0; k < rhs; k++)
atomicAddfloat(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_RHS)
{
for (magma_index_t jj = start_ptr; jj < stop_ptr; jj++)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = lane_id;
k < rhs; k+=MAGMA_CSC_SYNCFREE_WARP_SIZE)
atomicAddfloat(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
if (!lane_id) atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO)
{
const magma_index_t len = stop_ptr - start_ptr;
if ((len <= rhs || rhs > 8) && len < 2048)
{
for (magma_index_t jj = start_ptr; jj < stop_ptr; jj++)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = lane_id;
k < rhs; k+=MAGMA_CSC_SYNCFREE_WARP_SIZE)
atomicAddfloat(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
if (!lane_id) atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else
{
for (magma_index_t jj = start_ptr + lane_id;
jj < stop_ptr; jj += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = 0; k < rhs; k++)
atomicAddfloat(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
}
}
extern "C" magma_int_t
magma_sgecscsyncfreetrsm_analysis(
magma_int_t m,
magma_int_t nnz,
magmaFloat_ptr dval,
magmaIndex_ptr dcolptr,
magmaIndex_ptr drowind,
magmaIndex_ptr dgraphindegree,
magmaIndex_ptr dgraphindegree_bak,
magma_queue_t queue )
{
int info = MAGMA_SUCCESS;
int num_threads = 128;
int num_blocks = ceil ((float)nnz / (float)num_threads);
hipMemset(dgraphindegree, 0, m * sizeof(magma_index_t));
hipLaunchKernelGGL(( sptrsv_syncfree_analyser), dim3(num_blocks), dim3(num_threads) , 0, 0,
drowind, dval, m, nnz, dgraphindegree);
// backup in-degree array
hipMemcpy(dgraphindegree_bak, dgraphindegree,
m * sizeof(int), hipMemcpyDeviceToDevice);
return info;
}
extern "C" magma_int_t
magma_sgecscsyncfreetrsm_solve(
magma_int_t m,
magma_int_t nnz,
magmaFloat_ptr dval,
magmaIndex_ptr dcolptr,
magmaIndex_ptr drowind,
magmaIndex_ptr dgraphindegree,
magmaIndex_ptr dgraphindegree_bak,
magmaFloat_ptr dx,
magmaFloat_ptr db,
magma_int_t substitution,
magma_int_t rhs,
magma_queue_t queue )
{
int info = MAGMA_SUCCESS;
// get an unmodified in-degree array, only for benchmarking use
hipMemcpy(dgraphindegree, dgraphindegree_bak,
m * sizeof(magma_index_t), hipMemcpyDeviceToDevice);
// clear d_x for atomic operations
hipMemset(dx, 0, sizeof(float) * m * rhs);
int num_threads, num_blocks;
num_threads = 4 * MAGMA_CSC_SYNCFREE_WARP_SIZE;
num_blocks = ceil ((float)m /
(float)(num_threads/MAGMA_CSC_SYNCFREE_WARP_SIZE));
hipLaunchKernelGGL(( sptrsm_syncfree_executor), dim3(num_blocks), dim3(num_threads) , 0, 0,
dcolptr, drowind, dval, dgraphindegree,
m, substitution, rhs, MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO,
db, dx);
return info;
}
| 49c23ef7bb5f14da85105fc544ad90e413fadff8.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgecscsyncfreetrsm.cu, normal z -> s, Wed Jan 2 14:18:54 2019
@author Weifeng Liu
*/
// CSC Sync-Free SpTRSM kernel
// see paper by W. Liu, A. Li, J. D. Hogg, I. S. Duff, and B. Vinter. (2016).
// "A Synchronization-Free Algorithm for Parallel Sparse Triangular Solves".
// 22nd International European Conference on Parallel and Distributed Computing
// (Euro-Par '16). pp. 617-630.
#include "magmasparse_internal.h"
#include "atomicopsfloat.h"
#include <cuda.h> // for CUDA_VERSION
#define MAGMA_CSC_SYNCFREE_WARP_SIZE 32
#define MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD 0
#define MAGMA_CSC_SYNCFREE_SUBSTITUTION_BACKWARD 1
#define MAGMA_CSC_SYNCFREE_OPT_WARP_NNZ 1
#define MAGMA_CSC_SYNCFREE_OPT_WARP_RHS 2
#define MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO 3
__global__
void sptrsv_syncfree_analyser(magmaIndex_ptr d_cscRowIdx,
magmaFloat_ptr d_cscVal,
magma_int_t m,
magma_int_t nnz,
magmaIndex_ptr d_graphInDegree)
{
const int global_id = blockIdx.x * blockDim.x + threadIdx.x;
if (global_id < nnz)
{
atomicAdd(&d_graphInDegree[d_cscRowIdx[global_id]], 1);
}
}
__global__
void sptrsm_syncfree_executor(magmaIndex_ptr d_cscColPtr,
magmaIndex_ptr d_cscRowIdx,
magmaFloat_ptr d_cscVal,
magmaIndex_ptr d_graphInDegree,
magma_int_t m,
magma_int_t substitution,
magma_int_t rhs,
magma_int_t opt,
magmaFloat_ptr d_b,
magmaFloat_ptr d_x)
{
const int global_id = blockIdx.x * blockDim.x + threadIdx.x;
int global_x_id = global_id / MAGMA_CSC_SYNCFREE_WARP_SIZE;
if (global_x_id >= m) return;
// substitution is forward or backward
global_x_id = substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
global_x_id : m - 1 - global_x_id;
// Initialize
const int lane_id = (MAGMA_CSC_SYNCFREE_WARP_SIZE - 1) & threadIdx.x;
// Prefetch
const int pos = substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id] : d_cscColPtr[global_x_id+1]-1;
const float one = MAGMA_S_MAKE( 1.0, 0.0);
const float coef = one / d_cscVal[pos];
/*
// clock_t start;
// Consumer
do {
start = clock();
}
while (1 != d_graphInDegree[global_x_id]);
// Consumer
int graphInDegree;
do {
//bypass Tex cache and avoid other mem optimization by nvcc/ptxas
asm("ld.global.u32 %0, [%1];" : "=r"(graphInDegree),"=r"(d_graphInDegree[global_x_id]) :: "memory");
}
while (1 != graphInDegree );
*/
for (int k = lane_id; k < rhs; k += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const int pos = global_x_id * rhs + k;
d_x[pos] = (d_b[pos] - d_x[pos]) * coef;
}
// Producer
const magma_index_t start_ptr =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id]+1 : d_cscColPtr[global_x_id];
const magma_index_t stop_ptr =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
d_cscColPtr[global_x_id+1] : d_cscColPtr[global_x_id+1]-1;
if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_NNZ)
{
for (magma_index_t jj = start_ptr + lane_id;
jj < stop_ptr; jj += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = 0; k < rhs; k++)
atomicAddfloat(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_RHS)
{
for (magma_index_t jj = start_ptr; jj < stop_ptr; jj++)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = lane_id;
k < rhs; k+=MAGMA_CSC_SYNCFREE_WARP_SIZE)
atomicAddfloat(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
if (!lane_id) atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else if (opt == MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO)
{
const magma_index_t len = stop_ptr - start_ptr;
if ((len <= rhs || rhs > 8) && len < 2048)
{
for (magma_index_t jj = start_ptr; jj < stop_ptr; jj++)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = lane_id;
k < rhs; k+=MAGMA_CSC_SYNCFREE_WARP_SIZE)
atomicAddfloat(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
if (!lane_id) atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
else
{
for (magma_index_t jj = start_ptr + lane_id;
jj < stop_ptr; jj += MAGMA_CSC_SYNCFREE_WARP_SIZE)
{
const magma_index_t j =
substitution == MAGMA_CSC_SYNCFREE_SUBSTITUTION_FORWARD ?
jj : stop_ptr - 1 - (jj - start_ptr);
const magma_index_t rowIdx = d_cscRowIdx[j];
for (magma_index_t k = 0; k < rhs; k++)
atomicAddfloat(&d_x[rowIdx * rhs + k],
d_x[global_x_id * rhs + k] * d_cscVal[j]);
__threadfence();
atomicSub(&d_graphInDegree[rowIdx], 1);
}
}
}
}
extern "C" magma_int_t
magma_sgecscsyncfreetrsm_analysis(
magma_int_t m,
magma_int_t nnz,
magmaFloat_ptr dval,
magmaIndex_ptr dcolptr,
magmaIndex_ptr drowind,
magmaIndex_ptr dgraphindegree,
magmaIndex_ptr dgraphindegree_bak,
magma_queue_t queue )
{
int info = MAGMA_SUCCESS;
int num_threads = 128;
int num_blocks = ceil ((float)nnz / (float)num_threads);
cudaMemset(dgraphindegree, 0, m * sizeof(magma_index_t));
sptrsv_syncfree_analyser<<< num_blocks, num_threads >>>
(drowind, dval, m, nnz, dgraphindegree);
// backup in-degree array
cudaMemcpy(dgraphindegree_bak, dgraphindegree,
m * sizeof(int), cudaMemcpyDeviceToDevice);
return info;
}
extern "C" magma_int_t
magma_sgecscsyncfreetrsm_solve(
magma_int_t m,
magma_int_t nnz,
magmaFloat_ptr dval,
magmaIndex_ptr dcolptr,
magmaIndex_ptr drowind,
magmaIndex_ptr dgraphindegree,
magmaIndex_ptr dgraphindegree_bak,
magmaFloat_ptr dx,
magmaFloat_ptr db,
magma_int_t substitution,
magma_int_t rhs,
magma_queue_t queue )
{
int info = MAGMA_SUCCESS;
// get an unmodified in-degree array, only for benchmarking use
cudaMemcpy(dgraphindegree, dgraphindegree_bak,
m * sizeof(magma_index_t), cudaMemcpyDeviceToDevice);
// clear d_x for atomic operations
cudaMemset(dx, 0, sizeof(float) * m * rhs);
int num_threads, num_blocks;
num_threads = 4 * MAGMA_CSC_SYNCFREE_WARP_SIZE;
num_blocks = ceil ((float)m /
(float)(num_threads/MAGMA_CSC_SYNCFREE_WARP_SIZE));
sptrsm_syncfree_executor<<< num_blocks, num_threads >>>
(dcolptr, drowind, dval, dgraphindegree,
m, substitution, rhs, MAGMA_CSC_SYNCFREE_OPT_WARP_AUTO,
db, dx);
return info;
}
|
981e3be0e19c785cabf431088fff39a695fb770b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--blockDim=32 --gridDim=32
// IMPERIAL EDIT: this kernel was commented out
#include "common.h"
__global__ void d_render(uint * d_output, uint imageW, uint imageH, float pas, float df, float tPixel)
{
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
uint id = x + y * imageW;
if( x < imageW && y < imageH )
{
//float tPixel = 2.0f/(float)min(imageW,imageH);
matrice3x4 M(MView);
Rayon R;
R.A = make_float3(M.m[0].w,M.m[1].w,M.m[2].w);
R.u = make_float3(M.m[0])*df
+ make_float3(M.m[2])*(float(x)-float(imageW)*0.5f)*tPixel
+ make_float3(M.m[1])*(float(y)-float(imageH)*0.5f)*tPixel;
R.u = normalize(R.u);
Sphere s(cnode[1].s), s2(cnode[2].s), st(cnode[2].s);
float t, t2, tt;
s.C.x += pas, s2.C.x += pas;
t = intersectionSphere(R,s.C,s.r);
t2 = intersectionSphere(R,s2.C,s2.r);
if( !t ) {
//myswap(s,s2);
//swap(t,t2);
tt = t;
t = t2;
t2 = tt;
st = s;
s = s2;
s2 = st;
}
else if( t2 && t2 < t ) {
//myswap(s,s2);
//swap(t,t2);
tt = t;
t = t2;
t2 = tt;
st = s;
s = s2;
s2 = st;
}
float4 f = make_float4(0,1,0,1)*(dot(getNormale(R.A+R.u*t,s.C),(-1.0f)*R.u));
uint n = rgbaFloatToInt(f);
//printf("%f\n",d_node[0].s.r);
if( t > 0.0f )
d_output[id] = n;
//else d_output[id] = 0;
}
__syncthreads();
}
| 981e3be0e19c785cabf431088fff39a695fb770b.cu | //pass
//--blockDim=32 --gridDim=32
// IMPERIAL EDIT: this kernel was commented out
#include "common.h"
__global__ void d_render(uint * d_output, uint imageW, uint imageH, float pas, float df, float tPixel)
{
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
uint id = x + y * imageW;
if( x < imageW && y < imageH )
{
//float tPixel = 2.0f/(float)min(imageW,imageH);
matrice3x4 M(MView);
Rayon R;
R.A = make_float3(M.m[0].w,M.m[1].w,M.m[2].w);
R.u = make_float3(M.m[0])*df
+ make_float3(M.m[2])*(float(x)-float(imageW)*0.5f)*tPixel
+ make_float3(M.m[1])*(float(y)-float(imageH)*0.5f)*tPixel;
R.u = normalize(R.u);
Sphere s(cnode[1].s), s2(cnode[2].s), st(cnode[2].s);
float t, t2, tt;
s.C.x += pas, s2.C.x += pas;
t = intersectionSphere(R,s.C,s.r);
t2 = intersectionSphere(R,s2.C,s2.r);
if( !t ) {
//myswap(s,s2);
//swap(t,t2);
tt = t;
t = t2;
t2 = tt;
st = s;
s = s2;
s2 = st;
}
else if( t2 && t2 < t ) {
//myswap(s,s2);
//swap(t,t2);
tt = t;
t = t2;
t2 = tt;
st = s;
s = s2;
s2 = st;
}
float4 f = make_float4(0,1,0,1)*(dot(getNormale(R.A+R.u*t,s.C),(-1.0f)*R.u));
uint n = rgbaFloatToInt(f);
//printf("%f\n",d_node[0].s.r);
if( t > 0.0f )
d_output[id] = n;
//else d_output[id] = 0;
}
__syncthreads();
}
|
0d9d4741c5311e1069eacf4358dfa6e01a0dc8dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "red-serial.hpp"
#include "large-case.hpp"
#include <iostream>
#include <vector>
#include <cassert>
#include "rocblas.h"
#include <omp.h>
#define N_TIME 1
typedef std::vector<float> vf;
void mult_mv(float *matrix,int matrix_rows,int matrix_cols,float *vector, int vector_size,float *result,int result_size){
hipblasInit();
float *d_matrix;
float *d_vector;
float *d_result;
// Asignacin de memoria
hipblasAlloc(matrix_rows*matrix_cols,sizeof(float),(void**)&d_matrix);
hipblasAlloc(vector_size,sizeof(float),(void**)&d_vector);
hipblasAlloc(result_size,sizeof(float),(void**)&d_result);
// Copiado de memoria Host to Device
hipblasSetMatrix(matrix_rows,matrix_cols,sizeof(float),matrix,matrix_rows,d_matrix,matrix_rows);
hipblasSetVector(vector_size,sizeof(float),vector,1,d_vector,1);
// Llamada a kernel de cublas para multiplicar d_matrix por vector
hipblasSgemv('t',matrix_cols,matrix_rows,1,d_matrix,matrix_cols,d_vector,1,0,d_result,1);
hipDeviceSynchronize();
hipblasGetVector(result_size,sizeof(float),d_result,1,result,1);
hipblasFree(d_matrix);
hipblasFree(d_vector);
hipblasFree(d_result);
hipblasShutdown();
}
void parallel_forward_pass(int n_input, int n_hidden, int n_output,
float input[], float hidden[], float output[],
float weights_ih[], float weights_ho[]){
int h, j;
const float hidden_bias = 1.0;
const float output_bias = 1.0;
/* Initialize hidden-layer neurons with zero. */
std::fill_n(hidden, n_hidden, 0.0);
// Procesamiento de capa escondida con CUDA-CUBLAS
mult_mv(weights_ih,n_hidden,n_input,input,n_input,hidden,n_hidden);
/* Hidden neuron activation */
for (h = 0; h < n_hidden; ++h)
hidden[h] = logistic(hidden[h] + hidden_bias);
mult_mv(weights_ho,n_output,n_hidden,hidden,n_hidden,output,n_output);
/* Output neuron activation */
for (j = 0; j < n_output; ++j)
output[j] = logistic(output[j] + output_bias);
}
int main() {
vf input, hidden;
vf computed_output, actual_output;
vf weights_ih, weights_ho;
float t_ini,t_end;
std::cout << "Loading vector." << std::endl;
load_matrix(input, INPUT_FILE);
std::cout << "Loading weight_ih." << std::endl;
load_matrix(weights_ih, WEIGHTS_IH_FILE);
std::cout << "Loading weight_ho." << std::endl;
load_matrix(weights_ho, WEIGHTS_HO_FILE);
std::cout << "Loading actual output." << std::endl;
load_matrix(actual_output, OUTPUT_FILE);
assert(input.size() == INPUT_SIZE);
assert(weights_ih.size() == INPUT_SIZE * HIDDEN_SIZE);
assert(weights_ho.size() == HIDDEN_SIZE * OUTPUT_SIZE);
assert(actual_output.size() == OUTPUT_SIZE);
hidden.resize(HIDDEN_SIZE);
computed_output.resize(OUTPUT_SIZE);
std::cout << "Forward pass." << std::endl;
t_ini = omp_get_wtime();
for(int i = 0; i < N_TIME; i++){
parallel_forward_pass(INPUT_SIZE, HIDDEN_SIZE, OUTPUT_SIZE,
&input[0], &hidden[0], &computed_output[0],
&weights_ih[0], &weights_ho[0]);
}
t_end = omp_get_wtime();
std::cout << "First values of computed output: " << std::endl;
print_matrix(computed_output, 1, 16);
std::cout << "Actual first values: " << std::endl;
print_matrix(actual_output, 1, 16);
std::cout << "Forward Pass Time: " << std::endl;
std::cout << (t_end - t_ini)/(float)N_TIME << std::endl;
}
| 0d9d4741c5311e1069eacf4358dfa6e01a0dc8dc.cu | #include "red-serial.hpp"
#include "large-case.hpp"
#include <iostream>
#include <vector>
#include <cassert>
#include "cublas.h"
#include <omp.h>
#define N_TIME 1
typedef std::vector<float> vf;
void mult_mv(float *matrix,int matrix_rows,int matrix_cols,float *vector, int vector_size,float *result,int result_size){
cublasInit();
float *d_matrix;
float *d_vector;
float *d_result;
// Asignación de memoria
cublasAlloc(matrix_rows*matrix_cols,sizeof(float),(void**)&d_matrix);
cublasAlloc(vector_size,sizeof(float),(void**)&d_vector);
cublasAlloc(result_size,sizeof(float),(void**)&d_result);
// Copiado de memoria Host to Device
cublasSetMatrix(matrix_rows,matrix_cols,sizeof(float),matrix,matrix_rows,d_matrix,matrix_rows);
cublasSetVector(vector_size,sizeof(float),vector,1,d_vector,1);
// Llamada a kernel de cublas para multiplicar d_matrix por vector
cublasSgemv('t',matrix_cols,matrix_rows,1,d_matrix,matrix_cols,d_vector,1,0,d_result,1);
cudaThreadSynchronize();
cublasGetVector(result_size,sizeof(float),d_result,1,result,1);
cublasFree(d_matrix);
cublasFree(d_vector);
cublasFree(d_result);
cublasShutdown();
}
void parallel_forward_pass(int n_input, int n_hidden, int n_output,
float input[], float hidden[], float output[],
float weights_ih[], float weights_ho[]){
int h, j;
const float hidden_bias = 1.0;
const float output_bias = 1.0;
/* Initialize hidden-layer neurons with zero. */
std::fill_n(hidden, n_hidden, 0.0);
// Procesamiento de capa escondida con CUDA-CUBLAS
mult_mv(weights_ih,n_hidden,n_input,input,n_input,hidden,n_hidden);
/* Hidden neuron activation */
for (h = 0; h < n_hidden; ++h)
hidden[h] = logistic(hidden[h] + hidden_bias);
mult_mv(weights_ho,n_output,n_hidden,hidden,n_hidden,output,n_output);
/* Output neuron activation */
for (j = 0; j < n_output; ++j)
output[j] = logistic(output[j] + output_bias);
}
int main() {
vf input, hidden;
vf computed_output, actual_output;
vf weights_ih, weights_ho;
float t_ini,t_end;
std::cout << "Loading vector." << std::endl;
load_matrix(input, INPUT_FILE);
std::cout << "Loading weight_ih." << std::endl;
load_matrix(weights_ih, WEIGHTS_IH_FILE);
std::cout << "Loading weight_ho." << std::endl;
load_matrix(weights_ho, WEIGHTS_HO_FILE);
std::cout << "Loading actual output." << std::endl;
load_matrix(actual_output, OUTPUT_FILE);
assert(input.size() == INPUT_SIZE);
assert(weights_ih.size() == INPUT_SIZE * HIDDEN_SIZE);
assert(weights_ho.size() == HIDDEN_SIZE * OUTPUT_SIZE);
assert(actual_output.size() == OUTPUT_SIZE);
hidden.resize(HIDDEN_SIZE);
computed_output.resize(OUTPUT_SIZE);
std::cout << "Forward pass." << std::endl;
t_ini = omp_get_wtime();
for(int i = 0; i < N_TIME; i++){
parallel_forward_pass(INPUT_SIZE, HIDDEN_SIZE, OUTPUT_SIZE,
&input[0], &hidden[0], &computed_output[0],
&weights_ih[0], &weights_ho[0]);
}
t_end = omp_get_wtime();
std::cout << "First values of computed output: " << std::endl;
print_matrix(computed_output, 1, 16);
std::cout << "Actual first values: " << std::endl;
print_matrix(actual_output, 1, 16);
std::cout << "Forward Pass Time: " << std::endl;
std::cout << (t_end - t_ini)/(float)N_TIME << std::endl;
}
|
677f6bb9b757c6154a72d7bb3c1e8eb08d6d64b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include <cmath>
#include "wfPad.h"
#include "revOp.h"
#include "timer.h"
extern "C"
{
/*
-----------------------------------------------------------------
*/
__global__ void copyPadded(fcomp * paste, fcomp * copyied, \
int nf, int nx, int M)
{
int dim_x = nx+2*M;
int pixelIdx_x = blockIdx.x * blockDim.x + threadIdx.x;
int pixelIdx_y = blockIdx.y * blockDim.y + threadIdx.y;
if(pixelIdx_x < nx && pixelIdx_y < nf){
int pixelIdx = pixelIdx_y * dim_x + pixelIdx_x + M;
paste[pixelIdx] = copyied[pixelIdx];
}
}
/*
------------------------------------------------------------------
*/
__global__ void imaging(fcomp * image, fcomp * forw_pulse, fcomp * back_pulse, \
int nf, int nx, int M)
{
int dim_x = nx+2*M;
int pixelIdx_x = blockIdx.x * blockDim.x + threadIdx.x;
fcomp conv;
for(int j=0; j<nf; j++){
int Idx = j * dim_x + pixelIdx_x + M;
conv += forw_pulse[Idx] * thrust::conj(back_pulse[Idx]);
}
image[pixelIdx_x] = conv;
}
/*
------------------------------------------------------------------
*/
__global__ void extrapDepth(fcomp * new_wf, int nf, int nx, \
int M, fcomp * w_op, fcomp * old_wf)
{
int dim_x = nx+2*M;
int length_M = 2*M+1;
int xIdx = blockIdx.x * blockDim.x + threadIdx.x;
int fIdx = blockIdx.y * blockDim.y + threadIdx.y;
fcomp pixel = fcomp(0.0,0.0);
if(xIdx < nx && fIdx < nf){
for(int k=0; k<length_M; ++k){
pixel += w_op[fIdx*nx*length_M + k*nx + xIdx] * \
old_wf[fIdx*dim_x + xIdx + k];
}
new_wf[fIdx*dim_x + M + xIdx] = pixel;
}
} // end extrapolation to next depth
/*
------------------------------------------------------------------
*/
void extrapolate(int ns, int nextrap, int nz, int nt, int nf, int nx, int M,\
fcomp * w_op_forw, fcomp * forw_pulse, fcomp * w_op_back, fcomp * back_pulse,\
float * image)
{
//define important dimensionality parameters
int length_M = 2*M+1;
int dim_x = nx+2*M;
size_t sizePulse = nf * dim_x;
size_t sizeAllSources = ns * sizePulse;
size_t sizeOp = nextrap * nf * nx * length_M;
size_t sizeImage = nz * nx;
size_t sizeAllImages = ns * sizeImage;
//rearrange operators
timer t0("REARRANGE OPERATORS");
fcomp * h_w_op_forw = reverseOperator(w_op_forw, nextrap, nf, nx, length_M, t0); //reverse operator's last two indices on host
fcomp * h_w_op_back = reverseOperator(w_op_back, nextrap, nf, nx, length_M, t0); //reverse operator's last two indices on host
//allocate device memory
fcomp * d_image;
hipMalloc(&d_image, sizeAllImages * sizeof(fcomp));
fcomp * d_w_op_forw, * d_old_forw, * d_new_forw;
hipMalloc(&d_w_op_forw, sizeOp * sizeof(fcomp));
hipMalloc(&d_old_forw, sizeAllSources * sizeof(fcomp));
hipMalloc(&d_new_forw, sizeAllSources * sizeof(fcomp));
fcomp * d_w_op_back, * d_old_back, * d_new_back;
hipMalloc(&d_w_op_back, sizeOp * sizeof(fcomp));
hipMalloc(&d_old_back, sizeAllSources * sizeof(fcomp));
hipMalloc(&d_new_back, sizeAllSources * sizeof(fcomp));
//copy operators on device
hipMemcpy(d_w_op_forw, h_w_op_forw, sizeOp*sizeof(fcomp), hipMemcpyHostToDevice);
hipMemcpy(d_w_op_back, h_w_op_back, sizeOp*sizeof(fcomp), hipMemcpyHostToDevice);
timer t1("CONSTRUCT PADDED WAVEFIELDS");
t1.start();
//allocate and read wavefields
fcomp * h_image = new fcomp[sizeAllImages];
std::vector<wfpad> h_forw_pulses(ns);
std::vector<wfpad> h_back_pulses(ns);
for(int is=0; is<ns; ++is){
h_forw_pulses[is] = wfpad(nf, nx, 1, M, 0, &forw_pulse[is*nt*nx]);
h_back_pulses[is] = wfpad(nf, nx, 1, M, 0, &back_pulse[is*nt*nx]);
}
t1.stop();
//define number of blocks and number of threads per block
//define number of blocks and number of threads per block
dim3 nThreads(32, 1, 1);
size_t nBlocks_x = nx % nThreads.x == 0 ? size_t(nx/nThreads.x) : size_t(1 + nx/nThreads.x);
size_t nBlocks_y = nf % nThreads.y == 0 ? size_t(nf/nThreads.y) : size_t(1 + nf/nThreads.y);
size_t nBlocks_z = 1;
dim3 nBlocks(nBlocks_x, nBlocks_y, nBlocks_z);
std::cout << "nThreads: (" << nThreads.x << ", " << nThreads.y << ", " << nThreads.z << ")" << std::endl;
std::cout << "nBlocks: (" << nBlocks.x << ", " << nBlocks.y << ", " << nBlocks.z << ")" << std::endl;
//create one stream per source
hipStream_t streams[ns];
timer t5("EXTRAPOLATION AND IMAGING");
t5.start();
for(int is=0; is<ns; ++is){
hipStreamCreate(&streams[is]);
hipMemcpyAsync(&d_old_forw[is*sizePulse], h_forw_pulses[is].wf, \
sizePulse*sizeof(fcomp), hipMemcpyHostToDevice, streams[is]);
hipMemcpyAsync(&d_old_back[is*sizePulse], h_back_pulses[is].wf, \
sizePulse*sizeof(fcomp), hipMemcpyHostToDevice, streams[is]);
for(int l=0; l<nextrap; ++l){
int depthIdx = l*nf*nx*length_M;
hipLaunchKernelGGL(( extrapDepth), dim3(nBlocks), dim3(nThreads), 0, streams[is], &d_new_forw[is*sizePulse], nf, nx, \
M, &d_w_op_forw[depthIdx], &d_old_forw[is*sizePulse]);
hipLaunchKernelGGL(( extrapDepth), dim3(nBlocks), dim3(nThreads), 0, streams[is], &d_new_back[is*sizePulse], nf, nx, \
M, &d_w_op_back[depthIdx], &d_old_back[is*sizePulse]);
hipLaunchKernelGGL(( imaging), dim3(1), dim3(nx), 0, streams[is], &d_image[is*sizeImage + l*nx], &d_new_forw[is*sizePulse], \
&d_new_back[is*sizePulse], nf, nx, M);
hipLaunchKernelGGL(( copyPadded), dim3(nBlocks), dim3(nThreads), 0, streams[is], &d_old_forw[is*sizePulse], &d_new_forw[is*sizePulse],\
nf, nx, M);
hipLaunchKernelGGL(( copyPadded), dim3(nBlocks), dim3(nThreads), 0, streams[is], &d_old_back[is*sizePulse], &d_new_back[is*sizePulse],\
nf, nx, M);
}
hipMemcpyAsync(h_forw_pulses[is].wf, &d_new_forw[is*sizePulse], \
sizePulse*sizeof(fcomp), hipMemcpyDeviceToHost, streams[is]);
hipMemcpyAsync(h_back_pulses[is].wf, &d_new_back[is*sizePulse], \
sizePulse*sizeof(fcomp), hipMemcpyDeviceToHost, streams[is]);
hipMemcpyAsync(&h_image[is*sizeImage], &d_image[is*sizeImage], \
sizeImage*sizeof(fcomp), hipMemcpyDeviceToHost, streams[is]);
hipStreamDestroy(streams[is]);
}
hipDeviceSynchronize();
t5.stop();
timer t2("WRITE-BACK UNPADDED WAVEFIELDS");
t2.start();
//copy to unpadded memory
for(int is=0; is<ns; ++is)
for (int j=0; j<nf; ++j)
for (int i=0; i<nx; ++i){
forw_pulse[is*sizePulse + j*nx + i] = h_forw_pulses[is].wf[j*dim_x + i + M];
back_pulse[is*sizePulse + j*nx + i] = h_back_pulses[is].wf[j*dim_x + i + M];
}
t2.stop();
timer t3("READ IMAGES");
t3.start();
//take real part of images
for(int is=0; is<ns; ++is)
for(int l=0; l<nextrap; ++l)
for(int i=0; i<nx; ++i){
image[is*sizeImage + l*nx + i] = reinterpret_cast<float*>(h_image)[2*(is*sizeImage + l*nx + i)];
}
t3.stop();
timer t4("FREE HOST MEMORY");
t4.start();
//free host memory
delete [] h_image;
delete [] h_w_op_forw;
delete [] h_w_op_back;
t4.stop();
//free device memory
hipFree(d_w_op_forw);
hipFree(d_w_op_back);
hipFree(d_new_forw);
hipFree(d_old_forw);
hipFree(d_new_back);
hipFree(d_old_back);
hipFree(d_image);
std::cout << std::endl;
std::cout << "------- Timer info -------" << std::endl;
std::cout << "--------------------------" << std::endl;
t0.dispInfo();
t1.dispInfo();
t2.dispInfo();
t3.dispInfo();
t4.dispInfo();
t5.dispInfo();
}
} //end extern "C"
| 677f6bb9b757c6154a72d7bb3c1e8eb08d6d64b2.cu |
#include <iostream>
#include <vector>
#include <cmath>
#include "wfPad.h"
#include "revOp.h"
#include "timer.h"
extern "C"
{
/*
-----------------------------------------------------------------
*/
__global__ void copyPadded(fcomp * paste, fcomp * copyied, \
int nf, int nx, int M)
{
int dim_x = nx+2*M;
int pixelIdx_x = blockIdx.x * blockDim.x + threadIdx.x;
int pixelIdx_y = blockIdx.y * blockDim.y + threadIdx.y;
if(pixelIdx_x < nx && pixelIdx_y < nf){
int pixelIdx = pixelIdx_y * dim_x + pixelIdx_x + M;
paste[pixelIdx] = copyied[pixelIdx];
}
}
/*
------------------------------------------------------------------
*/
__global__ void imaging(fcomp * image, fcomp * forw_pulse, fcomp * back_pulse, \
int nf, int nx, int M)
{
int dim_x = nx+2*M;
int pixelIdx_x = blockIdx.x * blockDim.x + threadIdx.x;
fcomp conv;
for(int j=0; j<nf; j++){
int Idx = j * dim_x + pixelIdx_x + M;
conv += forw_pulse[Idx] * thrust::conj(back_pulse[Idx]);
}
image[pixelIdx_x] = conv;
}
/*
------------------------------------------------------------------
*/
__global__ void extrapDepth(fcomp * new_wf, int nf, int nx, \
int M, fcomp * w_op, fcomp * old_wf)
{
int dim_x = nx+2*M;
int length_M = 2*M+1;
int xIdx = blockIdx.x * blockDim.x + threadIdx.x;
int fIdx = blockIdx.y * blockDim.y + threadIdx.y;
fcomp pixel = fcomp(0.0,0.0);
if(xIdx < nx && fIdx < nf){
for(int k=0; k<length_M; ++k){
pixel += w_op[fIdx*nx*length_M + k*nx + xIdx] * \
old_wf[fIdx*dim_x + xIdx + k];
}
new_wf[fIdx*dim_x + M + xIdx] = pixel;
}
} // end extrapolation to next depth
/*
------------------------------------------------------------------
*/
void extrapolate(int ns, int nextrap, int nz, int nt, int nf, int nx, int M,\
fcomp * w_op_forw, fcomp * forw_pulse, fcomp * w_op_back, fcomp * back_pulse,\
float * image)
{
//define important dimensionality parameters
int length_M = 2*M+1;
int dim_x = nx+2*M;
size_t sizePulse = nf * dim_x;
size_t sizeAllSources = ns * sizePulse;
size_t sizeOp = nextrap * nf * nx * length_M;
size_t sizeImage = nz * nx;
size_t sizeAllImages = ns * sizeImage;
//rearrange operators
timer t0("REARRANGE OPERATORS");
fcomp * h_w_op_forw = reverseOperator(w_op_forw, nextrap, nf, nx, length_M, t0); //reverse operator's last two indices on host
fcomp * h_w_op_back = reverseOperator(w_op_back, nextrap, nf, nx, length_M, t0); //reverse operator's last two indices on host
//allocate device memory
fcomp * d_image;
cudaMalloc(&d_image, sizeAllImages * sizeof(fcomp));
fcomp * d_w_op_forw, * d_old_forw, * d_new_forw;
cudaMalloc(&d_w_op_forw, sizeOp * sizeof(fcomp));
cudaMalloc(&d_old_forw, sizeAllSources * sizeof(fcomp));
cudaMalloc(&d_new_forw, sizeAllSources * sizeof(fcomp));
fcomp * d_w_op_back, * d_old_back, * d_new_back;
cudaMalloc(&d_w_op_back, sizeOp * sizeof(fcomp));
cudaMalloc(&d_old_back, sizeAllSources * sizeof(fcomp));
cudaMalloc(&d_new_back, sizeAllSources * sizeof(fcomp));
//copy operators on device
cudaMemcpy(d_w_op_forw, h_w_op_forw, sizeOp*sizeof(fcomp), cudaMemcpyHostToDevice);
cudaMemcpy(d_w_op_back, h_w_op_back, sizeOp*sizeof(fcomp), cudaMemcpyHostToDevice);
timer t1("CONSTRUCT PADDED WAVEFIELDS");
t1.start();
//allocate and read wavefields
fcomp * h_image = new fcomp[sizeAllImages];
std::vector<wfpad> h_forw_pulses(ns);
std::vector<wfpad> h_back_pulses(ns);
for(int is=0; is<ns; ++is){
h_forw_pulses[is] = wfpad(nf, nx, 1, M, 0, &forw_pulse[is*nt*nx]);
h_back_pulses[is] = wfpad(nf, nx, 1, M, 0, &back_pulse[is*nt*nx]);
}
t1.stop();
//define number of blocks and number of threads per block
//define number of blocks and number of threads per block
dim3 nThreads(32, 1, 1);
size_t nBlocks_x = nx % nThreads.x == 0 ? size_t(nx/nThreads.x) : size_t(1 + nx/nThreads.x);
size_t nBlocks_y = nf % nThreads.y == 0 ? size_t(nf/nThreads.y) : size_t(1 + nf/nThreads.y);
size_t nBlocks_z = 1;
dim3 nBlocks(nBlocks_x, nBlocks_y, nBlocks_z);
std::cout << "nThreads: (" << nThreads.x << ", " << nThreads.y << ", " << nThreads.z << ")" << std::endl;
std::cout << "nBlocks: (" << nBlocks.x << ", " << nBlocks.y << ", " << nBlocks.z << ")" << std::endl;
//create one stream per source
cudaStream_t streams[ns];
timer t5("EXTRAPOLATION AND IMAGING");
t5.start();
for(int is=0; is<ns; ++is){
cudaStreamCreate(&streams[is]);
cudaMemcpyAsync(&d_old_forw[is*sizePulse], h_forw_pulses[is].wf, \
sizePulse*sizeof(fcomp), cudaMemcpyHostToDevice, streams[is]);
cudaMemcpyAsync(&d_old_back[is*sizePulse], h_back_pulses[is].wf, \
sizePulse*sizeof(fcomp), cudaMemcpyHostToDevice, streams[is]);
for(int l=0; l<nextrap; ++l){
int depthIdx = l*nf*nx*length_M;
extrapDepth<<<nBlocks, nThreads, 0, streams[is]>>>(&d_new_forw[is*sizePulse], nf, nx, \
M, &d_w_op_forw[depthIdx], &d_old_forw[is*sizePulse]);
extrapDepth<<<nBlocks, nThreads, 0, streams[is]>>>(&d_new_back[is*sizePulse], nf, nx, \
M, &d_w_op_back[depthIdx], &d_old_back[is*sizePulse]);
imaging<<<1, nx, 0, streams[is]>>>(&d_image[is*sizeImage + l*nx], &d_new_forw[is*sizePulse], \
&d_new_back[is*sizePulse], nf, nx, M);
copyPadded<<<nBlocks, nThreads, 0, streams[is]>>>(&d_old_forw[is*sizePulse], &d_new_forw[is*sizePulse],\
nf, nx, M);
copyPadded<<<nBlocks, nThreads, 0, streams[is]>>>(&d_old_back[is*sizePulse], &d_new_back[is*sizePulse],\
nf, nx, M);
}
cudaMemcpyAsync(h_forw_pulses[is].wf, &d_new_forw[is*sizePulse], \
sizePulse*sizeof(fcomp), cudaMemcpyDeviceToHost, streams[is]);
cudaMemcpyAsync(h_back_pulses[is].wf, &d_new_back[is*sizePulse], \
sizePulse*sizeof(fcomp), cudaMemcpyDeviceToHost, streams[is]);
cudaMemcpyAsync(&h_image[is*sizeImage], &d_image[is*sizeImage], \
sizeImage*sizeof(fcomp), cudaMemcpyDeviceToHost, streams[is]);
cudaStreamDestroy(streams[is]);
}
cudaDeviceSynchronize();
t5.stop();
timer t2("WRITE-BACK UNPADDED WAVEFIELDS");
t2.start();
//copy to unpadded memory
for(int is=0; is<ns; ++is)
for (int j=0; j<nf; ++j)
for (int i=0; i<nx; ++i){
forw_pulse[is*sizePulse + j*nx + i] = h_forw_pulses[is].wf[j*dim_x + i + M];
back_pulse[is*sizePulse + j*nx + i] = h_back_pulses[is].wf[j*dim_x + i + M];
}
t2.stop();
timer t3("READ IMAGES");
t3.start();
//take real part of images
for(int is=0; is<ns; ++is)
for(int l=0; l<nextrap; ++l)
for(int i=0; i<nx; ++i){
image[is*sizeImage + l*nx + i] = reinterpret_cast<float*>(h_image)[2*(is*sizeImage + l*nx + i)];
}
t3.stop();
timer t4("FREE HOST MEMORY");
t4.start();
//free host memory
delete [] h_image;
delete [] h_w_op_forw;
delete [] h_w_op_back;
t4.stop();
//free device memory
cudaFree(d_w_op_forw);
cudaFree(d_w_op_back);
cudaFree(d_new_forw);
cudaFree(d_old_forw);
cudaFree(d_new_back);
cudaFree(d_old_back);
cudaFree(d_image);
std::cout << std::endl;
std::cout << "------- Timer info -------" << std::endl;
std::cout << "--------------------------" << std::endl;
t0.dispInfo();
t1.dispInfo();
t2.dispInfo();
t3.dispInfo();
t4.dispInfo();
t5.dispInfo();
}
} //end extern "C"
|
b3d461567ec458c1084c4543f583c38004b6ba60.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <rocblas.h>
#include "common_data.h"
using namespace std;
int totalMemD = 0;
int permanentMemD = 0; //
int modelMemD = 0; //
int degreeMemD = 0; // BB
int tessMemD = 0; // B
int viewMemD = 0; //
//ofstream fout("cuda.txt");
void callCudaThreadSynchronize()
{
hipDeviceSynchronize();
}
/* B */
extern float matrix_b_spline_f[185];
static __device__ float matrix_b_spline_d[185];
/* B B */
template <typename T>
__host__ __device__ T *matrixCase(T *matrix_b_spline, int order, int ctrlPointNum, int leftIdx)
{
if (order == 1)
return matrix_b_spline; // MB1
else if (order == 2)
return matrix_b_spline + 1; // MB2
else if (order == 3)
{
if (ctrlPointNum == 3)
return matrix_b_spline + 5; // MB30
else
{
if (leftIdx == 2)
return matrix_b_spline + 14; // MB31
else if (leftIdx == ctrlPointNum - 1)
return matrix_b_spline + 23; // MB32
else
return matrix_b_spline + 32; // MB33
}
}
else
{
if (ctrlPointNum == 4)
return matrix_b_spline + 41; // MB40
else if (ctrlPointNum == 5)
{
if (leftIdx == 3)
return matrix_b_spline + 57; // MB41
else
return matrix_b_spline + 73; // MB42
}
else if (ctrlPointNum == 6)
{
if (leftIdx == 3)
return matrix_b_spline + 89; // MB43
else if (leftIdx == 4)
return matrix_b_spline + 105; // MB44
else
return matrix_b_spline + 121; // MB45
}
else
{
if (leftIdx == 3)
return matrix_b_spline + 89; // MB43
else if (leftIdx == 4)
return matrix_b_spline + 137; // MB46
else if (leftIdx == ctrlPointNum - 2)
return matrix_b_spline + 153; // MB47
else if (leftIdx == ctrlPointNum - 1)
return matrix_b_spline + 121; // MB45
else
return matrix_b_spline + 169; // MB48
}
}
}
// CPU
double *matrixCaseHost(double *matrix_b_spline, int order, int ctrlPointNum, int leftIdx)
{
return matrixCase(matrix_b_spline, order, ctrlPointNum, leftIdx);
}
static __device__ float3 ctrlPointD[15][15][15]; // truthFFD
static __device__ float knotListD[3 * 20]; //
/*
* B
* FFD
*/
__device__ float3 BSplineVolumeValueMatrixD(float u, float v, float w,
int leftUIdx, int leftVIdx, int leftWIdx,
int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW)
{
float3 result;
float3 tempCtrlPoint1[4];
float3 tempCtrlPoint2[4][4];
float *M, temp[4], mul1[4];
float tempKnot = knotListD[leftUIdx];
u = (u - tempKnot) / (knotListD[leftUIdx + 1] - tempKnot);
tempKnot = knotListD[20 + leftVIdx];
v = (v - tempKnot) / (knotListD[20 + leftVIdx + 1] - tempKnot);
tempKnot = knotListD[40 + leftWIdx];
w = (w - tempKnot) / (knotListD[40 + leftWIdx + 1] - tempKnot);
//
temp[0] = 1.0f;
temp[1] = w;
temp[2] = w * w;
temp[3] = temp[2] * w;
M = matrixCase(matrix_b_spline_d, orderW, ctrlPointNumW, leftWIdx);
for (int i = 0; i < orderW; ++i)
{
mul1[i] = 0.0f;
for (int j = 0; j < orderW; ++j)
{
mul1[i] += temp[j] * M[j * orderW + i];
}
}
for (int i = 0; i < orderU; ++i)
{
for (int j = 0; j < orderV; ++j)
{
tempCtrlPoint2[i][j].x = 0.0f;
tempCtrlPoint2[i][j].y = 0.0f;
tempCtrlPoint2[i][j].z = 0.0f;
for (int k = 0; k < orderW; ++k)
{
float3 cp = ctrlPointD[leftUIdx - i][leftVIdx - j][leftWIdx - k];
tempCtrlPoint2[i][j].x += cp.x * mul1[orderW - 1 - k];
tempCtrlPoint2[i][j].y += cp.y * mul1[orderW - 1 - k];
tempCtrlPoint2[i][j].z += cp.z * mul1[orderW - 1 - k];
}
}
}
//
temp[1] = v;
temp[2] = v * v;
temp[3] = temp[2] * v;
M = matrixCase(matrix_b_spline_d, orderV, ctrlPointNumV, leftVIdx);
for (int i = 0; i < orderV; ++i)
{
mul1[i] = 0.0;
for (int j = 0; j < orderV; ++j)
{
mul1[i] += temp[j] * M[j * orderV + i];
}
}
for (int i = 0; i < orderU; ++i)
{
tempCtrlPoint1[i].x = 0.0f;
tempCtrlPoint1[i].y = 0.0f;
tempCtrlPoint1[i].z = 0.0f;
for (int j = 0; j < orderV; ++j)
{
tempCtrlPoint1[i].x += tempCtrlPoint2[i][j].x * mul1[orderV - 1 - j];
tempCtrlPoint1[i].y += tempCtrlPoint2[i][j].y * mul1[orderV - 1 - j];
tempCtrlPoint1[i].z += tempCtrlPoint2[i][j].z * mul1[orderV - 1 - j];
}
}
//
temp[1] = u;
temp[2] = u * u;
temp[3] = temp[2] * u;
M = matrixCase(matrix_b_spline_d, orderU, ctrlPointNumU, leftUIdx);
for (int i = 0; i < orderU; ++i)
{
mul1[i] = 0.0;
for (int j = 0; j < orderU; ++j)
{
mul1[i] += temp[j] * M[j * orderU + i];
}
}
result.x = 0.0f;
result.y = 0.0f;
result.z = 0.0f;
for (int i = 0; i < orderU; ++i)
{
result.x += tempCtrlPoint1[i].x * mul1[orderU - 1 - i];
result.y += tempCtrlPoint1[i].y * mul1[orderU - 1 - i];
result.z += tempCtrlPoint1[i].z * mul1[orderU - 1 - i];
}
return result;
}
/*
* kernel u, v, w B
* FFD
*/
__global__ void fromParamToCoordOnePoint(float3 *vertexCoordListD, float3 *vertexParamListD,
int vertexCount, int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW,
int knotIntervalCountU, int knotIntervalCountV, int knotIntervalCountW)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= vertexCount)
return;
float3 tempVertexParam = vertexParamListD[idx];
float u = tempVertexParam.x;
float v = tempVertexParam.y;
float w = tempVertexParam.z;
//
int leftUIdx, leftVIdx, leftWIdx;
leftUIdx = orderU - 1 + knotIntervalCountU - 1;
leftVIdx = orderV - 1 + knotIntervalCountV - 1;
leftWIdx = orderW - 1 + knotIntervalCountW - 1;
// U
for (int i = orderU - 1; i <= orderU - 1 + knotIntervalCountU - 1; ++i)
{
if (u >= knotListD[i] && u < knotListD[i + 1])
{
leftUIdx = i;
break;
}
}
// V
for (int j = orderV - 1; j <= orderV - 1 + knotIntervalCountV - 1; ++j)
{
if (v >= knotListD[20 + j] && v < knotListD[20 + j + 1])
{
leftVIdx = j;
break;
}
}
// W
for (int k = orderW - 1; k <= orderW - 1 + knotIntervalCountW - 1; ++k)
{
if (w >= knotListD[40 + k] && w < knotListD[40 + k + 1])
{
leftWIdx = k;
break;
}
}
vertexCoordListD[idx] = BSplineVolumeValueMatrixD(u, v, w, leftUIdx, leftVIdx, leftWIdx,
orderU, orderV, orderW,
ctrlPointNumU, ctrlPointNumV, ctrlPointNumW);
}
float3 *vertexParamListD = 0; //
float3 *vertexCoordListD = 0; //
float3 *vertexParamListD_teapot = 0; //
float3 *normalParamListD_teapot = 0; //
//float3 *vertexCoordListD_teapot = 0; //
int vertexCount_teapot;
int order[3], ctrlPointNum[3], knotIntervalCount[3], knotCount[3]; //
float knotList[3][20]; //
float3 ctrlPoint[15][15][15]; // B
/*
* B
* FFD
*/
void fromParamToCoordD(CommonData *commonData)
{
int vertexCount = commonData->vertexCount();
int threadCount = commonData->ffdThreadCount();
hipLaunchKernelGGL(( fromParamToCoordOnePoint), dim3(vertexCount / threadCount + 1), dim3(threadCount), 0, 0,
vertexCoordListD, vertexParamListD,
vertexCount, order[U], order[V], order[W],
ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W],
knotIntervalCount[U], knotIntervalCount[V], knotIntervalCount[W]);
float3 *vertexCoordList = new float3[vertexCount];
hipMemcpy(vertexCoordList, vertexCoordListD, sizeof(float3) * vertexCount, hipMemcpyDeviceToHost);
for (int i = 0; i < vertexCount; ++i)
commonData->setVertexCoord(i, vertexCoordList[i].x, vertexCoordList[i].y, vertexCoordList[i].z);
delete []vertexCoordList;
}
/*------------------------------------------------------- FFD ---------------------------------------------------------*/
/*------------------------------------------------------- AFFD ---------------------------------------------------------*/
/* astring */
string longNumber(int a)
{
string result;
do
{
ostringstream oss;
int remainder = a % 1000;
if (a >= 1000)
{
if (remainder < 10)
oss << "00" << remainder;
else if (remainder >= 10 && remainder < 100)
oss << "0" << remainder;
else
oss << remainder;
}
else
oss << remainder;
if (result.size() == 0)
result = oss.str();
else
result = oss.str() + "," + result;
a /= 1000;
}while(a > 0);
return result;
}
/* */
void printMemD(const char *file, const char *function, int line, int memSize, string info)
{
/* */
string fileName(file);
int lastSlashPos = fileName.rfind('/');
fileName = fileName.substr(lastSlashPos + 1, fileName.size());
/*#define PRINT_MEM*/
#ifdef PRINT_MEM
/*totalMemD += memSize;*/
cout << info << "\n"
<< "\t" << fileName << "" << function << ", " << line << "" << longNumber(memSize) << ", "
<< "" << longNumber(permanentMemD + modelMemD + degreeMemD + tessMemD + viewMemD) << "\n"
<< "\tpermanent = " << longNumber(permanentMemD) << ", model = " << longNumber(modelMemD)
<< ", degreeMemD = " << longNumber(degreeMemD) << ", tessMemD = " << longNumber(tessMemD)
<< ", view = " << longNumber(viewMemD) << endl;
#endif
}
void printCudaError(const char *file, const char *function, int line)
{
/* */
string fileName(file);
int lastSlashPos = fileName.rfind('/');
fileName = fileName.substr(lastSlashPos + 1, fileName.size());
hipError_t cymError = hipGetLastError();
if (cymError)
cout << fileName << "" << line << ", " << cymError << ": " << hipGetErrorString(cymError) << endl;
}
__host__ __device__ inline const float3 operator+(const float3 &a, const float3 &b)
{
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
__host__ __device__ inline const float3 operator-(const float3 &a, const float3 &b)
{
return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
__host__ __device__ inline const float3 operator-(const float3 &a)
{
return make_float3(-a.x, -a.y, -a.z);
}
__host__ __device__ inline const float3 operator*(float a, const float3 &b)
{
return make_float3(a * b.x, a * b.y, a * b.z);
}
__host__ __device__ inline const float3 operator*(const float3 &a, float b)
{
return make_float3(a.x * b, a.y * b, a.z * b);
}
__host__ __device__ inline const float3 operator/(const float3 &a, float b)
{
return make_float3(a.x / b, a.y / b, a.z / b);
}
__host__ __device__ inline float operator*(const float3 &a, const float3 &b)
{
return a.x * b.x + a.y * b.y + a.z * b.z;
}
__device__ float3 cross(const float3 &a, const float3 &b)
{
return make_float3(a.y * b.z - a.z * b.y,
a.z * b.x - a.x * b.z,
a.x * b.y - a.y * b.x);
}
__host__ __device__ inline void operator*=(float3 &a, float b)
{
a.x *= b;
a.y *= b;
a.z *= b;
}
__host__ __device__ inline void operator/=(float3 &a, float b)
{
a.x /= b;
a.y /= b;
a.z /= b;
}
__host__ __device__ inline void operator+=(float3 &a, const float3 &b)
{
a.x += b.x;
a.y += b.y;
a.z += b.z;
}
__host__ __device__ inline void operator-=(float3 &a, const float3 &b)
{
a.x -= b.x;
a.y -= b.y;
a.z -= b.z;
}
__device__ inline float length(const float3 &v)
{
return sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
}
__device__ inline void normalize(float3 &v)
{
float length_inverse = 1.0 / length(v);
v *= length_inverse;
}
hipblasHandle_t cublas_handle = 0;
/* B */
void loadMatrixBSplineD()
{
hipMemcpyToSymbol(matrix_b_spline_d, matrix_b_spline_f, sizeof(float) * 185);
hipblasCreate(&cublas_handle);
}
static __device__ float3 newCtrlPointD[15][15][15][4][4][4]; // 4x4x4
/*
* BMu, Mv, Mw
*
*/
__global__ void calcNewCtrlPointD(int order_u, int order_v, int order_w,
int ctrlPointNum_u, int ctrlPointNum_v, int ctrlPointNum_w)
{
int ii = blockIdx.x;
int jj = blockIdx.y;
int kk = blockIdx.z;
int leftUIdx = ii + order_u - 1;
int leftVIdx = jj + order_v - 1;
int leftWIdx = kk + order_w - 1;
float *Mu = matrixCase(matrix_b_spline_d, order_u, ctrlPointNum_u, leftUIdx);
float *Mv = matrixCase(matrix_b_spline_d, order_v, ctrlPointNum_v, leftVIdx);
float *Mw = matrixCase(matrix_b_spline_d, order_w, ctrlPointNum_w, leftWIdx);
//
int base_i = leftUIdx - order_u + 1;
int base_j = leftVIdx - order_v + 1;
int base_k = leftWIdx - order_w + 1;
for (int k = 0; k < order_w; ++k)
for (int i = 0; i < order_u; ++i)
for (int j = 0; j < order_v; ++j)
{
newCtrlPointD[ii][jj][kk][i][j][k] = make_float3(0.0, 0.0, 0.0);
for (int l = 0; l < order_u; ++l)
{
float3 cp = ctrlPointD[base_i + l][base_j + j][base_k + k];
newCtrlPointD[ii][jj][kk][i][j][k] += Mu[i * order_u + l] * cp;
}
}
//
float3 box[4][4][4];
for (int i = 0; i < order_u; ++i)
for (int j = 0; j < order_v; ++j)
for (int k = 0; k < order_w; ++k)
{
box[i][j][k] = make_float3(0.0, 0.0, 0.0);
for (int l = 0; l < order_v; ++l)
{
float3 cp = newCtrlPointD[ii][jj][kk][i][l][k];
box[i][j][k] += Mv[j * order_v + l] * cp;
}
}
//
for (int j = 0; j < order_v; ++j)
for (int k = 0; k < order_w; ++k)
for (int i = 0; i < order_u; ++i)
{
newCtrlPointD[ii][jj][kk][i][j][k] = make_float3(0.0, 0.0, 0.0);
for (int l = 0; l < order_w; ++l)
{
float3 cp = box[i][j][l];
newCtrlPointD[ii][jj][kk][i][j][k] += Mw[k * order_w + l] * cp;
}
}
}
/*
* B
*
*/
void copyCtrlPointD(CommonData *commonData)
{
for (int i = 0; i < ctrlPointNum[U]; ++i)
{
for (int j = 0; j < ctrlPointNum[V]; ++j)
{
for (int k = 0; k < ctrlPointNum[W]; ++k)
{
ctrlPoint[i][j][k].x = (float)commonData->getCtrlPoint(i, j, k).x();
ctrlPoint[i][j][k].y = (float)commonData->getCtrlPoint(i, j, k).y();
ctrlPoint[i][j][k].z = (float)commonData->getCtrlPoint(i, j, k).z();
}
}
}
hipMemcpyToSymbol(ctrlPointD, &ctrlPoint[0][0][0], sizeof(float3) * 15 * 15 * 15);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
hipLaunchKernelGGL(( calcNewCtrlPointD), dim3(dim3(knotIntervalCount[U], knotIntervalCount[V], knotIntervalCount[W])), dim3(1), 0, 0,
order[U], order[V], order[W], ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W]);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
}
/* */
void preCalcD(CommonData *commonData)
{
for (int i = 0; i < 3; ++i)
{
order[i] = commonData->order(i);
ctrlPointNum[i] = commonData->ctrlPointCount(i);
knotIntervalCount[i] = commonData->knotIntervalCount(i);
knotCount[i] = order[i] + ctrlPointNum[i];
}
for (int i = 0; i < 3; ++i)
for (int j = 0; j < knotCount[i]; ++j)
knotList[i][j] = (float)commonData->getKnot(i, j);
hipMemcpyToSymbol(knotListD, &knotList[0][0], sizeof(float) * 3 * 20);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
int vertexCount = commonData->vertexCount();
float3 *vertexParamListAlloc = new float3[vertexCount];
for (int i = 0; i < vertexCount; ++i)
{
vertexParamListAlloc[i].x = (float)commonData->vertexParam(i).u();
vertexParamListAlloc[i].y = (float)commonData->vertexParam(i).v();
vertexParamListAlloc[i].z = (float)commonData->vertexParam(i).w();
}
modelMemD += sizeof(float3) * vertexCount;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float3) * vertexCount, "@FFD");
hipMalloc((void**)&vertexParamListD, sizeof(float3) * vertexCount);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
hipMemcpy(vertexParamListD, vertexParamListAlloc, sizeof(float3) * vertexCount, hipMemcpyHostToDevice);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
delete []vertexParamListAlloc;
vertexParamListAlloc = 0;
modelMemD += sizeof(float3) * vertexCount;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float3) * vertexCount, "@FFD");
hipMalloc((void**)&vertexCoordListD, sizeof(float3) * vertexCount);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
copyCtrlPointD(commonData);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
// teapot
vertexCount_teapot = commonData->vertexCount_teapot();
vertexParamListAlloc = new float3[vertexCount_teapot];
for (int i = 0; i < vertexCount_teapot; ++i)
{
vertexParamListAlloc[i].x = (float)commonData->vertexParam_teapot(i).x();
vertexParamListAlloc[i].y = (float)commonData->vertexParam_teapot(i).y();
vertexParamListAlloc[i].z = (float)commonData->vertexParam_teapot(i).z();
}
modelMemD += sizeof(float3) * vertexCount_teapot;
hipMalloc((void**)&vertexParamListD_teapot, sizeof(float3) * vertexCount_teapot);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
hipMemcpy(vertexParamListD_teapot, vertexParamListAlloc, sizeof(float3) * vertexCount_teapot, hipMemcpyHostToDevice);
for (int i = 0; i < vertexCount_teapot; ++i)
{
vertexParamListAlloc[i].x = (float)commonData->normalParam_teapot(i).i();
vertexParamListAlloc[i].y = (float)commonData->normalParam_teapot(i).j();
vertexParamListAlloc[i].z = (float)commonData->normalParam_teapot(i).k();
}
hipMalloc((void**)&normalParamListD_teapot, sizeof(float3) * vertexCount_teapot);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
hipMemcpy(normalParamListD_teapot, vertexParamListAlloc, sizeof(float3) * vertexCount_teapot, hipMemcpyHostToDevice);
delete []vertexParamListAlloc;
}
int *matrixFittingIdxD;
float *matrixFittingD;
void loadTriangleMatrixD()
{
extern int matrixFittingIdx[100];
hipMalloc((void**)&matrixFittingIdxD, sizeof(int) * 100);
permanentMemD += sizeof(int) * 100;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(int) * 100, "@");
hipMemcpy(matrixFittingIdxD, matrixFittingIdx, sizeof(int) * 100, hipMemcpyHostToDevice);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
extern float matrixFitting[39417];
hipMalloc((void**)&matrixFittingD, sizeof(float) * 39417);
permanentMemD += sizeof(float) * 39417;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * 39417, "@");
hipMemcpy(matrixFittingD, matrixFitting, sizeof(float) * 39417, hipMemcpyHostToDevice);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
}
struct TriangleD
{
float3 v[3], n[3], n_adj_origin[3], n_adj[3];
#ifdef LINE
float3 bary_origin[3];
#endif
int nc[3]; // nc0, nc1, nc2v2v0, v0v1, v1v2
float2 vt[3];
};
TriangleD *triangleListD;
float *sampleValueD, *triangleCtrlPointD;
float3 *sampleValueD_PN;
float *triangleCtrlPointD_PN, *triangleNormalCtrlPointD_PN;
int *triangle_adjacent_tableD;
int degree, degree_lower, triangleCtrlPointNum, triangleCtrlPointNum_lower, triangleNum, constrait_point_num;
int blockSizeStep0 = 128, activeThreadNumStep0, blockNumStep0;
int blockSizeStep1 = 128, activeThreadNumStep1, blockNumStep1;
int blockSizeAdjNormal = 128, activeThreadNumAdjNormal, blockNumAdjNormal;
int blockSizeStep0_PN = 128, blockNumStep0_PN;
#ifdef TRUTH
float *B_1D_truth, *sampleValueD_truth;
int activeThreadNumStep0_truth, blockNumStep0_truth;
#endif
int matrixStartIdxFitting;
__host__ __device__ inline int index2c(int i, int j, int stride)
{
return j * stride + i;
}
#ifdef DRAW_TRIANGULAR_CTRL_POINTS
float *triangular_ctrl_points;
#endif
void loadTriangleListD(const vector<Triangle> &triangleList, int *triangle_adjacent_table, int deg)
{
triangleNum = triangleList.size();
degree = deg;
/*degree_lower = deg;*/
degree_lower = 3;
triangleCtrlPointNum = (degree + 1) * (degree + 2) / 2;
triangleCtrlPointNum_lower = (degree_lower + 1) * (degree_lower + 2) / 2;
constrait_point_num = 3 * degree_lower;
TriangleD *tempTriangleList = new TriangleD[triangleNum];
for (vector<Triangle>::size_type i = 0; i < triangleNum; ++i)
{
for (int j = 0; j < 3; ++j)
{
tempTriangleList[i].v[j].x = triangleList[i].v[j].x();
tempTriangleList[i].v[j].y = triangleList[i].v[j].y();
tempTriangleList[i].v[j].z = triangleList[i].v[j].z();
tempTriangleList[i].n[j].x = triangleList[i].n[j].i();
tempTriangleList[i].n[j].y = triangleList[i].n[j].j();
tempTriangleList[i].n[j].z = triangleList[i].n[j].k();
tempTriangleList[i].n_adj_origin[j].x = triangleList[i].n_adj[j].i();
tempTriangleList[i].n_adj_origin[j].y = triangleList[i].n_adj[j].j();
tempTriangleList[i].n_adj_origin[j].z = triangleList[i].n_adj[j].k();
tempTriangleList[i].n_adj[j].x = triangleList[i].n_adj[j].i();
tempTriangleList[i].n_adj[j].y = triangleList[i].n_adj[j].j();
tempTriangleList[i].n_adj[j].z = triangleList[i].n_adj[j].k();
#ifdef LINE
tempTriangleList[i].bary_origin[j].x = triangleList[i].bary_origin[j].x();
tempTriangleList[i].bary_origin[j].y = triangleList[i].bary_origin[j].y();
tempTriangleList[i].bary_origin[j].z = triangleList[i].bary_origin[j].z();
#endif
tempTriangleList[i].nc[j] = triangleList[i].n_count[j];
tempTriangleList[i].vt[j].x = triangleList[i].vt[j].u();
tempTriangleList[i].vt[j].y = triangleList[i].vt[j].v();
}
}
hipMalloc((void**)&triangleListD, sizeof(TriangleD) * triangleNum);
degreeMemD += sizeof(TriangleD) * triangleNum;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(TriangleD) * triangleNum, "@");
hipMemcpy(triangleListD, tempTriangleList, sizeof(TriangleD) * triangleNum, hipMemcpyHostToDevice);
delete []tempTriangleList;
hipMalloc(&sampleValueD, sizeof(float) * (triangleCtrlPointNum + constrait_point_num) * triangleNum * 6);
degreeMemD += sizeof(float) * (triangleCtrlPointNum + constrait_point_num) * triangleNum * 6;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * (triangleCtrlPointNum + constrait_point_num) * triangleNum * 6,
"@BezierT");
hipMalloc(&sampleValueD_PN, sizeof(float3) * triangleNum * 3 * 2);
hipMalloc(&triangleCtrlPointD_PN, sizeof(float) * (1 + 2 + 3 + 4) * triangleNum * 3);
hipMalloc(&triangleNormalCtrlPointD_PN, sizeof(float) * (1 + 2 + 3) * triangleNum * 3);
hipMalloc(&triangleCtrlPointD, sizeof(float) * triangleCtrlPointNum_lower * triangleNum * 6);
hipMalloc(&triangle_adjacent_tableD, sizeof(int) * triangleNum * 3);
hipMemcpy(triangle_adjacent_tableD, triangle_adjacent_table, sizeof(int) * triangleNum * 3, hipMemcpyHostToDevice);
#ifdef TRUTH
hipMalloc(&sampleValueD_truth, sizeof(float) * triangleCtrlPointNum * triangleNum * 3);
degreeMemD += sizeof(float) * triangleCtrlPointNum * triangleNum * 3;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * triangleCtrlPointNum * triangleNum * 3,
"@BezierT");
activeThreadNumStep0_truth = triangleCtrlPointNum * triangleNum;
blockNumStep0_truth = ceil(static_cast<double>(activeThreadNumStep0_truth) / blockSizeStep0);
#endif
activeThreadNumStep0 = triangleCtrlPointNum * triangleNum;
blockNumStep0 = ceil(static_cast<double>(activeThreadNumStep0) / blockSizeStep0);
activeThreadNumStep1 = constrait_point_num * triangleNum;
blockNumStep1 = ceil(static_cast<double>(activeThreadNumStep1) / blockSizeStep1);
activeThreadNumAdjNormal = triangleNum * 3;
blockNumAdjNormal = ceil(static_cast<double>(activeThreadNumAdjNormal) / blockSizeAdjNormal);
blockNumStep0_PN = ceil(static_cast<double>(3 * triangleNum) / blockSizeStep0_PN);
#ifdef TRUTH
extern float matrixTriangle[9][55*55];
float *temp = new float[triangleCtrlPointNum * triangleCtrlPointNum];
for (int i = 0; i < triangleCtrlPointNum; ++i)
{
for (int j = 0; j < triangleCtrlPointNum; ++j)
{
temp[index2c(i, j, triangleCtrlPointNum)] = matrixTriangle[degree - 1][i * triangleCtrlPointNum + j];
}
}
hipMalloc(&B_1D_truth, sizeof(float) * triangleCtrlPointNum * triangleCtrlPointNum);
degreeMemD += sizeof(float) * triangleCtrlPointNum * triangleCtrlPointNum;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * triangleCtrlPointNum * triangleCtrlPointNum, "@(B-1)T");
hipMemcpy(B_1D_truth, temp, sizeof(float) * triangleCtrlPointNum * triangleCtrlPointNum, hipMemcpyHostToDevice);
delete temp;
#endif
/***************************************************************************/
extern int matrixFittingIdx[100];
matrixStartIdxFitting = matrixFittingIdx[degree * 10 + degree_lower];
cout << "triangleNum = " << triangleNum << endl;
cout << "degree = " << degree << ", degree_lower = " << degree_lower << ", constrait_point_num = " << constrait_point_num << endl;
cout << "triangleCtrlPointNum = " << triangleCtrlPointNum << ", triangleCtrlPointNum_lower = " << triangleCtrlPointNum_lower << endl;
cout << "activeThreadNumStep1 = " << activeThreadNumStep1 << ", blockNumStep1 = " << blockNumStep1 << endl;
#ifdef DRAW_TRIANGULAR_CTRL_POINTS
triangular_ctrl_points = new float[3 * triangleCtrlPointNum_lower * triangleNum];
#endif
}
double power(double a, int n)
{
if (n <= 0)
return 1.0;
double result = a;
for (int i = 1; i < n; ++i)
result *= a;
return result;
}
int factorial(int n)
{
int result = 1;
for (int i = 1; i <= n; ++i)
result *= i;
return result;
}
float B(double u, double v, double w, int n, int3 c)
{
return factorial(n) / factorial(c.x) / factorial(c.y) / factorial(c.z) * power(u, c.x) * power(v, c.y) * power(w, c.z);
}
float *BqD, *BqD_PN, *BBD, *RD;
int *my_to_truth_tableD;
#ifdef TRUTH
float *BqD_truth, *BBD_truth, *RD_truth;
#endif
int segmentPerEdge, samplePointPerTriangle;
int blockSizeCopy = 256, activeThreadNumCopy, blockNumCopy;
int *my_to_truth_table;
void generateUVW(int samplePointPerEdge)
{
segmentPerEdge = samplePointPerEdge - 1;
samplePointPerTriangle = (samplePointPerEdge + 1) * samplePointPerEdge / 2;
activeThreadNumCopy = samplePointPerTriangle * triangleNum;
blockNumCopy = ceil(static_cast<double>(activeThreadNumCopy) / blockSizeCopy);
double *a = new double[samplePointPerTriangle * 3];
int idx = 0;
for (int i = segmentPerEdge; i >= 0; --i)
{
for (int j = segmentPerEdge - i; j >= 0; --j)
{
int k = segmentPerEdge - i - j;
a[idx++] = (double)i / segmentPerEdge;
a[idx++] = (double)j / segmentPerEdge;
a[idx++] = (double)k / segmentPerEdge;
}
}
float *b = new float[samplePointPerTriangle * triangleCtrlPointNum_lower];
for (int row = 0; row < samplePointPerTriangle; ++row)
{
int idx = 0;
for (int i = degree_lower; i >= 0; --i)
{
for (int j = degree_lower - i; j >= 0; --j)
{
int k = degree_lower - i - j;
double u = a[row * 3 + 0];
double v = a[row * 3 + 1];
double w = a[row * 3 + 2];
b[index2c(row, idx, samplePointPerTriangle)] = B(u, v, w, degree_lower, make_int3(i, j, k));
++idx;
}
}
}
float *b_PN = new float[samplePointPerTriangle * 6];
for (int row = 0; row < samplePointPerTriangle; ++row)
{
int idx = 0;
for (int i = 2; i >= 0; --i)
{
for (int j = 2 - i; j >= 0; --j)
{
int k = 2 - i - j;
double u = a[row * 3 + 0];
double v = a[row * 3 + 1];
double w = a[row * 3 + 2];
b_PN[index2c(row, idx, samplePointPerTriangle)] = B(u, v, w, 2, make_int3(i, j, k));
++idx;
}
}
}
hipMalloc(&BqD, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum_lower);
tessMemD += sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum_lower;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum_lower, "@Bq");
hipMemcpy(BqD, b, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum_lower, hipMemcpyHostToDevice);
hipMalloc(&BqD_PN, sizeof(float) * samplePointPerTriangle * 6);
tessMemD += sizeof(float) * samplePointPerTriangle * 6;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * samplePointPerTriangle * 6, "@Bq");
hipMemcpy(BqD_PN, b_PN, sizeof(float) * samplePointPerTriangle * 6, hipMemcpyHostToDevice);
/***********************************************************************************************************************************/
hipMalloc(&BBD, sizeof(float) * samplePointPerTriangle * (triangleCtrlPointNum + constrait_point_num));
tessMemD += sizeof(float) * samplePointPerTriangle * (triangleCtrlPointNum + constrait_point_num);
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * samplePointPerTriangle * (triangleCtrlPointNum + constrait_point_num), "@BB");
hipMalloc(&RD, sizeof(float) * samplePointPerTriangle * triangleNum * 6);
tessMemD += sizeof(float) * samplePointPerTriangle * triangleNum * 6;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * samplePointPerTriangle * triangleNum * 6, "@RD");
delete []a;
delete []b;
hipMalloc(&my_to_truth_tableD, sizeof(int) * samplePointPerTriangle * triangleNum);
my_to_truth_table = new int[samplePointPerTriangle * triangleNum];
fill(my_to_truth_table, my_to_truth_table + samplePointPerTriangle * triangleNum, 0);
}
#ifdef TRUTH
void generateUVW_truth(int samplePointPerEdge)
{
double *a = new double[samplePointPerTriangle * 3];
int idx = 0;
for (int i = segmentPerEdge; i >= 0; --i)
{
for (int j = segmentPerEdge - i; j >= 0; --j)
{
int k = segmentPerEdge - i - j;
a[idx++] = (double)i / segmentPerEdge;
a[idx++] = (double)j / segmentPerEdge;
a[idx++] = (double)k / segmentPerEdge;
}
}
float *b = new float[samplePointPerTriangle * triangleCtrlPointNum * 3];
for (int row = 0; row < samplePointPerTriangle; ++row)
{
int idx = 0;
for (int i = degree; i >= 0; --i)
{
for (int j = degree - i; j >= 0; --j)
{
int k = degree - i - j;
double u = a[row * 3 + 0];
double v = a[row * 3 + 1];
double w = a[row * 3 + 2];
b[index2c(row, idx, samplePointPerTriangle * 3)] = B(u, v, w, degree, make_int3(i, j, k));
//b[row * triangleCtrlPointNum + idx] = B(u, v, w, degree, make_int3(i, j, k));
++idx;
}
}
}
/***********************************************************************************************************************************/
for (int row = 0; row < samplePointPerTriangle; ++row)
{
int idx = 0;
for (int i = degree; i >= 0; --i)
{
for (int j = degree - i; j >= 0; --j)
{
int k = degree - i - j;
double u = a[row * 3 + 0];
double v = a[row * 3 + 1];
double w = a[row * 3 + 2];
b[index2c(row + samplePointPerTriangle, idx, samplePointPerTriangle * 3)] = factorial(degree) / (factorial(i) * factorial(j) * factorial(k)) *
(i * power(u, i - 1) * power(v, j) * power(w, k) - k * power(u, i) * power(v, j) * power(w, k - 1));
++idx;
}
}
}
/***********************************************************************************************************************************/
for (int row = 0; row < samplePointPerTriangle; ++row)
//for (int row = samplePointPerTriangle * 2; row < samplePointPerTriangle * 3; ++row)
{
int idx = 0;
for (int i = degree; i >= 0; --i)
{
for (int j = degree - i; j >= 0; --j)
{
int k = degree - i - j;
double u = a[row * 3 + 0];
double v = a[row * 3 + 1];
double w = a[row * 3 + 2];
b[index2c(row + samplePointPerTriangle * 2, idx, samplePointPerTriangle * 3)] = factorial(degree) / (factorial(i) * factorial(j) * factorial(k)) *
(j * power(u, i) * power(v, j - 1) * power(w, k) - k * power(u, i) * power(v, j) * power(w, k - 1));
++idx;
}
}
}
hipMalloc(&BqD_truth, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum * 3);
tessMemD += sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum * 3;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum * 3, "@Bq");
hipMemcpy(BqD_truth, b, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum * 3, hipMemcpyHostToDevice);
/***********************************************************************************************************************************/
hipMalloc(&BBD_truth, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum * 3);
tessMemD += sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum * 3;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum * 3, "@BB");
hipMalloc(&RD_truth, sizeof(float) * samplePointPerTriangle * 3 * triangleNum * 3);
tessMemD += sizeof(float) * samplePointPerTriangle * 3 * triangleNum * 3;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * samplePointPerTriangle * 3 * triangleNum * 3, "@RD");
delete []a;
delete []b;
}
#endif
/*
* B uvw
* B
* truth
*/
__device__ float3 BSplineVolumeValueMatrixD2(float *Mu, float *Mv, float *Mw,
float u, float v, float w, float *shared_array,
int leftUIdx, int leftVIdx, int leftWIdx,
int orderU, int orderV, int orderW)
{
#define NB // NBdefine NB
#ifdef NB
float *mul1 = (float *)shared_array;
float *mul2 = (float *)&mul1[blockDim.x * 4];
float *temp = (float *)&mul2[blockDim.x * 4];
//
temp[3 * threadIdx.x + 0] = w;
temp[3 * threadIdx.x + 1] = w * w;
temp[3 * threadIdx.x + 2] = w * w * w;
for (int i = 0; i < orderW; ++i)
{
mul1[4 * threadIdx.x + i] = Mw[i];
for (int j = 1; j < orderW; ++j)
mul1[4 * threadIdx.x + i] += temp[3 * threadIdx.x + j - 1] * Mw[j * orderW + i];
}
//
temp[3 * threadIdx.x + 0] = v;
temp[3 * threadIdx.x + 1] = v * v;
temp[3 * threadIdx.x + 2] = v * v * v;
for (int i = 0; i < orderV; ++i)
{
mul2[4 * threadIdx.x + i] = Mv[i];
for (int j = 1; j < orderV; ++j)
mul2[4 * threadIdx.x + i] += temp[3 * threadIdx.x + j - 1] * Mv[j * orderV + i];
}
float3 tempCtrlPoint2[4];
float3 tempCtrlPoint1[4];
for (int i = 0; i < orderU; ++i)
{
for (int j = 0; j < orderV; ++j)
{
tempCtrlPoint2[j] = make_float3(0.0f, 0.0f, 0.0f);
for (int k = 0; k < orderW; ++k)
{
float3 cp = ctrlPointD[leftUIdx - i][leftVIdx - j][leftWIdx - k];
tempCtrlPoint2[j] += cp * mul1[4 * threadIdx.x + orderW - 1 - k];
}
}
tempCtrlPoint1[i] = make_float3(0.0f, 0.0f, 0.0f);
for (int j = 0; j < orderV; ++j)
tempCtrlPoint1[i] += tempCtrlPoint2[j] * mul2[4 * threadIdx.x + orderV - 1 - j];
}
//
temp[3 * threadIdx.x + 0] = u;
temp[3 * threadIdx.x + 1] = u * u;
temp[3 * threadIdx.x + 2] = u * u * u;
for (int i = 0; i < orderU; ++i)
{
mul1[4 * threadIdx.x + i] = Mu[i];
for (int j = 1; j < orderU; ++j)
mul1[4 * threadIdx.x + i] += temp[3 * threadIdx.x + j - 1] * Mu[j * orderU + i];
}
float3 result = make_float3(0.0f, 0.0f, 0.0f);
for (int i = 0; i < orderU; ++i)
result += tempCtrlPoint1[i] * mul1[4 * threadIdx.x + orderU - 1 - i];
return result;
/*-------------------------------------------------------------------------------------------------*/
#else
//
int base_i = leftUIdx - orderU + 1;
int base_j = leftVIdx - orderV + 1;
int base_k = leftWIdx - orderW + 1;
float3 box[4][4][4], temp;
for (int k = 0; k < orderW; ++k)
for (int i = 0; i < orderU; ++i)
for (int j = 0; j < orderV; ++j)
{
temp = make_float3(0.0, 0.0, 0.0);
for (int l = 0; l < orderU; ++l)
{
float3 cp = ctrlPointD[base_i + l][base_j + j][base_k + k];
temp += Mu[i * orderU + l] * cp;
}
box[i][j][k] = temp;
}
//
float3 box1[4][4][4];
for (int i = 0; i < orderU; ++i)
for (int j = 0; j < orderV; ++j)
for (int k = 0; k < orderW; ++k)
{
temp = make_float3(0.0, 0.0, 0.0);
for (int l = 0; l < orderV; ++l)
{
float3 cp = box[i][l][k];
temp += Mv[j * orderV + l] * cp;
}
box1[i][j][k] = temp;
}
//
for (int j = 0; j < orderV; ++j)
for (int k = 0; k < orderW; ++k)
for (int i = 0; i < orderU; ++i)
{
temp = make_float3(0.0, 0.0, 0.0);
for (int l = 0; l < orderW; ++l)
{
float3 cp = box1[i][j][l];
temp += Mw[k * orderW + l] * cp;
}
box[i][j][k] = temp;
}
//
float t[4];
t[0] = 1.0f;
t[1] = u;
t[2] = u * u;
t[3] = t[2] * u;
float3 cp2D[4][4];
for (int j = 0; j < orderV; ++j)
for (int k = 0; k < orderW; ++k)
{
cp2D[j][k] = make_float3(0.0f, 0.0f, 0.0f);
for (int i = 0; i < orderU; ++i)
{
cp2D[j][k] += t[i] * box[i][j][k];
}
}
//
t[1] = v;
t[2] = v * v;
t[3] = t[2] * v;
float3 cp1D[4];
for (int k = 0; k < orderW; ++k)
{
cp1D[k] = make_float3(0.0f, 0.0f, 0.0f);
for (int j = 0; j < orderV; ++j)
cp1D[k] += t[j] * cp2D[j][k];
}
//
t[1] = w;
t[2] = w * w;
t[3] = t[2] * w;
temp = make_float3(0.0f, 0.0f, 0.0f);
for (int k = 0; k < orderW; ++k)
temp += t[k] * cp1D[k];
return temp;
#endif
}
/* */
__device__ void BSplineVolumeValueMatrixD_combine(float u, float v, float w, float *shared_array,
int i_idx, int j_idx, int k_idx,
int orderU, int orderV, int orderW,
float3 &f, float3 &fu, float3 &fv)
{
int base2 = 2 * threadIdx.x;
int base3 = 3 * threadIdx.x;
float *tu = &shared_array[base3];
float *tu_ = &shared_array[blockDim.x * 3 + base2];
float *tv = &shared_array[blockDim.x * 5 + base3];
float *tv_ = &shared_array[blockDim.x * 8 + base2];
float *tw = &shared_array[blockDim.x * 10 + base3];
tu[0] = u; tu[1] = u * u, tu[2] = u * tu[1];
tu_[0] = 2 * u; tu_[1] = 3 * tu[1];
tv[0] = v; tv[1] = v * v; tv[2] = v * tv[1];
tv_[0] = 2 * v; tv_[1] = 3 * tv[1];
tw[0] = w; tw[1] = w * w; tw[2] = w * tw[1];
/************* i = 0 tutu_ ****************/
/******** orderU2i = 0for *********/
float3 cp2D[4];
for (int j = 0; j < orderV; ++j)
{
cp2D[j] = newCtrlPointD[i_idx][j_idx][k_idx][0][j][0];
for (int k = 1; k < orderW; ++k)
cp2D[j] += tw[k - 1] * newCtrlPointD[i_idx][j_idx][k_idx][0][j][k];
}
// orderV2tv[0] * cp2D[1]for
float3 cp1D = cp2D[0] + tv[0] * cp2D[1], cp1Dv = cp2D[1];
for (int j = 2; j < orderV; ++j)
{
cp1D += tv[j - 1] * cp2D[j];
cp1Dv += tv_[j - 2] * cp2D[j];
}
f = cp1D;
fv = cp1Dv;
/*************** i = 1 tu_ ******************/
/******** orderU2i = 1for *********/
for (int j = 0; j < orderV; ++j)
{
cp2D[j] = newCtrlPointD[i_idx][j_idx][k_idx][1][j][0];
for (int k = 1; k < orderW; ++k)
cp2D[j] += tw[k - 1] * newCtrlPointD[i_idx][j_idx][k_idx][1][j][k];
}
// orderV2tv[0] * cp2D[1]for
cp1D = cp2D[0] + tv[0] * cp2D[1];
cp1Dv = cp2D[1];
for (int j = 2; j < orderV; ++j)
{
cp1D += tv[j - 1] * cp2D[j];
cp1Dv += tv_[j - 2] * cp2D[j];
}
f += tu[0] * cp1D;
fu = cp1D;
fv += tu[0] * cp1Dv;
/*********************************************************************/
for (int i = 2; i < orderU; ++i)
{
for (int j = 0; j < orderV; ++j)
{
cp2D[j] = newCtrlPointD[i_idx][j_idx][k_idx][i][j][0];
for (int k = 1; k < orderW; ++k)
cp2D[j] += tw[k - 1] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];
}
// orderV2tv[0] * cp2D[1]for
cp1D = cp2D[0] + tv[0] * cp2D[1];
cp1Dv = cp2D[1];
for (int j = 2; j < orderV; ++j)
{
cp1D += tv[j - 1] * cp2D[j];
cp1Dv += tv_[j - 2] * cp2D[j];
}
f += tu[i - 1] * cp1D;
fu += tu_[i - 2] * cp1D;
fv += tu[i - 1] * cp1Dv;
}
}
/* u */
__device__ float3 BSplineVolumeValueMatrixDu(float u, float v, float w, float *shared_array,
int i_idx, int j_idx, int k_idx,
int orderU, int orderV, int orderW)
{
float *tu = (float *)shared_array;
float *tv = (float *)&tu[blockDim.x * 2];
float *tw = (float *)&tv[blockDim.x * 3];
int base2 = 2 * threadIdx.x;
int base3 = 3 * threadIdx.x;
tu[base2] = 2 * u; tu[base2 + 1] = 3 * u * u;
tv[base3] = v; tv[base3 + 1] = v * v; tv[base3 + 2] = v * v * v;
tw[base3] = w; tw[base3 + 1] = w * w, tw[base3 + 2] = w * w * w;
// ->->->
float3 cp2D[4], cp1D, result;
for (int j = 0; j < orderV; ++j)
{
cp2D[j] = newCtrlPointD[i_idx][j_idx][k_idx][1][j][0];
for (int k = 1; k < orderU; ++k)
cp2D[j] += tw[base3 + k - 1] * newCtrlPointD[i_idx][j_idx][k_idx][1][j][k];
}
cp1D = cp2D[0];
for (int j = 1; j < orderV; ++j)
cp1D += tv[base3 + j - 1] * cp2D[j];
result = cp1D;
// tu[3][2]i=0
for (int i = 2; i < orderU; ++i)
{
for (int j = 0; j < orderV; ++j)
{
cp2D[j] = newCtrlPointD[i_idx][j_idx][k_idx][i][j][0];
for (int k = 1; k < orderW; ++k)
cp2D[j] += tw[base3 + k - 1] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];
}
cp1D = cp2D[0];
for (int j = 1; j < orderV; ++j)
cp1D += tv[base3 + j - 1] * cp2D[j];
result += tu[base2 + i - 2] * cp1D;
}
return result;
/*-----------------------------------------------------*/
/*float tu[4], tv[4], tw[4];*/
/*tu[0] = 0; tu[1] = 1; tu[2] = 2 * u; tu[3] = 3 * u * u;*/
/*tv[0] = 1; tv[1] = v; tv[2] = v * v; tv[3] = v * v * v;*/
/*tw[0] = 1; tw[1] = w; tw[2] = w * w, tw[3] = w * w * w;*/
/*// ->->->*/
/*float3 cp2D[4], cp1D, result = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int k = 0; k < orderW; ++k)*/
/*{*/
/*for (int j = 0; j < orderV; ++j)*/
/*{*/
/*cp2D[j] = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int i = 0; i < orderU; ++i)*/
/*cp2D[j] += tu[i] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];*/
/*}*/
/*cp1D = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int j = 0; j < orderV; ++j)*/
/*cp1D += tv[j] * cp2D[j];*/
/*result += tw[k] * cp1D;*/
/*}*/
/*return result;*/
}
/* v */
__device__ float3 BSplineVolumeValueMatrixDv(float u, float v, float w, float *shared_array,
int i_idx, int j_idx, int k_idx,
int orderU, int orderV, int orderW)
{
float *tu = (float *)shared_array;
float *tv = (float *)&tu[blockDim.x * 3];
float *tw = (float *)&tv[blockDim.x * 2];
int base2 = 2 * threadIdx.x;
int base3 = 3 * threadIdx.x;
tu[base3] = u; tu[base3 + 1] = u * u; tu[base3 + 2] = u * u * u;
tv[base2] = 2 * v; tv[base2 + 1] = 3 * v * v;
tw[base3] = w; tw[base3 + 1] = w * w, tw[base3 + 2] = w * w * w;
// ->->->
float3 cp2D[4], cp1D, result;
for (int k = 0; k < orderW; ++k)
{
cp2D[k] = newCtrlPointD[i_idx][j_idx][k_idx][0][1][k];
for (int i = 1; i < orderU; ++i)
cp2D[k] += tu[base3 + i - 1] * newCtrlPointD[i_idx][j_idx][k_idx][i][1][k];
}
cp1D = cp2D[0];
for (int k = 1; k < orderW; ++k)
cp1D += tw[base3 + k - 1] * cp2D[k];
result = cp1D;
// tv[3][2]j=0
for (int j = 2; j < orderV; ++j)
{
for (int k = 0; k < orderW; ++k)
{
cp2D[k] = newCtrlPointD[i_idx][j_idx][k_idx][0][j][k];
for (int i = 1; i < orderU; ++i)
cp2D[k] += tu[base3 + i - 1] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];
}
cp1D = cp2D[0];
for (int k = 1; k < orderW; ++k)
cp1D += tw[base3 + k - 1] * cp2D[k];
result += tv[base2 + j - 2] * cp1D;
}
return result;
/*-----------------------------------------------------*/
/*float tu[4], tv[4], tw[4];*/
/*tu[0] = 1; tu[1] = u; tu[2] = u * u; tu[3] = u * u * u;*/
/*tv[0] = 0; tv[1] = 1; tv[2] = 2 * v; tv[3] = 3 * v * v;*/
/*tw[0] = 1; tw[1] = w; tw[2] = w * w, tw[3] = w * w * w;*/
/*// ->->->*/
/*float3 cp2D[4], cp1D, result = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int k = 0; k < orderW; ++k)*/
/*{*/
/*for (int j = 0; j < orderV; ++j)*/
/*{*/
/*cp2D[j] = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int i = 0; i < orderU; ++i)*/
/*cp2D[j] += tu[i] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];*/
/*}*/
/*cp1D = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int j = 0; j < orderV; ++j)*/
/*cp1D += tv[j] * cp2D[j];*/
/*result += tw[k] * cp1D;*/
/*}*/
/*return result;*/
}
/* w */
__device__ float3 BSplineVolumeValueMatrixDw(float u, float v, float w, float *shared_array,
int i_idx, int j_idx, int k_idx,
int orderU, int orderV, int orderW)
{
int base2 = 2 * threadIdx.x;
int base3 = 3 * threadIdx.x;
float *tu = &shared_array[base3];
float *tv = &shared_array[blockDim.x * 3 + base3];
float *tw = &shared_array[blockDim.x * 6 + base2];
tu[0] = u; tu[1] = u * u; tu[2] = u * tu[1];
tv[0] = v; tv[1] = v * v; tv[2] = v * tv[1];
tw[0] = 2 * w; tw[1] = 3 * w * w;
float3 cp2D[4], cp1D, result;
for (int i = 0; i < orderU; ++i)
{
cp2D[i] = newCtrlPointD[i_idx][j_idx][k_idx][i][0][1];
for (int j = 1; j < orderV; ++j)
cp2D[i] += tv[j - 1] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][1];
}
cp1D = cp2D[0];
for (int i = 1; i < orderU; ++i)
cp1D += tu[i - 1] * cp2D[i];
result = cp1D;
// tw[3][2]k = 1
for (int k = 2; k < orderW; ++k)
{
for (int i = 0; i < orderU; ++i)
{
cp2D[i] = newCtrlPointD[i_idx][j_idx][k_idx][i][0][k];
for (int j = 1; j < orderV; ++j)
cp2D[i] += tv[j - 1] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];
}
cp1D = cp2D[0];
for (int i = 1; i < orderU; ++i)
cp1D += tu[i - 1] * cp2D[i];
result += tw[k - 2] * cp1D;
}
return result;
/*-----------------------------------------------------*/
//float tu[4], tv[4], tw[4];
//tu[0] = 1; tu[1] = u; tu[2] = u * u; tu[3] = u * u * u;
//tv[0] = 1; tv[1] = v; tv[2] = v * v, tv[3] = v * v * v;
//tw[0] = 0; tw[1] = 1; tw[2] = 2 * w; tw[3] = 3 * w * w;
//float3 cp2D[4], cp1D, result = make_float3(0.0f, 0.0f, 0.0f);
//for (int i = 0; i < orderU; ++i)
//{
//for (int j = 0; j < orderV; ++j)
//{
//cp2D[j] = make_float3(0.0f, 0.0f, 0.0f);
//for (int k = 0; k < orderW; ++k)
//cp2D[j] += tw[k] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];
//}
//cp1D = make_float3(0.0f, 0.0f, 0.0f);
//for (int j = 0; j < orderV; ++j)
//cp1D += tv[j] * cp2D[j];
//result += tu[i] * cp1D;
//}
//return result;
/*-----------------------------------------------------*/
//
/*float tu[4], tv[4], tw[4];*/
/*tu[0] = 1; tu[1] = u; tu[2] = u * u; tu[3] = u * u * u;*/
/*tv[0] = 1; tv[1] = v; tv[2] = v * v, tv[3] = v * v * v;*/
/*tw[0] = 0; tw[1] = 1; tw[2] = 2 * w; tw[3] = 3 * w * w;*/
/*float3 cp2D[4], cp1D, result = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int k = 0; k < orderW; ++k)*/
/*{*/
/*for (int j = 0; j < orderV; ++j)*/
/*{*/
/*cp2D[j] = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int i = 0; i < orderU; ++i)*/
/*cp2D[j] += tu[i] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];*/
/*}*/
/*cp1D = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int j = 0; j < orderV; ++j)*/
/*cp1D += tv[j] * cp2D[j];*/
/*result += tw[k] * cp1D;*/
/*}*/
/*return result;*/
}
__global__ void calcCtrlPoint_PN(TriangleD *triangleListD, int *triangle_adjacent_tableD, float3 *sampleValueD_PN, float *triangleCtrlPointD_PN, float *triangleNormalCtrlPointD_PN, int f, int m_)
{
int triangleIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (triangleIdx >= f)
return;
int adj_face_idx[3];
adj_face_idx[0] = triangle_adjacent_tableD[triangleIdx * 3];
adj_face_idx[1] = triangle_adjacent_tableD[triangleIdx * 3 + 1];
adj_face_idx[2] = triangle_adjacent_tableD[triangleIdx * 3 + 2];
//int adj_edge_idx[3] = { -1, -1, -1 }; //
int adj_edge_idx[3] = { 0, 0, 0 }; //
//bool handle[3] = { false, false, false }; //
int adj_corner_ctrlpoint_idx[3][2] = { { 0, 2 }, { 1, 0 }, { 2, 1 } }; // 0, 1, 2
for (int i = 0; i < 3; ++i)
if (adj_face_idx[i] >= 0)
{
adj_edge_idx[i] = adj_face_idx[i] & 0x3;
adj_face_idx[i] = adj_face_idx[i] >> 2;
//handle[i] = true;
}
//printf("edge_id = (%d, %d, %d), face_id = (%d, %d, %d)\n", adj_edge_idx[0], adj_edge_idx[1], adj_edge_idx[2],
//adj_face_idx[0], adj_face_idx[1], adj_face_idx[2]);
int n_count[3];
n_count[0] = triangleListD[triangleIdx].nc[0];
n_count[1] = triangleListD[triangleIdx].nc[1];
n_count[2] = triangleListD[triangleIdx].nc[2];
float *p_x = &triangleCtrlPointD_PN[m_ * triangleIdx];
float *p_y = &triangleCtrlPointD_PN[m_ * (f + triangleIdx)];
float *p_z = &triangleCtrlPointD_PN[m_ * (f * 2 + triangleIdx)];
float3 v0 = sampleValueD_PN[triangleIdx * 3];
float3 v1 = sampleValueD_PN[triangleIdx * 3 + 1];
float3 v2 = sampleValueD_PN[triangleIdx * 3 + 2];
float3 n0 = sampleValueD_PN[(f + triangleIdx) * 3];
float3 n1 = sampleValueD_PN[(f + triangleIdx) * 3 + 1];
float3 n2 = sampleValueD_PN[(f + triangleIdx) * 3 + 2];
normalize(n0);
normalize(n1);
normalize(n2);
/*********************** **********************/
p_x[0] = v0.x; p_y[0] = v0.y; p_z[0] = v0.z; // 0
p_x[6] = v1.x; p_y[6] = v1.y; p_z[6] = v1.z; // 6
p_x[9] = v2.x; p_y[9] = v2.y; p_z[9] = v2.z; // 9
float3 e = make_float3(0.0f, 0.0f, 0.0f);
float3 v01 = v1 - v0;
float3 result;
if (n_count[1] < 2) //
{
result = (v0 * 2 + v1 - n0 * (v01 * n0)) / 3;
}
else
{
float3 n_oppo = triangleListD[adj_face_idx[1]].n_adj[adj_corner_ctrlpoint_idx[adj_edge_idx[1]][0]];
float3 n_ave = cross(n0, n_oppo);
normalize(n_ave);
result = v0 + v01 * n_ave / 3 * n_ave;
}
e += result;
p_x[1] = result.x; p_y[1] = result.y; p_z[1] = result.z; // 1
float3 v02 = v2 - v0;
if (n_count[0] < 2) //
{
result = (v0 * 2 + v2 - n0 * (v02 * n0)) / 3;
}
else
{
float3 n_oppo = triangleListD[adj_face_idx[0]].n_adj[adj_corner_ctrlpoint_idx[adj_edge_idx[0]][1]];
float3 n_ave = cross(n0, n_oppo);
normalize(n_ave);
result = v0 + v02 * n_ave / 3 * n_ave;
}
e += result;
p_x[2] = result.x; p_y[2] = result.y; p_z[2] = result.z; // 2
float3 v10 = v0 - v1;
if (n_count[1] < 2) //
{
result = (v1 * 2 + v0 - n1 * (v10 * n1)) / 3;
}
else
{
float3 n_oppo = triangleListD[adj_face_idx[1]].n_adj[adj_corner_ctrlpoint_idx[adj_edge_idx[1]][1]];
float3 n_ave = cross(n1, n_oppo);
normalize(n_ave);
result = v1 + v10 * n_ave / 3 * n_ave;
}
e += result;
p_x[3] = result.x; p_y[3] = result.y; p_z[3] = result.z; // 3
float3 v12 = v2 - v1;
if (n_count[2] < 2) //
{
result = (v1 * 2 + v2 - n1 * (v12 * n1)) / 3;
}
else
{
float3 n_oppo = triangleListD[adj_face_idx[2]].n_adj[adj_corner_ctrlpoint_idx[adj_edge_idx[2]][0]];
float3 n_ave = cross(n1, n_oppo);
normalize(n_ave);
result = v1 + v12 * n_ave / 3 * n_ave;
}
e += result;
p_x[7] = result.x; p_y[7] = result.y; p_z[7] = result.z; // 7
float3 v20 = v0 - v2;
if (n_count[0] < 2) //
{
result = (v2 * 2 + v0 - n2 * (v20 * n2)) / 3;
}
else
{
float3 n_oppo = triangleListD[adj_face_idx[0]].n_adj[adj_corner_ctrlpoint_idx[adj_edge_idx[0]][0]];
float3 n_ave = cross(n2, n_oppo);
normalize(n_ave);
result = v2 + v20 * n_ave / 3 * n_ave;
}
e += result;
p_x[5] = result.x; p_y[5] = result.y; p_z[5] = result.z; // 5
float3 v21 = v1 - v2;
if (n_count[2] < 2) //
{
result = (v2 * 2 + v1 - n2 * (v21 * n2)) / 3;
}
else
{
float3 n_oppo = triangleListD[adj_face_idx[2]].n_adj[adj_corner_ctrlpoint_idx[adj_edge_idx[2]][1]];
float3 n_ave = cross(n2, n_oppo);
normalize(n_ave);
result = v2 + v21 * n_ave / 3 * n_ave;
}
e += result;
p_x[8] = result.x; p_y[8] = result.y; p_z[8] = result.z; // 8
e /= 6;
float3 v_total = (v0 + v1 + v2) / 3;
result = e + (e - v_total) / 2;
p_x[4] = result.x; p_y[4] = result.y; p_z[4] = result.z; // 4
/*********************** **********************/
p_x = &triangleNormalCtrlPointD_PN[6 * triangleIdx];
p_y = &triangleNormalCtrlPointD_PN[6 * (f + triangleIdx)];
p_z = &triangleNormalCtrlPointD_PN[6 * (f * 2 + triangleIdx)];
p_x[0] = n0.x; p_y[0] = n0.y; p_z[0] = n0.z; // 0
p_x[3] = n1.x; p_y[3] = n1.y; p_z[3] = n1.z; // 3
p_x[5] = n2.x; p_y[5] = n2.y; p_z[5] = n2.z; // 5
float value01 = 2 * v01 * (n0 + n1) / (v01 * v01);
result = n0 + n1 - value01 * v01;
normalize(result);
p_x[1] = result.x; p_y[1] = result.y; p_z[1] = result.z; // 1
float value12 = 2 * v12 * (n1 + n2) / (v12 * v12);
result = n1 + n2 - value12 * v12;
normalize(result);
p_x[4] = result.x; p_y[4] = result.y; p_z[4] = result.z; // 4
float value20 = 2 * v20 * (n2 + n0) / (v20 * v20);
result = n2 + n0 - value20 * v20;
normalize(result);
p_x[2] = result.x; p_y[2] = result.y; p_z[2] = result.z; // 2
}
__global__ void calcSampleValueThread_PN(TriangleD *triangleListD, float3 *sampleValueD_PN,
int f, int n, int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW)
{
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (globalIdx >= 3 * f)
return;
int triangleIdx = globalIdx / 3;
TriangleD &triangle = triangleListD[triangleIdx];
int localIdx = globalIdx % 3;
float3 vertex = triangle.v[localIdx];
float u = vertex.x;
float v = vertex.y;
float w = vertex.z;
//float tempFloorFloat = (sqrtf((float)localIdx * 8 + 9) - 3) * 0.5;
//int floor = rintf(tempFloorFloat);
//if ((floor * 2 + 3) * (floor * 2 + 3) != localIdx * 8 + 9)
//floor = ceilf(tempFloorFloat);
//int room = localIdx - (floor + 1) * floor * 0.5;
//float3 barycentric_coord;
//barycentric_coord.x = (float)(n - floor) / n;
//barycentric_coord.y = (float)(floor - room) / n;
//barycentric_coord.z = 1.0f - barycentric_coord.x - barycentric_coord.y;
//float3 v0 = triangle.v[0];
//float3 v1 = triangle.v[1];
//float3 v2 = triangle.v[2];
//// u, v, w x, y, z
//float u = v0.x * barycentric_coord.x + v1.x * barycentric_coord.y + v2.x * barycentric_coord.z;
//float v = v0.y * barycentric_coord.x + v1.y * barycentric_coord.y + v2.y * barycentric_coord.z;
//float w = v0.z * barycentric_coord.x + v1.z * barycentric_coord.y + v2.z * barycentric_coord.z;
// u, v, w
int knot_interval_count_u = orderU + ctrlPointNumU - (orderU - 1) * 2 - 1;
int knot_interval_count_v = orderV + ctrlPointNumV - (orderV - 1) * 2 - 1;
int knot_interval_count_w = orderW + ctrlPointNumW - (orderW - 1) * 2 - 1;
//
int left_idx_u = orderU - 1 + knot_interval_count_u - 1;
int left_idx_v = orderV - 1 + knot_interval_count_v - 1;
int left_idx_w = orderW - 1 + knot_interval_count_w - 1;
// U
for (int ii = orderU - 1; ii <= orderU - 1 + knot_interval_count_u - 1; ++ii)
{
if (u >= knotListD[ii] && u < knotListD[ii + 1])
{
left_idx_u = ii;
break;
}
}
// V
for (int jj = orderV - 1; jj <= orderV - 1 + knot_interval_count_v - 1; ++jj)
{
if (v >= knotListD[20 + jj] && v < knotListD[20 + jj + 1])
{
left_idx_v = jj;
break;
}
}
// W
for (int kk = orderW - 1; kk <= orderW - 1 + knot_interval_count_w - 1; ++kk)
{
if (w >= knotListD[40 + kk] && w < knotListD[40 + kk + 1])
{
left_idx_w = kk;
break;
}
}
float tmpKnot = knotListD[left_idx_u];
float tmpKnot1 = knotListD[left_idx_u + 1];
float x_stride = tmpKnot1 - tmpKnot;
u = (u - tmpKnot) / x_stride;
tmpKnot = knotListD[20 + left_idx_v];
tmpKnot1 = knotListD[20 + left_idx_v + 1];
float y_stride = tmpKnot1 - tmpKnot;
v = (v - tmpKnot) / y_stride;
tmpKnot = knotListD[40 + left_idx_w];
tmpKnot1 = knotListD[40 + left_idx_w + 1];
float z_stride = tmpKnot1 - tmpKnot;
w = (w - tmpKnot) / z_stride;
extern __shared__ float shared_array[];
// B
// fu J_barF_bar_xuF_bar_yuF_bar_zu
// fv J_barF_bar_xvF_bar_yvF_bar_zv
float3 result, fu, fv;
BSplineVolumeValueMatrixD_combine(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW,
result, fu, fv);
__syncthreads();
//sampleValueD[index2c(localIdx, triangleIdx , 3)] = result.x;
//sampleValueD[index2c(localIdx, triangleIdx + f , 3)] = result.y;
//sampleValueD[index2c(localIdx, triangleIdx + f * 2, 3)] = result.z;
sampleValueD_PN[3 * triangleIdx + localIdx].x = result.x;
sampleValueD_PN[3 * triangleIdx + localIdx].y = result.y;
sampleValueD_PN[3 * triangleIdx + localIdx].z = result.z;
//printf("%d: result = (%f, %f, %f)\n", globalIdx, result.x, result.y, result.z);
//printf("%d: result = (%f, %f, %f)\n", threadIdx.x, result.x, result.y, result.z);
///////////////////////////////////////////////////////////////////////////////
// fw J_barF_bar_xwF_bar_ywF_bar_zw
float3 fw = BSplineVolumeValueMatrixDw(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW);
//__syncthreads();
//v0 = triangle.n[0];
//v1 = triangle.n[1];
//v2 = triangle.n[2];
//// u, v, w x, y, z
//u = v0.x * barycentric_coord.x + v1.x * barycentric_coord.y + v2.x * barycentric_coord.z;
//v = v0.y * barycentric_coord.x + v1.y * barycentric_coord.y + v2.y * barycentric_coord.z;
//w = v0.z * barycentric_coord.x + v1.z * barycentric_coord.y + v2.z * barycentric_coord.z;
vertex = triangle.n[localIdx];
u = vertex.x;
v = vertex.y;
w = vertex.z;
float3 *sampleNormalD_PN = sampleValueD_PN + 3 * f;
// J_bar_star_T_[012]J_bar(J_bar*T)
float J_bar_star_T_0 = fv.y * fw.z - fw.y * fv.z;
float J_bar_star_T_1 = fw.y * fu.z - fu.y * fw.z;
float J_bar_star_T_2 = fu.y * fv.z - fv.y * fu.z;
//sampleNormalD[index2c(localIdx, triangleIdx, 3)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
sampleNormalD_PN[3 * triangleIdx + localIdx].x = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]J_bar(J_bar*T)
J_bar_star_T_0 = fv.z * fw.x - fw.z * fv.x;
J_bar_star_T_1 = fw.z * fu.x - fu.z * fw.x;
J_bar_star_T_2 = fu.z * fv.x - fv.z * fu.x;
sampleNormalD_PN[3 * triangleIdx + localIdx].y = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]J_bar(J_bar*T)
J_bar_star_T_0 = fv.x * fw.y - fw.x * fv.y;
J_bar_star_T_1 = fw.x * fu.y - fu.x * fw.y;
J_bar_star_T_2 = fu.x * fv.y - fv.x * fu.y;
sampleNormalD_PN[3 * triangleIdx + localIdx].z = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
}
__global__ void calcSampleValueThread(TriangleD *triangleListD, float *sampleValueD,
int activeThreadNum, int m, int f, int c, int n,
int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW)
{
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (globalIdx >= activeThreadNum)
return;
int triangleIdx = globalIdx / m;
TriangleD &triangle = triangleListD[triangleIdx];
int localIdx = globalIdx % m;
float tempFloorFloat = (sqrtf((float)localIdx * 8 + 9) - 3) * 0.5;
int floor = rintf(tempFloorFloat);
if ((floor * 2 + 3) * (floor * 2 + 3) != localIdx * 8 + 9)
floor = ceilf(tempFloorFloat);
int room = localIdx - ((floor + 1) * floor >> 1);
float3 barycentric_coord;
barycentric_coord.x = (float)(n - floor) / n;
barycentric_coord.y = (float)(floor - room) / n;
barycentric_coord.z = 1.0f - barycentric_coord.x - barycentric_coord.y;
float3 v0 = triangle.v[0];
float3 v1 = triangle.v[1];
float3 v2 = triangle.v[2];
// u, v, w x, y, z
float u = v0.x * barycentric_coord.x + v1.x * barycentric_coord.y + v2.x * barycentric_coord.z;
float v = v0.y * barycentric_coord.x + v1.y * barycentric_coord.y + v2.y * barycentric_coord.z;
float w = v0.z * barycentric_coord.x + v1.z * barycentric_coord.y + v2.z * barycentric_coord.z;
// u, v, w
int knot_interval_count_u = orderU + ctrlPointNumU - (orderU - 1) * 2 - 1;
int knot_interval_count_v = orderV + ctrlPointNumV - (orderV - 1) * 2 - 1;
int knot_interval_count_w = orderW + ctrlPointNumW - (orderW - 1) * 2 - 1;
//
int left_idx_u = orderU - 1 + knot_interval_count_u - 1;
int left_idx_v = orderV - 1 + knot_interval_count_v - 1;
int left_idx_w = orderW - 1 + knot_interval_count_w - 1;
// U
for (int ii = orderU - 1; ii <= orderU - 1 + knot_interval_count_u - 1; ++ii)
{
if (u >= knotListD[ii] && u < knotListD[ii + 1])
{
left_idx_u = ii;
break;
}
}
// V
for (int jj = orderV - 1; jj <= orderV - 1 + knot_interval_count_v - 1; ++jj)
{
if (v >= knotListD[20 + jj] && v < knotListD[20 + jj + 1])
{
left_idx_v = jj;
break;
}
}
// W
for (int kk = orderW - 1; kk <= orderW - 1 + knot_interval_count_w - 1; ++kk)
{
if (w >= knotListD[40 + kk] && w < knotListD[40 + kk + 1])
{
left_idx_w = kk;
break;
}
}
float tmpKnot = knotListD[left_idx_u];
float tmpKnot1 = knotListD[left_idx_u + 1];
float x_stride = tmpKnot1 - tmpKnot;
u = (u - tmpKnot) / x_stride;
tmpKnot = knotListD[20 + left_idx_v];
tmpKnot1 = knotListD[20 + left_idx_v + 1];
float y_stride = tmpKnot1 - tmpKnot;
v = (v - tmpKnot) / y_stride;
tmpKnot = knotListD[40 + left_idx_w];
tmpKnot1 = knotListD[40 + left_idx_w + 1];
float z_stride = tmpKnot1 - tmpKnot;
w = (w - tmpKnot) / z_stride;
extern __shared__ float shared_array[];
// B
// fu J_barF_bar_xuF_bar_yuF_bar_zu
// fv J_barF_bar_xvF_bar_yvF_bar_zv
float3 result, fu, fv;
BSplineVolumeValueMatrixD_combine(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW,
result, fu, fv);
__syncthreads();
sampleValueD[index2c(localIdx, triangleIdx , m + c)] = result.x;
sampleValueD[index2c(localIdx, triangleIdx + f , m + c)] = result.y;
sampleValueD[index2c(localIdx, triangleIdx + f * 2, m + c)] = result.z;
///////////////////////////////////////////////////////////////////////////////
// fw J_barF_bar_xwF_bar_ywF_bar_zw
float3 fw = BSplineVolumeValueMatrixDw(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW);
//__syncthreads();
v0 = triangle.n[0];
v1 = triangle.n[1];
v2 = triangle.n[2];
// u, v, w x, y, z
u = v0.x * barycentric_coord.x + v1.x * barycentric_coord.y + v2.x * barycentric_coord.z;
v = v0.y * barycentric_coord.x + v1.y * barycentric_coord.y + v2.y * barycentric_coord.z;
w = v0.z * barycentric_coord.x + v1.z * barycentric_coord.y + v2.z * barycentric_coord.z;
float *sampleNormalD = sampleValueD + 3 * f * (m + c);
// J_bar_star_T_[012]J_bar(J_bar*T)
float J_bar_star_T_0 = fv.y * fw.z - fw.y * fv.z;
float J_bar_star_T_1 = fw.y * fu.z - fu.y * fw.z;
float J_bar_star_T_2 = fu.y * fv.z - fv.y * fu.z;
sampleNormalD[index2c(localIdx, triangleIdx, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]J_bar(J_bar*T)
J_bar_star_T_0 = fv.z * fw.x - fw.z * fv.x;
J_bar_star_T_1 = fw.z * fu.x - fu.z * fw.x;
J_bar_star_T_2 = fu.z * fv.x - fv.z * fu.x;
sampleNormalD[index2c(localIdx, triangleIdx + f, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]J_bar(J_bar*T)
J_bar_star_T_0 = fv.x * fw.y - fw.x * fv.y;
J_bar_star_T_1 = fw.x * fu.y - fu.x * fw.y;
J_bar_star_T_2 = fu.x * fv.y - fv.x * fu.y;
sampleNormalD[index2c(localIdx, triangleIdx + f * 2, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
}
__global__ void calcConstraitSampleValueThread(TriangleD *triangleListD, float *sampleValueD,
int activeThreadNum, int m, int f, int c, int n_,
int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW)
{
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (globalIdx >= activeThreadNum)
return;
int triangleIdx = globalIdx / c;
TriangleD &triangle = triangleListD[triangleIdx];
int localIdx = globalIdx % c;
int floor = -1, room = -1;
if (localIdx < 2 * n_ - 1)
{
floor = (localIdx + 1) / 2;
if (localIdx % 2 == 1)
room = 0;
else
room = floor;
}
else
{
floor = n_;
room = localIdx - (2 * n_ - 1);
}
float3 barycentric_coord;
barycentric_coord.x = (float)(n_ - floor) / n_;
barycentric_coord.y = (float)(floor - room) / n_;
barycentric_coord.z = 1.0f - barycentric_coord.x - barycentric_coord.y;
float3 v0 = triangle.v[0];
float3 v1 = triangle.v[1];
float3 v2 = triangle.v[2];
// u, v, w x, y, z
float u = v0.x * barycentric_coord.x + v1.x * barycentric_coord.y + v2.x * barycentric_coord.z;
float v = v0.y * barycentric_coord.x + v1.y * barycentric_coord.y + v2.y * barycentric_coord.z;
float w = v0.z * barycentric_coord.x + v1.z * barycentric_coord.y + v2.z * barycentric_coord.z;
// u, v, w
int knot_interval_count_u = orderU + ctrlPointNumU - (orderU - 1) * 2 - 1;
int knot_interval_count_v = orderV + ctrlPointNumV - (orderV - 1) * 2 - 1;
int knot_interval_count_w = orderW + ctrlPointNumW - (orderW - 1) * 2 - 1;
//
int left_idx_u = orderU - 1 + knot_interval_count_u - 1;
int left_idx_v = orderV - 1 + knot_interval_count_v - 1;
int left_idx_w = orderW - 1 + knot_interval_count_w - 1;
// U
for (int ii = orderU - 1; ii <= orderU - 1 + knot_interval_count_u - 1; ++ii)
{
if (u >= knotListD[ii] && u < knotListD[ii + 1])
{
left_idx_u = ii;
break;
}
}
// V
for (int jj = orderV - 1; jj <= orderV - 1 + knot_interval_count_v - 1; ++jj)
{
if (v >= knotListD[20 + jj] && v < knotListD[20 + jj + 1])
{
left_idx_v = jj;
break;
}
}
// W
for (int kk = orderW - 1; kk <= orderW - 1 + knot_interval_count_w - 1; ++kk)
{
if (w >= knotListD[40 + kk] && w < knotListD[40 + kk + 1])
{
left_idx_w = kk;
break;
}
}
float tmpKnot = knotListD[left_idx_u];
float tmpKnot1 = knotListD[left_idx_u + 1];
float x_stride = tmpKnot1 - tmpKnot;
u = (u - tmpKnot) / x_stride;
tmpKnot = knotListD[20 + left_idx_v];
tmpKnot1 = knotListD[20 + left_idx_v + 1];
float y_stride = tmpKnot1 - tmpKnot;
v = (v - tmpKnot) / y_stride;
tmpKnot = knotListD[40 + left_idx_w];
tmpKnot1 = knotListD[40 + left_idx_w + 1];
float z_stride = tmpKnot1 - tmpKnot;
w = (w - tmpKnot) / z_stride;
extern __shared__ float shared_array[];
// B f
// fu J_barF_bar_xuF_bar_yuF_bar_zu
// fv J_barF_bar_xvF_bar_yvF_bar_zv
float3 result, fu, fv;
BSplineVolumeValueMatrixD_combine(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW,
result, fu, fv);
__syncthreads();
sampleValueD[index2c(localIdx + m, triangleIdx , m + c)] = result.x;
sampleValueD[index2c(localIdx + m, triangleIdx + f , m + c)] = result.y;
sampleValueD[index2c(localIdx + m, triangleIdx + f * 2, m + c)] = result.z;
////////////////////////////////////////////////////////////////////////////
// fw J_barF_bar_xwF_bar_ywF_bar_zw
float3 fw = BSplineVolumeValueMatrixDw(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW);
//__syncthreads();
v0 = triangle.n[0];
v1 = triangle.n[1];
v2 = triangle.n[2];
// u, v, w x, y, z
u = v0.x * barycentric_coord.x + v1.x * barycentric_coord.y + v2.x * barycentric_coord.z;
v = v0.y * barycentric_coord.x + v1.y * barycentric_coord.y + v2.y * barycentric_coord.z;
w = v0.z * barycentric_coord.x + v1.z * barycentric_coord.y + v2.z * barycentric_coord.z;
float *sampleNormalD = sampleValueD + 3 * f * (m + c);
// J_bar_star_T_[012]J_bar(J_bar*T)
float J_bar_star_T_0 = fv.y * fw.z - fw.y * fv.z;
float J_bar_star_T_1 = fw.y * fu.z - fu.y * fw.z;
float J_bar_star_T_2 = fu.y * fv.z - fv.y * fu.z;
sampleNormalD[index2c(localIdx + m, triangleIdx, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]J_bar(J_bar*T)
J_bar_star_T_0 = fv.z * fw.x - fw.z * fv.x;
J_bar_star_T_1 = fw.z * fu.x - fu.z * fw.x;
J_bar_star_T_2 = fu.z * fv.x - fv.z * fu.x;
sampleNormalD[index2c(localIdx + m, triangleIdx + f, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]J_bar(J_bar*T)
J_bar_star_T_0 = fv.x * fw.y - fw.x * fv.y;
J_bar_star_T_1 = fw.x * fu.y - fu.x * fw.y;
J_bar_star_T_2 = fu.x * fv.y - fv.x * fu.y;
sampleNormalD[index2c(localIdx + m, triangleIdx + f * 2, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
}
__global__ void calcAdjustNormal(TriangleD *triangleListD, int f,
int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW)
{
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int triangleIdx = globalIdx / 3;
if (triangleIdx >= f)
return;
int i = globalIdx % 3;
float3 vertex = triangleListD[triangleIdx].v[i];
float u = vertex.x;
float v = vertex.y;
float w = vertex.z;
// u, v, w
int knot_interval_count_u = orderU + ctrlPointNumU - (orderU - 1) * 2 - 1;
int knot_interval_count_v = orderV + ctrlPointNumV - (orderV - 1) * 2 - 1;
int knot_interval_count_w = orderW + ctrlPointNumW - (orderW - 1) * 2 - 1;
//
int left_idx_u = orderU - 1 + knot_interval_count_u - 1;
int left_idx_v = orderV - 1 + knot_interval_count_v - 1;
int left_idx_w = orderW - 1 + knot_interval_count_w - 1;
// U
for (int ii = orderU - 1; ii <= orderU - 1 + knot_interval_count_u - 1; ++ii)
{
if (u >= knotListD[ii] && u < knotListD[ii + 1])
{
left_idx_u = ii;
break;
}
}
// V
for (int jj = orderV - 1; jj <= orderV - 1 + knot_interval_count_v - 1; ++jj)
{
if (v >= knotListD[20 + jj] && v < knotListD[20 + jj + 1])
{
left_idx_v = jj;
break;
}
}
// W
for (int kk = orderW - 1; kk <= orderW - 1 + knot_interval_count_w - 1; ++kk)
{
if (w >= knotListD[40 + kk] && w < knotListD[40 + kk + 1])
{
left_idx_w = kk;
break;
}
}
float tmpKnot = knotListD[left_idx_u];
float tmpKnot1 = knotListD[left_idx_u + 1];
float x_stride = tmpKnot1 - tmpKnot;
u = (u - tmpKnot) / x_stride;
tmpKnot = knotListD[20 + left_idx_v];
tmpKnot1 = knotListD[20 + left_idx_v + 1];
float y_stride = tmpKnot1 - tmpKnot;
v = (v - tmpKnot) / y_stride;
tmpKnot = knotListD[40 + left_idx_w];
tmpKnot1 = knotListD[40 + left_idx_w + 1];
float z_stride = tmpKnot1 - tmpKnot;
w = (w - tmpKnot) / z_stride;
extern __shared__ float shared_array[];
// fu J_barF_bar_xuF_bar_yuF_bar_zu
float3 fu = BSplineVolumeValueMatrixDu(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW);
__syncthreads();
// fv J_barF_bar_xvF_bar_yvF_bar_zv
float3 fv = BSplineVolumeValueMatrixDv(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW);
__syncthreads();
// fw J_barF_bar_xwF_bar_ywF_bar_zw
float3 fw = BSplineVolumeValueMatrixDw(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW);
__syncthreads();
vertex = triangleListD[triangleIdx].n_adj_origin[i];
u = vertex.x;
v = vertex.y;
w = vertex.z;
// J_bar_star_T_[012]J_bar(J_bar*T)
float J_bar_star_T_0 = fv.y * fw.z - fw.y * fv.z;
float J_bar_star_T_1 = fw.y * fu.z - fu.y * fw.z;
float J_bar_star_T_2 = fu.y * fv.z - fv.y * fu.z;
triangleListD[triangleIdx].n_adj[i].x = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]J_bar(J_bar*T)
J_bar_star_T_0 = fv.z * fw.x - fw.z * fv.x;
J_bar_star_T_1 = fw.z * fu.x - fu.z * fw.x;
J_bar_star_T_2 = fu.z * fv.x - fv.z * fu.x;
triangleListD[triangleIdx].n_adj[i].y = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]J_bar(J_bar*T)
J_bar_star_T_0 = fv.x * fw.y - fw.x * fv.y;
J_bar_star_T_1 = fw.x * fu.y - fu.x * fw.y;
J_bar_star_T_2 = fu.x * fv.y - fv.x * fu.y;
triangleListD[triangleIdx].n_adj[i].z = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
}
void calcSampleValue(AlgorithmType algo_type)
{
if (algo_type == CYM)
{
//
//calcSampleValueThread<<<blockNumStep0, blockSizeStep0, sizeof(float) * blockSizeStep0 * 9>>>
hipLaunchKernelGGL(( calcSampleValueThread), dim3(blockNumStep0), dim3(blockSizeStep0), sizeof(float) * blockSizeStep0 * 13, 0,
triangleListD, sampleValueD,
activeThreadNumStep0, triangleCtrlPointNum, triangleNum, constrait_point_num,
degree, order[U], order[V], order[W],
ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W]);
//
//calcConstraitSampleValueThread<<<blockNumStep1, blockSizeStep1, sizeof(float) * blockSizeStep1 * 9>>>
hipLaunchKernelGGL(( calcConstraitSampleValueThread), dim3(blockNumStep1), dim3(blockSizeStep1), sizeof(float) * blockSizeStep1 * 13, 0,
triangleListD, sampleValueD,
activeThreadNumStep1, triangleCtrlPointNum, triangleNum, constrait_point_num,
degree_lower, order[U], order[V], order[W],
ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W]);
}
else
{
hipLaunchKernelGGL(( calcSampleValueThread_PN), dim3(blockNumStep0_PN), dim3(blockSizeStep0_PN), sizeof(float) * blockSizeStep1 * 13, 0,
triangleListD, sampleValueD_PN,
triangleNum, degree, order[U], order[V], order[W],
ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W]);
}
//hipError_t error = hipGetLastError();
//if (error != 0)
//{
//cout << "0\t";
//printf("CUDA error: %s\n", hipGetErrorString(error));
//}
//error = hipGetLastError();
//if (error != 0)
//{
//cout << "\t";
//printf("CUDA error: %s\n", hipGetErrorString(error));
//}
//float3 *test = new float3[triangleNum * 3];
//for (int i = 0; i < triangleNum * 3; ++i)
//test[i] = make_float3(1.0f, 2.0f, 3.0f);
//hipMemcpy(test, sampleValueD_PN, sizeof(float3) * triangleNum * 3, hipMemcpyDeviceToHost);
//for (int i = 0; i < triangleNum; ++i)
//{
//cout << test[i * 3].x << ", " << test[i * 3].y << ", " << test[i * 3].z << endl;
//cout << test[i * 3 + 1].x << ", " << test[i * 3 + 1].y << ", " << test[i * 3 + 1].z << endl;
//cout << test[i * 3 + 2].x << ", " << test[i * 3 + 2].y << ", " << test[i * 3 + 2].z << endl;
//cout << "==============" << endl;
//}
//delete []test;
//float *test = new float[(triangleCtrlPointNum + constrait_point_num) * triangleNum * 6];
//hipMemcpy(test, sampleValueD, sizeof(float) * (triangleCtrlPointNum + constrait_point_num) * triangleNum * 6, hipMemcpyDeviceToHost);
//float *n = test + (triangleCtrlPointNum + constrait_point_num) * triangleNum * 3;
//for (int i = 0; i < triangleNum; ++i)
//{
//cout << "i = " << i << endl;
//for (int j = 0; j < constrait_point_num; ++j)
//{
//cout << "\t" << j << " " << test[i * (triangleCtrlPointNum + constrait_point_num) + triangleCtrlPointNum + j] << " "
//<< test[(i + triangleNum) * (triangleCtrlPointNum + constrait_point_num) + triangleCtrlPointNum + j] << " "
//<< test[(i + triangleNum * 2) * (triangleCtrlPointNum + constrait_point_num) + triangleCtrlPointNum + j] << "\t";
//cout << n[i * (triangleCtrlPointNum + constrait_point_num) + triangleCtrlPointNum + j] << " "
//<< n[(i + triangleNum) * (triangleCtrlPointNum + constrait_point_num) + triangleCtrlPointNum + j] << " "
//<< n[(i + triangleNum * 2) * (triangleCtrlPointNum + constrait_point_num) + triangleCtrlPointNum + j] << endl;
//}
//}
//delete []test;
}
#ifdef TRUTH
__global__ void calcSampleValueThread_truth(TriangleD *triangleListD, float *sampleValueD_truth,
int activeThreadNum, int m, int f, int n,
int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW)
{
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (globalIdx >= activeThreadNum)
return;
int triangleIdx = globalIdx / m;
int localIdx = globalIdx % m;
float tempFloorFloat = (sqrtf((float)localIdx * 8 + 9) - 3) / 2;
int floor = rintf(tempFloorFloat);
if ((floor * 2 + 3) * (floor * 2 + 3) != localIdx * 8 + 9)
floor = ceilf(tempFloorFloat);
int room = localIdx - (floor + 1) * floor / 2;
float3 barycentric_coord;
barycentric_coord.x = (float)(n - floor) / n;
barycentric_coord.y = (float)(floor - room) / n;
barycentric_coord.z = 1.0f - barycentric_coord.x - barycentric_coord.y;
TriangleD &triangle = triangleListD[triangleIdx];
float3 v0 = triangle.v[0];
float3 v1 = triangle.v[1];
float3 v2 = triangle.v[2];
float u = v0.x * barycentric_coord.x + v1.x * barycentric_coord.y + v2.x * barycentric_coord.z;
float v = v0.y * barycentric_coord.x + v1.y * barycentric_coord.y + v2.y * barycentric_coord.z;
float w = v0.z * barycentric_coord.x + v1.z * barycentric_coord.y + v2.z * barycentric_coord.z;
int i = (u - knotListD[0]) / (knotListD[orderU] - knotListD[0]);
int j = (v - knotListD[20 + 0]) / (knotListD[20 + orderV] - knotListD[20 + 0]);
int k = (w - knotListD[40 + 0]) / (knotListD[40 + orderW] - knotListD[40 + 0]);
if (i >= ctrlPointNumU + orderU - 2 * (orderU - 1) - 1)
--i;
if (j >= ctrlPointNumV + orderV - 2 * (orderV - 1) - 1)
--j;
if (k >= ctrlPointNumW + orderW - 2 * (orderW - 1) - 1)
--k;
/* block uvw B */
float *Mu = matrixCase(matrix_b_spline_d, orderU, ctrlPointNumU, i + orderU - 1);
float *Mv = matrixCase(matrix_b_spline_d, orderV, ctrlPointNumV, j + orderV - 1);
float *Mw = matrixCase(matrix_b_spline_d, orderW, ctrlPointNumW, k + orderW - 1);
float tmpKnot = knotListD[i + orderU - 1];
float tmpKnot1 = knotListD[i + orderU];
u = (u - tmpKnot) / (tmpKnot1 - tmpKnot);
tmpKnot = knotListD[20 + j + orderV - 1];
tmpKnot1 = knotListD[20 + j + orderV];
v = (v - tmpKnot) / (tmpKnot1 - tmpKnot);
tmpKnot = knotListD[40 + k + orderW - 1];
tmpKnot1 = knotListD[40 + k + orderW];
w = (w - tmpKnot) / (tmpKnot1 - tmpKnot);
extern __shared__ float shared_array[];
/* B */
float3 result = BSplineVolumeValueMatrixD2(Mu, Mv, Mw,
u, v, w, shared_array,
i + orderU - 1, j + orderV - 1, k + orderW - 1,
orderU, orderV, orderW);
sampleValueD_truth[index2c(localIdx, triangleIdx, m)] = result.x;
sampleValueD_truth[index2c(localIdx, triangleIdx + f, m)] = result.y;
sampleValueD_truth[index2c(localIdx, triangleIdx + f * 2, m)] = result.z;
}
void calcSampleValue_truth()
{
hipLaunchKernelGGL(( calcSampleValueThread_truth), dim3(blockNumStep0_truth), dim3(blockSizeStep0), sizeof(float) * blockSizeStep0 * 11, 0,
triangleListD, sampleValueD_truth,
activeThreadNumStep0_truth, triangleCtrlPointNum, triangleNum,
degree, order[U], order[V], order[W],
ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W]);
}
#endif
/************************************************************************************************************/
#define NEW_MOVE // movePN
#ifdef NEW_MOVE
//
__global__ void move(TriangleD *triangleListD, float *triangleCtrlPointD, int *triangle_adjacent_tableD,
int m_, int f, float center_factor, bool use_pn)
{
int triangleIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (triangleIdx >= f)
return;
int adj_face_idx[3];
adj_face_idx[0] = triangle_adjacent_tableD[triangleIdx * 3];
adj_face_idx[1] = triangle_adjacent_tableD[triangleIdx * 3 + 1];
adj_face_idx[2] = triangle_adjacent_tableD[triangleIdx * 3 + 2];
//int adj_edge_idx[3] = { -1, -1, -1 }; //
int adj_edge_idx[3] = { 0, 0, 0 }; //
//bool handle[3] = { false, false, false }; //
for (int i = 0; i < 3; ++i)
if (adj_face_idx[i] >= 0)
{
adj_edge_idx[i] = adj_face_idx[i] & 0x3;
adj_face_idx[i] = adj_face_idx[i] >> 2;
//handle[i] = true;
}
int n_count[3];
n_count[0] = triangleListD[triangleIdx].nc[0];
n_count[1] = triangleListD[triangleIdx].nc[1];
n_count[2] = triangleListD[triangleIdx].nc[2];
//printf("ncount = (%d, %d, %d): triangleIdx = %d\n", n_count[0], n_count[1], n_count[2], triangleIdx);
float *p_x = &triangleCtrlPointD[m_ * triangleIdx];
float *p_y = &triangleCtrlPointD[m_ * (f + triangleIdx)];
float *p_z = &triangleCtrlPointD[m_ * (f * 2 + triangleIdx)];
int edge_ctrlpoint_idx[6] = { 5, 2, 1, 3, 7, 8 }; // (0,10 2,31 4,52)
int corner_ctrlpoint_idx[6] = { 9, 0, 0, 6, 6, 9 }; // (0,10 2,31 4,52)
int oppo_corner_ctrlpoint_idx[6] = { 0, 9, 6, 0, 9, 6 }; // (0,10 2,31 4,52)
//int adj_corner_ctrlpoint_idx[3][2] = { { 0, 9 }, { 6, 0 }, { 9, 6 } }; // 0, 1, 2
//int adj_edge_ctrlpoint_idx[3][2] = { { 2, 5 }, { 3, 1 }, { 8, 7 } }; // 0, 1, 2
int adjust_normal_idx[6] = { 2, 0, 0, 1, 1, 2 };
int adj_corner_ctrlpoint_idx[3][2] = { { 0, 2 }, { 1, 0 }, { 2, 1 } }; // 0, 1, 2
//const float ZERO = 10e-6;
float3 delta = make_float3(0.0f, 0.0f, 0.0f), sum = make_float3(0.0f, 0.0f, 0.0f);
// 0 1 25, 2, 1, 3, 7, 8
//printf("for, triangleIdx = %d\n", triangleIdx);
for (int i = 0; i < 6; ++i)
{
float3 v_ctrlpoint_corner = make_float3(*(p_x + corner_ctrlpoint_idx[i]), *(p_y + corner_ctrlpoint_idx[i]), *(p_z + corner_ctrlpoint_idx[i]));
float3 v_ctrlpoint_corner_oppo = make_float3(*(p_x + oppo_corner_ctrlpoint_idx[i]), *(p_y + oppo_corner_ctrlpoint_idx[i]), *(p_z + oppo_corner_ctrlpoint_idx[i]));
float3 v01 = v_ctrlpoint_corner_oppo - v_ctrlpoint_corner;
float3 v_mid = 0.5 * (v_ctrlpoint_corner + v_ctrlpoint_corner_oppo);
//float3 n_ctrlpoint_corner = make_float3(*(pn_x + corner_ctrlpoint_idx[i]), *(pn_y + corner_ctrlpoint_idx[i]), *(pn_z + corner_ctrlpoint_idx[i]));
float3 n_ctrlpoint_corner = triangleListD[triangleIdx].n_adj[adjust_normal_idx[i]];
normalize(n_ctrlpoint_corner);
// p
float3 p = make_float3(*(p_x + edge_ctrlpoint_idx[i]), *(p_y + edge_ctrlpoint_idx[i]), *(p_z + edge_ctrlpoint_idx[i]));
if (n_count[i / 2] < 2) //
{
//if (adj_face_idx[i / 2] >= 0) //
//{
float3 result = p - ((p - v_ctrlpoint_corner) * n_ctrlpoint_corner) * n_ctrlpoint_corner;
#ifdef RE_LENGTH
float len0 = length(result);
float3 result_vector = result - v_ctrlpoint_corner;
float l_origin = length(p - v_ctrlpoint_corner);
float l_current = length(result_vector);
result_vector *= l_origin / l_current;
result = v_ctrlpoint_corner + result_vector;
float len1 = length(result);
printf("delta_leng_1_normal = %f\n", len1 - len0);
#endif
delta += (result - p);
*(p_x + edge_ctrlpoint_idx[i]) = result.x;
*(p_y + edge_ctrlpoint_idx[i]) = result.y;
*(p_z + edge_ctrlpoint_idx[i]) = result.z;
sum += result;
//}
//printf("only one : result_%d = (%f, %f, %f)\n", edge_ctrlpoint_idx[i], result.x, result.y, result.z);
}
//else if (handle[i / 2]) //
else //
{
float3 n1 = triangleListD[adj_face_idx[i / 2]].n_adj[adj_corner_ctrlpoint_idx[adj_edge_idx[i / 2]][i % 2]];
//printf("else, triangleIdx = %d, adj_face = %d, cp = %d, n1 = (%f, %f, %f)\n", triangleIdx, adj_face_idx[i / 2], edge_ctrlpoint_idx[i], n1.x, n1.y, n1.z);
normalize(n1);
//if (use_pn)
//{
float3 n_ave = cross(n_ctrlpoint_corner, n1);
normalize(n_ave);
//printf("t = %d, n_cross = %f, %f, %f\n", triangleIdx, n_ave.x, n_ave.y, n_ave.z);
//float3 result = v_ctrlpoint_corner + v01 * n_ave * 0.333333 * n_ave; // pn1/3
float3 result = v_ctrlpoint_corner + ((p - v_ctrlpoint_corner) * n_ave) * n_ave; //
#ifdef RE_LENGTH
float len0 = length(result);
float3 result_vector = result - v_ctrlpoint_corner;
float l_origin = length(p - v_ctrlpoint_corner);
float l_current = length(result_vector);
result_vector *= l_origin / l_current;
result = v_ctrlpoint_corner + result_vector;
float len1 = length(result);
printf("delta_leng_pn = %f\n", len1 - len0);
#endif
delta += (result - p);
*(p_x + edge_ctrlpoint_idx[i]) = result.x;
*(p_y + edge_ctrlpoint_idx[i]) = result.y;
*(p_z + edge_ctrlpoint_idx[i]) = result.z;
sum += result;
//printf("2 : result_%d = (%f, %f, %f)\n", edge_ctrlpoint_idx[i], result.x, result.y, result.z);
//}
//else
//{
//float t0 = 1.2345f, t1 = 2.3456f;
//float3 center0, center1;
//bool t0_exist = false, t1_exist = false;
//if (fabs(n_ctrlpoint_corner * v01) > ZERO)
//{
//t0 = (v_mid - v_ctrlpoint_corner) * v01 / (n_ctrlpoint_corner * v01);
//center0 = v_ctrlpoint_corner + t0 * n_ctrlpoint_corner;
//t0_exist = true;
////if (triangleIdx == 10 && i == 5)
////{
////printf("n0 = (%f, %f, %f), t0 = %f, center0 = (%f, %f, %f)\n",
////n_ctrlpoint_corner.x, n_ctrlpoint_corner.y, n_ctrlpoint_corner.z, t0, center0.x, center0.y, center0.z);
////}
//}
//if (fabs(n1 * v01) > ZERO)
//{
//t1 = (v_mid - v_ctrlpoint_corner) * v01 / (n1 * v01);
//center1 = v_ctrlpoint_corner + t1 * n1;
//t1_exist = true;
////if (triangleIdx == 10 && i == 5)
////{
////printf("n1 = (%f, %f, %f), t1 = %f, center1 = (%f, %f, %f)\n",
////n1.x, n1.y, n1.z, t1, center1.x, center1.y, center1.z);
////}
//}
////printf("t0 = %f, t1 = %f, triangleIdx = %d, cp = %d\n", t0, t1, triangleIdx, edge_ctrlpoint_idx[i]);
//float3 center_mid;
//if (t0_exist && t1_exist) //
//{
//float3 center_delta = center0 - center1;
//float t = (v_ctrlpoint_corner - center0) * center_delta / (center_delta * center_delta);
//center_mid = center0 + t * center_delta;
//float3 rad0 = v_ctrlpoint_corner - center0;
//float r0 = sqrt(rad0.x * rad0.x + rad0.y * rad0.y + rad0.z * rad0.z);
//float3 rad1 = v_ctrlpoint_corner - center1;
//float r1 = sqrt(rad1.x * rad1.x + rad1.y * rad1.y + rad1.z * rad1.z);
////printf(", =%d, cp=%d, t = %f, r0 = %f, r1 = %f\n", triangleIdx, edge_ctrlpoint_idx[i], t, r0, r1);
//}
//else if (t0_exist) //
//{
//float t = (v_ctrlpoint_corner - center0) * n1 / (n1 * n1);
//center_mid = center0 + t * n1;
////printf(", =%d, cp=%d, n1 = (%f, %f, %f), t = %f\n", triangleIdx, edge_ctrlpoint_idx[i], n1.x, n1.y, n1.z, t);
//}
//else if (t1_exist) //
//{
//float t = (v_ctrlpoint_corner - center1) * n_ctrlpoint_corner / (n_ctrlpoint_corner * n_ctrlpoint_corner);
//center_mid = center1 + t * n_ctrlpoint_corner;
////printf(", =%d, cp=%d, t = %f\n", triangleIdx, edge_ctrlpoint_idx[i], t);
//}
//else //
//{
////printf(", =%d, cp=%d\n", triangleIdx, edge_ctrlpoint_idx[i]);
//continue;
//}
//float3 n_ave = v_ctrlpoint_corner - center_mid;
//normalize(n_ave);
//float3 result = p - ((p - v_ctrlpoint_corner) * n_ave) * n_ave;
////printf("t = %d, n_ave = %f, %f, %f\tp = %f, %f, %f\t, result=%f, %f, %f\n", triangleIdx, n_ave.x, n_ave.y, n_ave.z, p.x, p.y, p.z, result.x, result.y, result.z);
//#ifdef RE_LENGTH
//float len0 = length(result);
//float3 result_vector = result - v_ctrlpoint_corner;
//float l_origin = length(p - v_ctrlpoint_corner);
//float l_current = length(result_vector);
//result_vector *= l_origin / l_current;
//result = v_ctrlpoint_corner + result_vector;
//float len1 = length(result);
//printf("delta_leng_my = %f\n", len1 - len0);
//#endif
//delta += (result - p);
//*(p_x + edge_ctrlpoint_idx[i]) = result.x;
//*(p_y + edge_ctrlpoint_idx[i]) = result.y;
//*(p_z + edge_ctrlpoint_idx[i]) = result.z;
//sum += result;
//float3 n_pn = cross(n_ctrlpoint_corner, n1);
//normalize(n_pn);
//float3 result_pn = v_ctrlpoint_corner + ((p - v_ctrlpoint_corner) * n_pn) * n_pn;
//float3 del = result_pn - result;
//float dot = n_ave * n_pn;
////printf("del = %f, %f, %f\t\tdot = %f\n", del.x, del.y, del.z, dot);
////printf("2 : result_%d = (%f, %f, %f)\n", edge_ctrlpoint_idx[i], result.x, result.y, result.z);
//}
}
}
// 4
#ifdef LESS_THAN_2
if (n_count[0] < 2 && n_count[1] < 2 && n_count[2] < 2)
#endif
{
float3 p = make_float3(*(p_x + 4), *(p_y + 4), *(p_z + 4));
/******** PN-Triangle *********/
//sum *= 1.0 / 6;
//float3 result = sum + (sum - p) * 0.5;
/******** delta *********/
delta *= center_factor / 6;
//delta *= 1.5 / 6;
float3 result = p + delta;
/******** *********/
*(p_x + 4) = result.x;
*(p_y + 4) = result.y;
*(p_z + 4) = result.z;
//printf("result_4 = (%f, %f, %f)\n", result.x, result.y, result.z);
}
}
#else
__global__ void move(TriangleD *triangleListD, float *triangleCtrlPointD, float *triangleNormalCtrlPointD, int m_, int f)
{
int triangleIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (triangleIdx >= f)
return;
float *p_x = &triangleCtrlPointD[m_ * triangleIdx];
float *p_y = &triangleCtrlPointD[m_ * (f + triangleIdx)];
float *p_z = &triangleCtrlPointD[m_ * (f * 2 + triangleIdx)];
float *pn_x = &triangleNormalCtrlPointD[m_ * triangleIdx];
float *pn_y = &triangleNormalCtrlPointD[m_ * (f + triangleIdx)];
float *pn_z = &triangleNormalCtrlPointD[m_ * (f * 2 + triangleIdx)];
/******* 1 *******/
/*#define MOVE1*/ // MOVE1
#ifdef MOVE1
float3 v = triangleListD[triangleIdx].v[0];
float3 n = triangleListD[triangleIdx].n[0];
#else
float3 v = make_float3(*p_x, *p_y, *p_z);
float3 n = make_float3(*pn_x, *pn_y, *pn_z);
float length = sqrt(n.x * n.x + n.y * n.y + n.z * n.z);
normalize(n);
#endif
float3 p = make_float3(*(p_x + 1), *(p_y + 1), *(p_z + 1));
float3 result = p - ((p - v) * n) * n;
float3 delta = result - p;
/*if (threadIdx.x == 0)*/
/*{*/
/*printf("triangleIdx = %d\n", triangleIdx);*/
/*printf(" = (%f, %f, %f), = (%f, %f, %f), = (%f, %f, %f),\n = (%f, %f, %f), = (%f, %f, %f)\n",*/
/*p.x, p.y, p.z, n.x, n.y, n.z, v.x, v.y, v.z, result.x, result.y, result.z, delta.x, delta.y, delta.z);*/
/*}*/
*(p_x + 1) = result.x;
*(p_y + 1) = result.y;
*(p_z + 1) = result.z;
float3 sum = result;
// 2
p = make_float3(*(p_x + 2), *(p_y + 2), *(p_z + 2));
result = p - ((p - v) * n) * n;
*(p_x + 2) = result.x;
*(p_y + 2) = result.y;
*(p_z + 2) = result.z;
sum += result;
/******* 3 *******/
#ifdef MOVE1
v = triangleListD[triangleIdx].v[1];
n = triangleListD[triangleIdx].n[1];
#else
v = make_float3(*(p_x + 6), *(p_y + 6), *(p_z + 6));
n = make_float3(*(pn_x + 6), *(pn_y + 6), *(pn_z + 6));
normalize(n);
#endif
p = make_float3(*(p_x + 3), *(p_y + 3), *(p_z + 3));
result = p - ((p - v) * n) * n;
*(p_x + 3) = result.x;
*(p_y + 3) = result.y;
*(p_z + 3) = result.z;
sum += result;
// 7
p = make_float3(*(p_x + 7), *(p_y + 7), *(p_z + 7));
result = p - ((p - v) * n) * n;
*(p_x + 7) = result.x;
*(p_y + 7) = result.y;
*(p_z + 7) = result.z;
sum += result;
/******* 8 *******/
#ifdef MOVE1
v = triangleListD[triangleIdx].v[2];
n = triangleListD[triangleIdx].n[2];
#else
v = make_float3(*(p_x + 9), *(p_y + 9), *(p_z + 9));
n = make_float3(*(pn_x + 9), *(pn_y + 9), *(pn_z + 9));
normalize(n);
#endif
p = make_float3(*(p_x + 8), *(p_y + 8), *(p_z + 8));
result = p - ((p - v) * n) * n;
*(p_x + 8) = result.x;
*(p_y + 8) = result.y;
*(p_z + 8) = result.z;
sum += result;
// 5
p = make_float3(*(p_x + 5), *(p_y + 5), *(p_z + 5));
result = p - ((p - v) * n) * n;
*(p_x + 5) = result.x;
*(p_y + 5) = result.y;
*(p_z + 5) = result.z;
sum += result;
/******* 4 *******/
p = make_float3(*(p_x + 4), *(p_y + 4), *(p_z + 4));
sum *= 1.0 / 6;
result = sum + (sum - p) * 0.5;
*(p_x + 4) = result.x;
*(p_y + 4) = result.y;
*(p_z + 4) = result.z;
}
#endif
float center_factor = 1.5f;
void calcTriangleCtrlPoint(bool adjust_silhouette, bool use_pn, AlgorithmType algo_type)
{
if (algo_type == CYM)
{
float alpha = 1.0f, beta = 0.0f;
/* */
hipblasStatus_t stat = hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
triangleCtrlPointNum_lower, triangleNum * 6, triangleCtrlPointNum + constrait_point_num,
&alpha,
matrixFittingD + matrixStartIdxFitting, triangleCtrlPointNum_lower,
sampleValueD, triangleCtrlPointNum + constrait_point_num,
&beta,
triangleCtrlPointD, triangleCtrlPointNum_lower);
if (stat != HIPBLAS_STATUS_SUCCESS)
{
cout << "triangleCtrlPointD fail!!!!!!!!!!!!!\tstat = " << stat << endl;
printCudaError(__FILE__, __FUNCTION__, __LINE__);
return;
}
}
//
hipLaunchKernelGGL(( calcAdjustNormal), dim3(blockNumAdjNormal), dim3(blockSizeAdjNormal), sizeof(float) * blockSizeAdjNormal * 8, 0,
triangleListD, triangleNum,
order[U], order[V], order[W],
ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W]);
if (algo_type == CYM)
{
//
if (adjust_silhouette)
{
const int move_block_size = 256;
int move_block_num = ceil(static_cast<double>(triangleNum) / move_block_size);
//cout << "move " << endl;
#ifdef NEW_MOVE
hipLaunchKernelGGL(( move), dim3(move_block_num), dim3(move_block_size), 0, 0, triangleListD, triangleCtrlPointD, triangle_adjacent_tableD,
triangleCtrlPointNum_lower, triangleNum, center_factor, use_pn);
#else
hipLaunchKernelGGL(( move), dim3(move_block_num), dim3(move_block_size), 0, 0, triangleListD, triangleCtrlPointD, triangleCtrlPointD + 3 * triangleNum * triangleCtrlPointNum_lower, triangleCtrlPointNum_lower, triangleNum);
#endif
//hipDeviceSynchronize();
//move<<<move_block_num, move_block_size>>>(triangleListD, triangleCtrlPointD, triangleCtrlPointD + 3 * triangleNum * triangleCtrlPointNum_lower, triangle_adjacent_tableD,
//triangleCtrlPointNum_lower, triangleNum, center_factor);
#ifndef MORPH
cout << "center_factor = " << center_factor << endl;
#endif
printCudaError(__FILE__, __FUNCTION__, __LINE__);
}
#ifdef DRAW_TRIANGULAR_CTRL_POINTS
//
hipMemcpy(triangular_ctrl_points, triangleCtrlPointD, sizeof(float) * 3 * triangleNum * triangleCtrlPointNum_lower, hipMemcpyDeviceToHost);
#endif
}
else
{
int blockNum = ceil(static_cast<double>(triangleNum) / 128);
hipLaunchKernelGGL(( calcCtrlPoint_PN), dim3(blockNum), dim3(128), 0, 0, triangleListD, triangle_adjacent_tableD, sampleValueD_PN, triangleCtrlPointD_PN, triangleNormalCtrlPointD_PN, triangleNum, triangleCtrlPointNum_lower);
#ifdef DRAW_TRIANGULAR_CTRL_POINTS
//
hipMemcpy(triangular_ctrl_points, triangleCtrlPointD_PN, sizeof(float) * 3 * triangleNum * triangleCtrlPointNum_lower, hipMemcpyDeviceToHost);
#endif
}
#ifndef MORPH
cout << "triangleNum = " << triangleNum << endl;
#endif
//float *test = new float[6 * triangleNum * 3];
//hipMemcpy(test, triangleNormalCtrlPointD_PN, sizeof(float) * 6 * triangleNum * 3, hipMemcpyDeviceToHost);
//for (int i = 0; i < triangleNum; ++i)
//{
//for (int j = 0; j < 6; ++j)
//{
////cout << i * 10 + j << ", " << (i + triangleNum) * 10 + j << ", " << (i + triangleNum * 2) * 10 + j << endl;
//cout
//<< test[i * 6 + j] << ", "
//<< test[(i + triangleNum) * 6 + j] << ", "
//<< test[(i + triangleNum * 2) * 6 + j] << endl;
//}
//cout << "================" << endl;
//}
//float *test = new float[triangleCtrlPointNum_lower * triangleNum * 6];
//hipMemcpy(test, triangleCtrlPointD, sizeof(float) * triangleCtrlPointNum_lower * triangleNum * 6, hipMemcpyDeviceToHost);
//float *v = test, *n = test + triangleCtrlPointNum_lower * triangleNum * 3;
//for (int i = 0; i < triangleNum; ++i)
////for (int i = 24; i < 25; ++i)
//{
//cout << "i = " << i << endl;
//for (int j = 0; j < triangleCtrlPointNum_lower; ++j)
//{
////if (j != 0 && j != 6) continue;
////cout << i * 10 + j << ", " << (i + triangleNum) * 10 + j << ", " << (i + triangleNum * 2) * 10 + j << endl;
//cout << "\t" << j << ": " << v[i * triangleCtrlPointNum_lower + j] << ", "
//<< v[(i + triangleNum) * triangleCtrlPointNum_lower + j] << ", "
//<< v[(i + triangleNum * 2) * triangleCtrlPointNum_lower + j] << "\t";
//double x = n[i * triangleCtrlPointNum_lower + j];
//double y = n[(i + triangleNum) * triangleCtrlPointNum_lower + j];
//double z = n[(i + triangleNum * 2) * triangleCtrlPointNum_lower + j];
//double length = sqrt(x * x + y * y + z * z);
//cout << "\t" << x / length << ", " << y / length << ", " << z / length << endl;
//}
//cout << "================" << endl;
//}
//delete []test;
}
#ifdef TRUTH
void matrixMul1_truth()
{
float alpha = 1.0f, beta = 0.0f;
hipblasStatus_t stat = hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
samplePointPerTriangle * 3, triangleCtrlPointNum, triangleCtrlPointNum,
&alpha,
BqD_truth, samplePointPerTriangle * 3,
B_1D_truth, triangleCtrlPointNum,
&beta,
BBD_truth, samplePointPerTriangle * 3);
if (stat != HIPBLAS_STATUS_SUCCESS)
{
cout << "CtrlPoint_truth fail!!!!!!!!!!!!!\tstat = " << stat << endl;
printCudaError(__FILE__, __FUNCTION__, __LINE__);
return;
}
}
#endif
/************************************************************************************************************/
__global__ void copy(float *RD,
int activeThreadNumCopy, bool firstLoad, float maxX, float maxY, float maxZ,
TriangleD *triangleListD, int segmentPerEdge, int f, int q,
float *normalPtrVBO, float *texCoordPtrVBO, float *texCoord3DPtrVBO, float *vertexPtrVBO)
{
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (globalIdx >= activeThreadNumCopy)
return;
int triangleIdx = globalIdx / q;
int localIdx = globalIdx % q;
vertexPtrVBO[globalIdx * 3 + 0] = RD[triangleIdx * q + localIdx];
vertexPtrVBO[globalIdx * 3 + 1] = RD[(triangleIdx + f) * q + localIdx];
vertexPtrVBO[globalIdx * 3 + 2] = RD[(triangleIdx + f * 2) * q + localIdx];
float *ND = RD + 3 * f * q;
normalPtrVBO[globalIdx * 3 + 0] = ND[triangleIdx * + q + localIdx];
normalPtrVBO[globalIdx * 3 + 1] = ND[(triangleIdx + f) * + q + localIdx];
normalPtrVBO[globalIdx * 3 + 2] = ND[(triangleIdx + f * 2) * + q + localIdx];
//if (firstLoad)
//{
////
//float2 vt0 = triangleListD[triangleIdx].vt[0];
//float2 vt1 = triangleListD[triangleIdx].vt[1];
//float2 vt2 = triangleListD[triangleIdx].vt[2];
//float tempFloorFloat = (sqrtf((float)(localIdx) * 8 + 9) - 3) / 2;
//int floor = rintf(tempFloorFloat);
//if ((floor * 2 + 3) * (floor * 2 + 3) != localIdx * 8 + 9)
//floor = ceilf(tempFloorFloat);
//int room = localIdx - (floor + 1) * floor / 2;
//float3 barycentric_coord;
//barycentric_coord.x = (float)(segmentPerEdge - floor) / segmentPerEdge;
//barycentric_coord.y = (float)(floor - room) / segmentPerEdge;
//barycentric_coord.z = 1.0f - barycentric_coord.x - barycentric_coord.y;
//float u = vt0.x * barycentric_coord.x + vt1.x * barycentric_coord.y + vt2.x * barycentric_coord.z;
//float v = vt0.y * barycentric_coord.x + vt1.y * barycentric_coord.y + vt2.y * barycentric_coord.z;
////
//texCoordPtrVBO[globalIdx * 2 + 0] = u;
//texCoordPtrVBO[globalIdx * 2 + 1] = v;
////
//float minMax = maxX;
//if (minMax > maxY)
//minMax = maxY;
//if (minMax > maxZ)
//minMax = maxZ;
////texCoord3DPtrVBO[globalIdx * 3 + 0] = vertexPtrVBO[globalIdx * 3 + 0] / maxX;
////texCoord3DPtrVBO[globalIdx * 3 + 1] = vertexPtrVBO[globalIdx * 3 + 1] / maxY;
////texCoord3DPtrVBO[globalIdx * 3 + 2] = vertexPtrVBO[globalIdx * 3 + 2] / maxZ;
//texCoord3DPtrVBO[globalIdx * 3 + 0] = vertexPtrVBO[globalIdx * 3 + 0] / minMax;
//texCoord3DPtrVBO[globalIdx * 3 + 1] = vertexPtrVBO[globalIdx * 3 + 1] / minMax;
//texCoord3DPtrVBO[globalIdx * 3 + 2] = vertexPtrVBO[globalIdx * 3 + 2] / minMax;
//}
}
#ifdef LINE
__global__ void make_bary(TriangleD *triangleListD, float *baryPtrVBO, float *oriBaryPtrVBO, int n, int q)
{
/***************************** ******************************/
int localIdx = threadIdx.x;
float tempFloorFloat = (sqrtf((float)localIdx * 8 + 9) - 3) / 2;
int floor = rintf(tempFloorFloat);
if ((floor * 2 + 3) * (floor * 2 + 3) != localIdx * 8 + 9)
floor = ceilf(tempFloorFloat);
int room = localIdx - (floor + 1) * floor / 2;
float3 barycentric_coord;
barycentric_coord.x = (float)(n - floor) / n;
barycentric_coord.y = (float)(floor - room) / n;
barycentric_coord.z = 1.0f - barycentric_coord.x - barycentric_coord.y;
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
baryPtrVBO[globalIdx * 3 + 0] = (float)(n - floor) / n;
baryPtrVBO[globalIdx * 3 + 1] = barycentric_coord.y;
baryPtrVBO[globalIdx * 3 + 2] = barycentric_coord.z;
/***************************** ******************************/
int triangleIdx = blockIdx.x;
//
float3 bary_origin0 = triangleListD[triangleIdx].bary_origin[0];
float3 bary_origin1 = triangleListD[triangleIdx].bary_origin[1];
float3 bary_origin2 = triangleListD[triangleIdx].bary_origin[2];
//
float3 bary_origin = bary_origin0 * barycentric_coord.x + bary_origin1 * barycentric_coord.y + bary_origin2 * barycentric_coord.z;
//
oriBaryPtrVBO[globalIdx * 3 + 0] = bary_origin.x;
oriBaryPtrVBO[globalIdx * 3 + 1] = bary_origin.y;
oriBaryPtrVBO[globalIdx * 3 + 2] = bary_origin.z;
}
#endif
#ifdef TRUTH
__global__ void copy_truth(float *RD_truth,
int activeThreadNumCopy, bool firstLoad,
TriangleD *triangleListD, int segmentPerEdge, int f, int q,
float *normalPtrVBO_truth, float *vertexPtrVBO_truth)
{
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (globalIdx >= activeThreadNumCopy)
return;
int triangleIdx = globalIdx / q;
int localIdx = globalIdx % q;
vertexPtrVBO_truth[globalIdx * 3 + 0] = RD_truth[triangleIdx * q * 3 + localIdx];
vertexPtrVBO_truth[globalIdx * 3 + 1] = RD_truth[(triangleIdx + f) * q * 3 + localIdx];
vertexPtrVBO_truth[globalIdx * 3 + 2] = RD_truth[(triangleIdx + f * 2) * q * 3 + localIdx];
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
float ux = RD_truth[triangleIdx * q * 3 + q + localIdx];
float uy = RD_truth[(triangleIdx + f) * q * 3 + q + localIdx];
float uz = RD_truth[(triangleIdx + f * 2) * q * 3 + q + localIdx];
float vx = RD_truth[triangleIdx * q * 3 + q * 2 + localIdx];
float vy = RD_truth[(triangleIdx + f) * q * 3 + q * 2 + localIdx];
float vz = RD_truth[(triangleIdx + f * 2) * q * 3 + q * 2 + localIdx];
float nx = uy * vz - uz * vy;
float ny = uz * vx - ux * vz;
float nz = ux * vy - uy * vx;
float l = sqrtf(nx * nx + ny * ny + nz * nz);
nx /= l;
ny /= l;
nz /= l;
normalPtrVBO_truth[globalIdx * 3 + 0] = nx;
normalPtrVBO_truth[globalIdx * 3 + 1] = ny;
normalPtrVBO_truth[globalIdx * 3 + 2] = nz;
}
#endif
bool registered = false;
GLuint normalVBO = 0, texCoordVBO = 0, texCoord3DVBO = 0, vertexVBO = 0;
#ifdef LINE
GLuint baryVBO = 0, oriBaryVBO = 0;
#endif
float *normalPtrVBO; //
float *texCoordPtrVBO; //
float *texCoord3DPtrVBO; //
float *vertexPtrVBO; //
#ifdef LINE
float *baryPtrVBO, *oriBaryPtrVBO; //
#endif
struct cudaGraphicsResource *normalVBO_CUDA, *texCoordVBO_CUDA, *texCoord3DVBO_CUDA, *vertexVBO_CUDA;
#ifdef LINE
struct cudaGraphicsResource *baryVBO_CUDA, *oriBaryVBO_CUDA;
#endif
void tessellateD(bool firstLoad, float maxX, float maxY, float maxZ, AlgorithmType algo_type)
{
float alpha = 1.0f, beta = 0.0f;
//
hipblasStatus_t stat;
if (algo_type == CYM)
{
stat = hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
samplePointPerTriangle, triangleNum * 6, triangleCtrlPointNum_lower,
&alpha,
BqD, samplePointPerTriangle,
triangleCtrlPointD, triangleCtrlPointNum_lower,
&beta,
RD, samplePointPerTriangle);
}
//else if (algo_type == PN_CUTTING)
else
{
stat = hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
samplePointPerTriangle, triangleNum * 3, triangleCtrlPointNum_lower,
&alpha,
BqD, samplePointPerTriangle,
triangleCtrlPointD_PN, triangleCtrlPointNum_lower,
&beta,
RD, samplePointPerTriangle);
stat = hipblasSgemm(cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N,
samplePointPerTriangle, triangleNum * 3, 6,
&alpha,
BqD_PN, samplePointPerTriangle,
triangleNormalCtrlPointD_PN, 6,
&beta,
RD + samplePointPerTriangle * triangleNum * 3, samplePointPerTriangle);
}
if (stat != HIPBLAS_STATUS_SUCCESS)
{
cout << "RD fail!!!!!!!!!!!!!\tstat = " << stat << "\t\t";
printCudaError(__FILE__, __FUNCTION__, __LINE__);
cout << endl;
return;
}
hipGraphicsMapResources(1, &normalVBO_CUDA, 0);
hipGraphicsMapResources(1, &texCoordVBO_CUDA, 0);
hipGraphicsMapResources(1, &texCoord3DVBO_CUDA, 0);
hipGraphicsMapResources(1, &vertexVBO_CUDA, 0);
#ifdef LINE
hipGraphicsMapResources(1, &baryVBO_CUDA, 0);
hipGraphicsMapResources(1, &oriBaryVBO_CUDA, 0);
#endif
size_t size2 = sizeof(float) * samplePointPerTriangle * triangleNum * 2;
size_t size3 = sizeof(float) * samplePointPerTriangle * triangleNum * 3;
hipGraphicsResourceGetMappedPointer((void**)&normalPtrVBO, &size3, normalVBO_CUDA);
hipGraphicsResourceGetMappedPointer((void**)&texCoordPtrVBO, &size2, texCoordVBO_CUDA);
hipGraphicsResourceGetMappedPointer((void**)&texCoord3DPtrVBO, &size2, texCoord3DVBO_CUDA);
hipGraphicsResourceGetMappedPointer((void**)&vertexPtrVBO, &size3, vertexVBO_CUDA);
#ifdef LINE
hipGraphicsResourceGetMappedPointer((void**)&baryPtrVBO, &size3, baryVBO_CUDA);
hipGraphicsResourceGetMappedPointer((void**)&oriBaryPtrVBO, &size3, oriBaryVBO_CUDA);
#endif
hipLaunchKernelGGL(( copy), dim3(blockNumCopy), dim3(blockSizeCopy), 0, 0, RD,
activeThreadNumCopy, firstLoad, maxX, maxY, maxZ, triangleListD, segmentPerEdge, triangleNum, samplePointPerTriangle,
normalPtrVBO, texCoordPtrVBO, texCoord3DPtrVBO, vertexPtrVBO);
#ifdef LINE
hipLaunchKernelGGL(( make_bary), dim3(triangleNum), dim3(samplePointPerTriangle), 0, 0, triangleListD, baryPtrVBO, oriBaryPtrVBO, segmentPerEdge, samplePointPerTriangle);
#endif
hipGraphicsUnmapResources(1, &normalVBO_CUDA, 0);
hipGraphicsUnmapResources(1, &texCoordVBO_CUDA, 0);
hipGraphicsUnmapResources(1, &texCoord3DVBO_CUDA, 0);
hipGraphicsUnmapResources(1, &vertexVBO_CUDA, 0);
#ifdef LINE
hipGraphicsUnmapResources(1, &baryVBO_CUDA, 0);
hipGraphicsUnmapResources(1, &oriBaryVBO_CUDA, 0);
#endif
}
//#ifdef TRUTH
GLuint normalVBO_truth = 0, vertexVBO_truth = 0;
float *normalPtrVBO_truth; //
float *vertexPtrVBO_truth; //
struct cudaGraphicsResource* normalVBO_CUDA_truth;
struct cudaGraphicsResource* vertexVBO_CUDA_truth;
//double vertex_error_ave_max = 0.0, vertex_error_max_max = 0.0;
//double normal_error_ave_max = 0.0, normal_error_max_max = 0.0;
int triangleCoord(int floor, int room)
{
return (1 + floor) * floor / 2 + room;
}
__global__ void my_to_truth(int f, int q, int point_per_real_face, float *myV, float *realV, int *my_to_truth_tableD, int *belongs_to_originD)
{
int globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
int triangleIdx = globalIdx / q;
if (triangleIdx >= f)
return;
//if (blockIdx.x == 0 && threadIdx.x == 0)
//{
//printf("test = %f ,, %f ,, %f\n", realV[3 * 3], realV[3 * 3 + 1], realV[3 * 3 + 2]);
//}
float dist_min = 999999, idx_min = -1;
realV += belongs_to_originD[triangleIdx] * point_per_real_face * 3;
//printf("shift = %d, belongto = %d, pointper = %d\n", belongs_to_originD[triangleIdx] * point_per_real_face, belongs_to_originD[triangleIdx], point_per_real_face);
//printf("f = %d, q = %d, belongs_to = %d\n", f, q, belongs_to_originD[triangleIdx]);
//for (int i = 0; i < point_per_real_face * 36; ++i)
for (int i = 0; i < point_per_real_face; ++i)
{
float dx = myV[globalIdx * 3] - realV[i * 3];
float dy = myV[globalIdx * 3 + 1] - realV[i * 3 + 1];
float dz = myV[globalIdx * 3 + 2] - realV[i * 3 + 2];
float dist = sqrt(dx * dx + dy * dy + dz * dz);
if (dist < dist_min)
{
dist_min = dist;
idx_min = i;
}
//if (blockIdx.x == 0 && threadIdx.x == 0)
//{
//printf("i = %d, dx = %f, dy = %f, fz = %f, dist = %f, origin = %d, now_in_face = %d\n", i, dx, dy, dz, dist, belongs_to_originD[triangleIdx], i / point_per_real_face);
//}
}
//printf("idx_min = %d\n", idx_min);
//my_to_truth_tableD[globalIdx] = idx_min;
my_to_truth_tableD[globalIdx] = idx_min + belongs_to_originD[triangleIdx] * point_per_real_face;
}
__global__ void deformTeapot(float3 *vertexParamListD_teapot, float3 *normalParamListD_teapot,
int activeThreadNum,
float *normalPtrVBO_truth, float *vertexPtrVBO_truth,
int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW)
{
// u, v, w x, y, z
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= activeThreadNum)
return;
float u = vertexParamListD_teapot[i].x;
float v = vertexParamListD_teapot[i].y;
float w = vertexParamListD_teapot[i].z;
//if (blockIdx.x == 0 && threadIdx.x == 3)
//printf("%f, %f, %f\n", u, v, w);
// u, v, w
int knot_interval_count_u = orderU + ctrlPointNumU - (orderU - 1) * 2 - 1;
int knot_interval_count_v = orderV + ctrlPointNumV - (orderV - 1) * 2 - 1;
int knot_interval_count_w = orderW + ctrlPointNumW - (orderW - 1) * 2 - 1;
//
int left_idx_u = orderU - 1 + knot_interval_count_u - 1;
int left_idx_v = orderV - 1 + knot_interval_count_v - 1;
int left_idx_w = orderW - 1 + knot_interval_count_w - 1;
// U
for (int ii = orderU - 1; ii <= orderU - 1 + knot_interval_count_u - 1; ++ii)
{
if (u >= knotListD[ii] && u < knotListD[ii + 1])
{
left_idx_u = ii;
break;
}
}
// V
for (int jj = orderV - 1; jj <= orderV - 1 + knot_interval_count_v - 1; ++jj)
{
if (v >= knotListD[20 + jj] && v < knotListD[20 + jj + 1])
{
left_idx_v = jj;
break;
}
}
// W
for (int kk = orderW - 1; kk <= orderW - 1 + knot_interval_count_w - 1; ++kk)
{
if (w >= knotListD[40 + kk] && w < knotListD[40 + kk + 1])
{
left_idx_w = kk;
break;
}
}
float tmpKnot = knotListD[left_idx_u];
float tmpKnot1 = knotListD[left_idx_u + 1];
float x_stride = tmpKnot1 - tmpKnot;
u = (u - tmpKnot) / x_stride;
tmpKnot = knotListD[20 + left_idx_v];
tmpKnot1 = knotListD[20 + left_idx_v + 1];
float y_stride = tmpKnot1 - tmpKnot;
v = (v - tmpKnot) / y_stride;
tmpKnot = knotListD[40 + left_idx_w];
tmpKnot1 = knotListD[40 + left_idx_w + 1];
float z_stride = tmpKnot1 - tmpKnot;
w = (w - tmpKnot) / z_stride;
extern __shared__ float shared_array[];
// B
// fu J_barF_bar_xuF_bar_yuF_bar_zu
// fv J_barF_bar_xvF_bar_yvF_bar_zv
float3 result, fu, fv;
BSplineVolumeValueMatrixD_combine(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW,
result, fu, fv);
__syncthreads();
//sampleValueD[index2c(localIdx, triangleIdx , m + c)] = result.x;
//sampleValueD[index2c(localIdx, triangleIdx + f , m + c)] = result.y;
//sampleValueD[index2c(localIdx, triangleIdx + f * 2, m + c)] = result.z;
vertexPtrVBO_truth[i * 3 + 0] = result.x;
vertexPtrVBO_truth[i * 3 + 1] = result.y;
vertexPtrVBO_truth[i * 3 + 2] = result.z;
//printf("vertexPtrVBO = %f, %f, %f\n", vertexPtrVBO_truth[i * 3], vertexPtrVBO_truth[i * 3 + 1], vertexPtrVBO_truth[i * 3 + 2]);
///////////////////////////////////////////////////////////////////////////////
// fw J_barF_bar_xwF_bar_ywF_bar_zw
float3 fw = BSplineVolumeValueMatrixDw(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW);
//__syncthreads();
u = normalParamListD_teapot[i].x;
v = normalParamListD_teapot[i].y;
w = normalParamListD_teapot[i].z;
//float *sampleNormalD = sampleValueD + 3 * f * (m + c);
// J_bar_star_T_[012]J_bar(J_bar*T)
float J_bar_star_T_0 = fv.y * fw.z - fw.y * fv.z;
float J_bar_star_T_1 = fw.y * fu.z - fu.y * fw.z;
float J_bar_star_T_2 = fu.y * fv.z - fv.y * fu.z;
normalPtrVBO_truth[i * 3 + 0] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
//sampleNormalD[index2c(localIdx, triangleIdx, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]J_bar(J_bar*T)
J_bar_star_T_0 = fv.z * fw.x - fw.z * fv.x;
J_bar_star_T_1 = fw.z * fu.x - fu.z * fw.x;
J_bar_star_T_2 = fu.z * fv.x - fv.z * fu.x;
normalPtrVBO_truth[i * 3 + 1] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
//sampleNormalD[index2c(localIdx, triangleIdx + f, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]J_bar(J_bar*T)
J_bar_star_T_0 = fv.x * fw.y - fw.x * fv.y;
J_bar_star_T_1 = fw.x * fu.y - fu.x * fw.y;
J_bar_star_T_2 = fu.x * fv.y - fv.x * fu.y;
normalPtrVBO_truth[i * 3 + 2] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
//sampleNormalD[index2c(localIdx, triangleIdx + f * 2, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
//float tx = normalPtrVBO_truth[i * 3 + 0];
//float ty = normalPtrVBO_truth[i * 3 + 1];
//float tz = normalPtrVBO_truth[i * 3 + 2];
//float length = sqrt(tx * tx + ty * ty + tz * tz);
//tx /= length;
//ty /= length;
//tz /= length;
//printf("ori = %f, %f, %f\tdeformed = %f, %f, %f\n", u, v, w, tx, ty, tz);
}
using namespace objdata;
float color_map_vertex(const VertexCoord &v0, const VertexCoord v1, float range)
{
return (v0 - v1).norm() / range;
}
float color_map_normal(const NormalCoord &n0, const NormalCoord &n1, float range)
{
float result = 2 * asin((n0 - n1).norm() * 0.5) / range;
if (result < 0)
result = 0;
else if (result > 1)
result = 1;
return result;
//return 2 * asin((n0 - n1).norm() * 0.5) / range;
}
float *texture_coord;
void tessellateD_truth(bool adjust_silhouette, bool firstLoad, vector<int> &teapotFaceList, vector<int> &belongs_to_origin, int u_seg, int v_seg)
{
hipError_t cymError;
//cymError = hipMemcpy(my_to_truth_table, my_to_truth_tableD, sizeof(int) * samplePointPerTriangle * triangleNum, hipMemcpyDeviceToHost);
//if (cymError)
//cout << __FILE__ << "" << __LINE__ << ", " << cymError << ": " << hipGetErrorString(cymError) << endl;
size_t size2 = sizeof(float) * samplePointPerTriangle * triangleNum * 2;
size_t size3 = sizeof(float) * samplePointPerTriangle * triangleNum * 3;
size_t size3_truth = sizeof(float) * vertexCount_teapot * 3;
cymError = hipGetLastError();
if (cymError)
cout << __FILE__ << "" << __LINE__ << ", " << cymError << ": " << hipGetErrorString(cymError) << endl;
hipGraphicsMapResources(1, &normalVBO_CUDA, 0);
hipGraphicsMapResources(1, &vertexVBO_CUDA, 0);
hipGraphicsMapResources(1, &texCoordVBO_CUDA, 0);
hipGraphicsMapResources(1, &texCoord3DVBO_CUDA, 0);
hipGraphicsMapResources(1, &normalVBO_CUDA_truth, 0);
hipGraphicsMapResources(1, &vertexVBO_CUDA_truth, 0);
hipGraphicsResourceGetMappedPointer((void**)&normalPtrVBO, &size3, normalVBO_CUDA);
hipGraphicsResourceGetMappedPointer((void**)&vertexPtrVBO, &size3, vertexVBO_CUDA);
hipGraphicsResourceGetMappedPointer((void**)&texCoordPtrVBO, &size2, texCoordVBO_CUDA);
hipGraphicsResourceGetMappedPointer((void**)&texCoord3DPtrVBO, &size2, texCoord3DVBO_CUDA);
hipGraphicsResourceGetMappedPointer((void**)&normalPtrVBO_truth, &size3_truth, normalVBO_CUDA_truth);
hipGraphicsResourceGetMappedPointer((void**)&vertexPtrVBO_truth, &size3_truth, vertexVBO_CUDA_truth);
//
int block_size = 128;
int block_num = ceil(static_cast<double>(vertexCount_teapot) / block_size);
hipLaunchKernelGGL(( deformTeapot), dim3(block_num), dim3(block_size), sizeof(float) * block_size * 13, 0,
vertexParamListD_teapot, normalParamListD_teapot,
vertexCount_teapot,
normalPtrVBO_truth, vertexPtrVBO_truth,
order[U], order[V], order[W],
ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W]);
cymError = hipGetLastError();
if (cymError)
cout << __FILE__ << "" << __LINE__ << ", " << cymError << ": " << hipGetErrorString(cymError) << endl;
//
if (firstLoad)
{
texture_coord = new float[size2 / sizeof(float)];
cout << "" << endl;
cout << "block_num = " << block_num << endl;
int size = belongs_to_origin.size();
int *belongs_to_originD;
hipMalloc((void**)&belongs_to_originD, sizeof(int) * size);
//cout << "belongsize = " << size << endl;
cymError = hipMemcpy(belongs_to_originD, &belongs_to_origin[0], sizeof(int) * size, hipMemcpyHostToDevice);
if (cymError)
cout << __FILE__ << "" << __LINE__ << ", " << cymError << ": " << hipGetErrorString(cymError) << endl;
int block_size = 128;
int block_num = ceil(static_cast<double>(triangleNum * samplePointPerTriangle) / block_size);
cout << "my_to_truth.blockNum = " << block_num << endl;
hipLaunchKernelGGL(( my_to_truth), dim3(block_num), dim3(block_size), 0, 0, triangleNum, samplePointPerTriangle,
(u_seg + 1) * (v_seg + 1), vertexPtrVBO, vertexPtrVBO_truth,
my_to_truth_tableD, belongs_to_originD);
cymError = hipGetLastError();
if (cymError)
cout << __FILE__ << "" << __LINE__ << ", " << cymError << ": " << hipGetErrorString(cymError) << endl;
cymError = hipMemcpy(my_to_truth_table, my_to_truth_tableD, sizeof(int) * samplePointPerTriangle * triangleNum, hipMemcpyDeviceToHost);
if (cymError)
cout << __FILE__ << "" << __LINE__ << ", " << cymError << ": " << hipGetErrorString(cymError) << endl;
//cout << "triangleNum = " << triangleNum << ", samplePointPerTriangle = "
//<< samplePointPerTriangle << endl;
//for (int i = 0; i < samplePointPerTriangle * triangleNum; ++i)
//{
//cout << my_to_truth_table[i] << " ";
//if (i % 20 == 19)
//cout << endl;
//}
//int *tttt = new int[size];
//hipMemcpy(tttt, belongs_to_originD, sizeof(int) * size, hipMemcpyDeviceToHost);
//for (int i = 0; i < size; ++i)
//{
//cout << tttt[i] << " ";
//if (i % 20 == 19)
//cout << endl;
//}
//delete []tttt;
}
/*------------------------ ----------------------------*/
/* */
float *result = new float[size3 / sizeof(float)];
float *result_truth = new float[size3_truth / sizeof(float)];
cymError = hipMemcpy(result, vertexPtrVBO, size3, hipMemcpyDeviceToHost);
if (cymError)
cout << __FILE__ << "" << __LINE__ << ", " << cymError << ": " << hipGetErrorString(cymError) << endl;
cymError = hipMemcpy(result_truth, vertexPtrVBO_truth, size3_truth, hipMemcpyDeviceToHost);
if (cymError)
cout << __FILE__ << "" << __LINE__ << ", " << cymError << ": " << hipGetErrorString(cymError) << endl;
double vertex_error_ave_max = 0.0, vertex_error_max_max = 0.0;
double normal_error_ave_max = 0.0, normal_error_max_max = 0.0;
double error_ave = 0.0, error_max = 0.0;
for (int i = 0; i < samplePointPerTriangle * triangleNum; ++i)
{
double x0 = result[i * 3];
double y0 = result[i * 3 + 1];
double z0 = result[i * 3 + 2];
int real_idx = my_to_truth_table[i];
double x1 = result_truth[real_idx * 3];
double y1 = result_truth[real_idx * 3 + 1];
double z1 = result_truth[real_idx * 3 + 2];
double error = sqrt((x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1) + (z0 - z1) * (z0 - z1));
//cout << "error = " << error << endl;
error_ave += error;
if (error_max < error)
error_max = error;
float vertex_diff = color_map_vertex(VertexCoord(x0, y0, z0), VertexCoord(x1, y1, z1), 0.04);
texture_coord[i * 2] = vertex_diff;
texture_coord[i * 2 + 1] = 0.5;
}
hipMemcpy(texCoordPtrVBO, texture_coord, size2, hipMemcpyHostToDevice);
/*cout << "eeeeee samplePonitPerTriangle = " << samplePointPerTriangle << endl;*/
/*cout << "eeeeee triangleNum = " << triangleNum << endl;*/
/*cout << "eeeeee samplePonitPerTriangle * triangleNum = " << samplePointPerTriangle * triangleNum << endl;*/
/*cout << "eeeeee error = " << error_ave / (samplePointPerTriangle * triangleNum)*/
/*<< ", error_max = " << error_max << endl;*/
if (error_ave > vertex_error_ave_max)
vertex_error_ave_max = error_ave;
if (error_max > vertex_error_max_max)
vertex_error_max_max = error_max;
if (adjust_silhouette)
cout << "" << endl;
else
cout << "" << endl;
cout << "eeeeee = " << vertex_error_ave_max / (samplePointPerTriangle * triangleNum) << ", = " << vertex_error_max_max << endl;
/* */
double volume = 0.0;
for (int f = 0; f < triangleNum; ++f)
{
for (int i = 0; i < segmentPerEdge; ++i)
{
for (int j = 0; j <= i; ++j)
{
// smooth FFD
double v0x = result[samplePointPerTriangle * 3 * f + triangleCoord(i, j) * 3 + 0];
double v0y = result[samplePointPerTriangle * 3 * f + triangleCoord(i, j) * 3 + 1];
double v0z = result[samplePointPerTriangle * 3 * f + triangleCoord(i, j) * 3 + 2];
double v1x = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j) * 3 + 0];
double v1y = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j) * 3 + 1];
double v1z = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j) * 3 + 2];
double v2x = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j + 1) * 3 + 0];
double v2y = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j + 1) * 3 + 1];
double v2z = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j + 1) * 3 + 2];
volume += (v0z + v1z + v2z) * ((v1x - v0x) * (v2y - v0y) - (v2x - v0x) * (v1y - v0y));
if (i < segmentPerEdge - 1)
{
double v0x = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j) * 3 + 0];
double v0y = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j) * 3 + 1];
double v0z = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j) * 3 + 2];
double v1x = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 2, j + 1) * 3 + 0];
double v1y = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 2, j + 1) * 3 + 1];
double v1z = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 2, j + 1) * 3 + 2];
double v2x = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j + 1) * 3 + 0];
double v2y = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j + 1) * 3 + 1];
double v2z = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j + 1) * 3 + 2];
volume += (v0z + v1z + v2z) * ((v1x - v0x) * (v2y - v0y) - (v2x - v0x) * (v1y - v0y));
}
}
}
}
volume /= 6;
double volume_truth = 0.0;
for (vector<int>::size_type i = 0; i < teapotFaceList.size() / 3; ++i)
{
int id0 = teapotFaceList[i * 3];
int id1 = teapotFaceList[i * 3 + 1];
int id2 = teapotFaceList[i * 3 + 2];
double v0x = result_truth[id0 * 3];
double v0y = result_truth[id0 * 3 + 1];
double v0z = result_truth[id0 * 3 + 2];
double v1x = result_truth[id1 * 3];
double v1y = result_truth[id1 * 3 + 1];
double v1z = result_truth[id1 * 3 + 2];
double v2x = result_truth[id2 * 3];
double v2y = result_truth[id2 * 3 + 1];
double v2z = result_truth[id2 * 3 + 2];
volume_truth += (v0z + v1z + v2z) * ((v1x - v0x) * (v2y - v0y) - (v2x - v0x) * (v1y - v0y));
}
volume_truth /= 6;
cout << "eeeeee = " << volume << ", = " << volume_truth << endl;
cout << "eeeeee = " << volume - volume_truth << ", = " << fabs(volume - volume_truth) / volume_truth << endl;
/* */
cymError = hipMemcpy(result, normalPtrVBO, size3, hipMemcpyDeviceToHost);
if (cymError)
cout << __FILE__ << "" << __LINE__ << ", " << cymError << ": " << hipGetErrorString(cymError) << endl;
cymError = hipMemcpy(result_truth, normalPtrVBO_truth, size3_truth, hipMemcpyDeviceToHost);
if (cymError)
cout << __FILE__ << "" << __LINE__ << ", " << cymError << ": " << hipGetErrorString(cymError) << endl;
error_ave = 0.0, error_max = 0.0;
float x0_max, y0_max, z0_max, x1_max, y1_max, z1_max;
const float PI = 3.14159265358979;
for (int i = 0; i < samplePointPerTriangle * triangleNum; ++i)
{
double x0 = result[i * 3];
double y0 = result[i * 3 + 1];
double z0 = result[i * 3 + 2];
int real_idx = my_to_truth_table[i];
double x1 = result_truth[real_idx * 3];
double y1 = result_truth[real_idx * 3 + 1];
double z1 = result_truth[real_idx * 3 + 2];
double length = sqrt(x0 * x0 + y0 * y0 + z0 * z0);
x0 /= length; y0 /= length; z0 /= length;
length = sqrt(x1 * x1 + y1 * y1 + z1 * z1);
x1 /= length; y1 /= length; z1 /= length;
//cout << "ori = " << x0 << ", " << y0 << ", " << z0 << "\t"
//<< "deform = " << x1 << ", " << y1 << ", " << z1 << endl;
double error = sqrt((x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1) + (z0 - z1) * (z0 - z1));
error = 2 * asin(error * 0.5);
//error = 1 * asin(error / 1);
error_ave += error;
if (error_max < error)
{
error_max = error;
x0_max = x0;
y0_max = y0;
z0_max = z0;
x1_max = x1;
y1_max = y1;
z1_max = z1;
}
//float normal_diff = color_map_normal(VertexCoord(x0, y0, z0), VertexCoord(x1, y1, z1), PI / 3);
float normal_diff = color_map_normal(VertexCoord(x0, y0, z0), VertexCoord(x1, y1, z1), PI / 20);
texture_coord[i * 2] = normal_diff;
texture_coord[i * 2 + 1] = 0.5;
}
hipMemcpy(texCoord3DPtrVBO, texture_coord, size2, hipMemcpyHostToDevice);
if (error_ave > normal_error_ave_max)
normal_error_ave_max = error_ave;
if (error_max > normal_error_max_max)
normal_error_max_max = error_max;
cout << "max0 = " << x0_max << ", " << y0_max << ", " << z0_max;
cout << "\tmax1 = " << x1_max << ", " << y1_max << ", " << z1_max << endl;
cout << "eeeeee = " << normal_error_ave_max / (samplePointPerTriangle * triangleNum) * 180 / PI
<< ", = " << normal_error_max_max * 180 / PI << endl << endl;
hipGraphicsUnmapResources(1, &normalVBO_CUDA, 0);
hipGraphicsUnmapResources(1, &vertexVBO_CUDA, 0);
hipGraphicsUnmapResources(1, &texCoordVBO_CUDA, 0);
hipGraphicsUnmapResources(1, &texCoord3DVBO_CUDA, 0);
hipGraphicsUnmapResources(1, &normalVBO_CUDA_truth, 0);
hipGraphicsUnmapResources(1, &vertexVBO_CUDA_truth, 0);
cymError = hipGetLastError();
if (cymError)
cout << __FILE__ << "" << __LINE__ << ", " << cymError << ": " << hipGetErrorString(cymError) << endl;
delete []result;
delete []result_truth;
/*---------------------- --------------------------*/
}
//#endif
/************************************************************************************************************/
void setGLDevice()
{
hipGLSetGLDevice(0);
}
/* cuda OpenGL */
void regGLBuffer()
{
printCudaError(__FILE__, __FUNCTION__, __LINE__);
if (registered)
{
hipGraphicsUnregisterResource(normalVBO_CUDA);
hipGraphicsUnregisterResource(texCoordVBO_CUDA);
hipGraphicsUnregisterResource(texCoord3DVBO_CUDA);
hipGraphicsUnregisterResource(vertexVBO_CUDA);
#ifdef LINE
hipGraphicsUnregisterResource(baryVBO_CUDA);
hipGraphicsUnregisterResource(oriBaryVBO_CUDA);
#endif
//#ifdef TRUTH
hipGraphicsUnregisterResource(normalVBO_CUDA_truth);
hipGraphicsUnregisterResource(vertexVBO_CUDA_truth);
//#endif
registered = false;
}
printCudaError(__FILE__, __FUNCTION__, __LINE__);
hipGraphicsGLRegisterBuffer(&normalVBO_CUDA, normalVBO, hipGraphicsMapFlagsWriteDiscard);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
hipGraphicsGLRegisterBuffer(&texCoordVBO_CUDA, texCoordVBO, hipGraphicsMapFlagsWriteDiscard);
hipGraphicsGLRegisterBuffer(&texCoord3DVBO_CUDA, texCoord3DVBO, hipGraphicsMapFlagsWriteDiscard);
hipGraphicsGLRegisterBuffer(&vertexVBO_CUDA, vertexVBO, hipGraphicsMapFlagsWriteDiscard);
#ifdef LINE
hipGraphicsGLRegisterBuffer(&baryVBO_CUDA, baryVBO, hipGraphicsMapFlagsWriteDiscard);
hipGraphicsGLRegisterBuffer(&oriBaryVBO_CUDA, oriBaryVBO, hipGraphicsMapFlagsWriteDiscard);
#endif
//#ifdef TRUTH
hipGraphicsGLRegisterBuffer(&normalVBO_CUDA_truth, normalVBO_truth, hipGraphicsMapFlagsWriteDiscard);
hipGraphicsGLRegisterBuffer(&vertexVBO_CUDA_truth, vertexVBO_truth, hipGraphicsMapFlagsWriteDiscard);
//#endif
registered = true;
printCudaError(__FILE__, __FUNCTION__, __LINE__);
}
/************************************************************************************************************/
void cudaFreeNonZero(void **ptr)
{
if (*ptr)
{
hipFree(*ptr);
*ptr = 0;
}
}
void freeTessMemD()
{
cudaFreeNonZero((void**)&BqD);
cudaFreeNonZero((void**)&BqD_PN);
cudaFreeNonZero((void**)&RD);
cudaFreeNonZero((void**)&my_to_truth_tableD);
delete []my_to_truth_table;
#ifdef TRUTH
cudaFreeNonZero((void**)&BqD_truth);
cudaFreeNonZero((void**)&BBD_truth);
cudaFreeNonZero((void**)&RD_truth);
#endif
}
void freeModelMemD()
{
cudaFreeNonZero((void**)&vertexParamListD);
cudaFreeNonZero((void**)&vertexCoordListD);
cudaFreeNonZero((void**)&vertexParamListD_teapot);
//cudaFreeNonZero((void**)&vertexCoordListD_teapot);
cudaFreeNonZero((void**)&triangleListD);
cudaFreeNonZero((void**)&sampleValueD);
cudaFreeNonZero((void**)&sampleValueD_PN);
cudaFreeNonZero((void**)&triangleCtrlPointD);
cudaFreeNonZero((void**)&triangleCtrlPointD_PN);
cudaFreeNonZero((void**)&triangleNormalCtrlPointD_PN);
cudaFreeNonZero((void**)&triangle_adjacent_tableD);
#ifdef TRUTH
cudaFreeNonZero((void**)&sampleValueD_truth);
cudaFreeNonZero((void**)&B_1D_truth);
#endif
degreeMemD = 0;
modelMemD = 0;
#ifdef DRAW_TRIANGULAR_CTRL_POINTS
delete []triangular_ctrl_points;
#endif
freeTessMemD();
}
void freeMemD()
{
if (registered)
{
hipGraphicsUnregisterResource(normalVBO_CUDA);
hipGraphicsUnregisterResource(texCoordVBO_CUDA);
hipGraphicsUnregisterResource(texCoord3DVBO_CUDA);
hipGraphicsUnregisterResource(vertexVBO_CUDA);
#ifdef LINE
hipGraphicsUnregisterResource(baryVBO_CUDA);
hipGraphicsUnregisterResource(oriBaryVBO_CUDA);
#endif
//#ifdef TRUTH
hipGraphicsUnregisterResource(normalVBO_CUDA_truth);
hipGraphicsUnregisterResource(vertexVBO_CUDA_truth);
//#endif
registered = false;
}
if (cublas_handle)
{
hipblasDestroy(cublas_handle);
}
cudaFreeNonZero((void**)&matrixFittingIdxD);
cudaFreeNonZero((void**)&matrixFittingD);
permanentMemD = 0;
freeModelMemD();
}
| b3d461567ec458c1084c4543f583c38004b6ba60.cu | #include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <cuda.h>
#include <cuda_gl_interop.h>
#include <cublas_v2.h>
#include "common_data.h"
using namespace std;
int totalMemD = 0;
int permanentMemD = 0; // 与模型无关的内存使用量(只需要程序退出时释放)
int modelMemD = 0; // 仅和模型相关的内存使用量(重新载入模型时释放)
int degreeMemD = 0; // 仅和B样条体次数相关的内存使用量(重新设定B样条体、重新载入模型时释放)
int tessMemD = 0; // 和细分程度相关的内存使用量(重新设定采样点数、重新设定B样条体、重新载入模型时释放)
int viewMemD = 0; // 显示函数申请的显存量
//ofstream fout("cuda.txt");
void callCudaThreadSynchronize()
{
cudaThreadSynchronize();
}
/* B 样条体求值所需的矩阵 */
extern float matrix_b_spline_f[185];
static __device__ float matrix_b_spline_d[185];
/* 根据阶数、控制顶点数、左端节点的编号返回相应的 B 样条矩阵(用于 B 样条体求值) */
template <typename T>
__host__ __device__ T *matrixCase(T *matrix_b_spline, int order, int ctrlPointNum, int leftIdx)
{
if (order == 1)
return matrix_b_spline; // MB1
else if (order == 2)
return matrix_b_spline + 1; // MB2
else if (order == 3)
{
if (ctrlPointNum == 3)
return matrix_b_spline + 5; // MB30
else
{
if (leftIdx == 2)
return matrix_b_spline + 14; // MB31
else if (leftIdx == ctrlPointNum - 1)
return matrix_b_spline + 23; // MB32
else
return matrix_b_spline + 32; // MB33
}
}
else
{
if (ctrlPointNum == 4)
return matrix_b_spline + 41; // MB40
else if (ctrlPointNum == 5)
{
if (leftIdx == 3)
return matrix_b_spline + 57; // MB41
else
return matrix_b_spline + 73; // MB42
}
else if (ctrlPointNum == 6)
{
if (leftIdx == 3)
return matrix_b_spline + 89; // MB43
else if (leftIdx == 4)
return matrix_b_spline + 105; // MB44
else
return matrix_b_spline + 121; // MB45
}
else
{
if (leftIdx == 3)
return matrix_b_spline + 89; // MB43
else if (leftIdx == 4)
return matrix_b_spline + 137; // MB46
else if (leftIdx == ctrlPointNum - 2)
return matrix_b_spline + 153; // MB47
else if (leftIdx == ctrlPointNum - 1)
return matrix_b_spline + 121; // MB45
else
return matrix_b_spline + 169; // MB48
}
}
}
// 便于CPU端调用的一个代理函数
double *matrixCaseHost(double *matrix_b_spline, int order, int ctrlPointNum, int leftIdx)
{
return matrixCase(matrix_b_spline, order, ctrlPointNum, leftIdx);
}
static __device__ float3 ctrlPointD[15][15][15]; // 原始控制顶点,目前只用于求truth或者FFD结果
static __device__ float knotListD[3 * 20]; // 节点序列
/*
* 使用矩阵乘法求 B 样条体的值
* 仅用于 FFD 算法
*/
__device__ float3 BSplineVolumeValueMatrixD(float u, float v, float w,
int leftUIdx, int leftVIdx, int leftWIdx,
int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW)
{
float3 result;
float3 tempCtrlPoint1[4];
float3 tempCtrlPoint2[4][4];
float *M, temp[4], mul1[4];
float tempKnot = knotListD[leftUIdx];
u = (u - tempKnot) / (knotListD[leftUIdx + 1] - tempKnot);
tempKnot = knotListD[20 + leftVIdx];
v = (v - tempKnot) / (knotListD[20 + leftVIdx + 1] - tempKnot);
tempKnot = knotListD[40 + leftWIdx];
w = (w - tempKnot) / (knotListD[40 + leftWIdx + 1] - tempKnot);
// 由三维控制顶点算出二维临时控制顶点
temp[0] = 1.0f;
temp[1] = w;
temp[2] = w * w;
temp[3] = temp[2] * w;
M = matrixCase(matrix_b_spline_d, orderW, ctrlPointNumW, leftWIdx);
for (int i = 0; i < orderW; ++i)
{
mul1[i] = 0.0f;
for (int j = 0; j < orderW; ++j)
{
mul1[i] += temp[j] * M[j * orderW + i];
}
}
for (int i = 0; i < orderU; ++i)
{
for (int j = 0; j < orderV; ++j)
{
tempCtrlPoint2[i][j].x = 0.0f;
tempCtrlPoint2[i][j].y = 0.0f;
tempCtrlPoint2[i][j].z = 0.0f;
for (int k = 0; k < orderW; ++k)
{
float3 cp = ctrlPointD[leftUIdx - i][leftVIdx - j][leftWIdx - k];
tempCtrlPoint2[i][j].x += cp.x * mul1[orderW - 1 - k];
tempCtrlPoint2[i][j].y += cp.y * mul1[orderW - 1 - k];
tempCtrlPoint2[i][j].z += cp.z * mul1[orderW - 1 - k];
}
}
}
// 由二维临时控制顶点算出一维临时控制顶点
temp[1] = v;
temp[2] = v * v;
temp[3] = temp[2] * v;
M = matrixCase(matrix_b_spline_d, orderV, ctrlPointNumV, leftVIdx);
for (int i = 0; i < orderV; ++i)
{
mul1[i] = 0.0;
for (int j = 0; j < orderV; ++j)
{
mul1[i] += temp[j] * M[j * orderV + i];
}
}
for (int i = 0; i < orderU; ++i)
{
tempCtrlPoint1[i].x = 0.0f;
tempCtrlPoint1[i].y = 0.0f;
tempCtrlPoint1[i].z = 0.0f;
for (int j = 0; j < orderV; ++j)
{
tempCtrlPoint1[i].x += tempCtrlPoint2[i][j].x * mul1[orderV - 1 - j];
tempCtrlPoint1[i].y += tempCtrlPoint2[i][j].y * mul1[orderV - 1 - j];
tempCtrlPoint1[i].z += tempCtrlPoint2[i][j].z * mul1[orderV - 1 - j];
}
}
// 由一维临时控制顶点算出结果
temp[1] = u;
temp[2] = u * u;
temp[3] = temp[2] * u;
M = matrixCase(matrix_b_spline_d, orderU, ctrlPointNumU, leftUIdx);
for (int i = 0; i < orderU; ++i)
{
mul1[i] = 0.0;
for (int j = 0; j < orderU; ++j)
{
mul1[i] += temp[j] * M[j * orderU + i];
}
}
result.x = 0.0f;
result.y = 0.0f;
result.z = 0.0f;
for (int i = 0; i < orderU; ++i)
{
result.x += tempCtrlPoint1[i].x * mul1[orderU - 1 - i];
result.y += tempCtrlPoint1[i].y * mul1[orderU - 1 - i];
result.z += tempCtrlPoint1[i].z * mul1[orderU - 1 - i];
}
return result;
}
/*
* kernel,计算三个方向参数分别为 u, v, w 的点的 B 样条体值
* 仅用于 FFD 算法
*/
__global__ void fromParamToCoordOnePoint(float3 *vertexCoordListD, float3 *vertexParamListD,
int vertexCount, int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW,
int knotIntervalCountU, int knotIntervalCountV, int knotIntervalCountW)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= vertexCount)
return;
float3 tempVertexParam = vertexParamListD[idx];
float u = tempVertexParam.x;
float v = tempVertexParam.y;
float w = tempVertexParam.z;
// 预先将其值设为最大,将末端点归入最后一段
int leftUIdx, leftVIdx, leftWIdx;
leftUIdx = orderU - 1 + knotIntervalCountU - 1;
leftVIdx = orderV - 1 + knotIntervalCountV - 1;
leftWIdx = orderW - 1 + knotIntervalCountW - 1;
// 沿 U 方向查找当前点所在的节点区间
for (int i = orderU - 1; i <= orderU - 1 + knotIntervalCountU - 1; ++i)
{
if (u >= knotListD[i] && u < knotListD[i + 1])
{
leftUIdx = i;
break;
}
}
// 沿 V 方向查找当前点所在的节点区间
for (int j = orderV - 1; j <= orderV - 1 + knotIntervalCountV - 1; ++j)
{
if (v >= knotListD[20 + j] && v < knotListD[20 + j + 1])
{
leftVIdx = j;
break;
}
}
// 沿 W 方向查找当前点所在的节点区间
for (int k = orderW - 1; k <= orderW - 1 + knotIntervalCountW - 1; ++k)
{
if (w >= knotListD[40 + k] && w < knotListD[40 + k + 1])
{
leftWIdx = k;
break;
}
}
vertexCoordListD[idx] = BSplineVolumeValueMatrixD(u, v, w, leftUIdx, leftVIdx, leftWIdx,
orderU, orderV, orderW,
ctrlPointNumU, ctrlPointNumV, ctrlPointNumW);
}
float3 *vertexParamListD = 0; // 模型顶点参数序列
float3 *vertexCoordListD = 0; // 模型顶点坐标序列
float3 *vertexParamListD_teapot = 0; // 模型顶点参数序列
float3 *normalParamListD_teapot = 0; // 模型顶点参数序列
//float3 *vertexCoordListD_teapot = 0; // 模型顶点参数序列
int vertexCount_teapot;
int order[3], ctrlPointNum[3], knotIntervalCount[3], knotCount[3]; // 三个方向的阶数、控制顶点数、节点区间数、节点数
float knotList[3][20]; // 三个方向的节点向量
float3 ctrlPoint[15][15][15]; // B样条体的控制顶点
/*
* 根据所有顶点的参数,计算出相应的 B 样条体值
* 仅用于 FFD 算法
*/
void fromParamToCoordD(CommonData *commonData)
{
int vertexCount = commonData->vertexCount();
int threadCount = commonData->ffdThreadCount();
fromParamToCoordOnePoint<<<vertexCount / threadCount + 1, threadCount>>>(
vertexCoordListD, vertexParamListD,
vertexCount, order[U], order[V], order[W],
ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W],
knotIntervalCount[U], knotIntervalCount[V], knotIntervalCount[W]);
float3 *vertexCoordList = new float3[vertexCount];
cudaMemcpy(vertexCoordList, vertexCoordListD, sizeof(float3) * vertexCount, cudaMemcpyDeviceToHost);
for (int i = 0; i < vertexCount; ++i)
commonData->setVertexCoord(i, vertexCoordList[i].x, vertexCoordList[i].y, vertexCoordList[i].z);
delete []vertexCoordList;
}
/*------------------------------------------------------- 上面是FFD算法部分 ---------------------------------------------------------*/
/*------------------------------------------------------- 下面是AFFD算法部分 ---------------------------------------------------------*/
/* 把数字a转换成一个逗号分节的string */
string longNumber(int a)
{
string result;
do
{
ostringstream oss;
int remainder = a % 1000;
if (a >= 1000)
{
if (remainder < 10)
oss << "00" << remainder;
else if (remainder >= 10 && remainder < 100)
oss << "0" << remainder;
else
oss << remainder;
}
else
oss << remainder;
if (result.size() == 0)
result = oss.str();
else
result = oss.str() + "," + result;
a /= 1000;
}while(a > 0);
return result;
}
/* 打印显存使用量 */
void printMemD(const char *file, const char *function, int line, int memSize, string info)
{
/* 只取文件名部分,路径舍弃 */
string fileName(file);
int lastSlashPos = fileName.rfind('/');
fileName = fileName.substr(lastSlashPos + 1, fileName.size());
/*#define PRINT_MEM*/
#ifdef PRINT_MEM
/*作废totalMemD += memSize;*/
cout << info << "\n"
<< "\t文件" << fileName << ",函数" << function << ", 第" << line << "行,申请显存" << longNumber(memSize) << "字节, "
<< "目前累计使用显存" << longNumber(permanentMemD + modelMemD + degreeMemD + tessMemD + viewMemD) << "字节\n"
<< "\t其中permanent = " << longNumber(permanentMemD) << ", model = " << longNumber(modelMemD)
<< ", degreeMemD = " << longNumber(degreeMemD) << ", tessMemD = " << longNumber(tessMemD)
<< ", view = " << longNumber(viewMemD) << endl;
#endif
}
void printCudaError(const char *file, const char *function, int line)
{
/* 只取文件名部分,路径舍弃 */
string fileName(file);
int lastSlashPos = fileName.rfind('/');
fileName = fileName.substr(lastSlashPos + 1, fileName.size());
cudaError_t cymError = cudaGetLastError();
if (cymError)
cout << fileName << "第" << line << "行, 错误代码" << cymError << ": " << cudaGetErrorString(cymError) << endl;
}
__host__ __device__ inline const float3 operator+(const float3 &a, const float3 &b)
{
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
__host__ __device__ inline const float3 operator-(const float3 &a, const float3 &b)
{
return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
__host__ __device__ inline const float3 operator-(const float3 &a)
{
return make_float3(-a.x, -a.y, -a.z);
}
__host__ __device__ inline const float3 operator*(float a, const float3 &b)
{
return make_float3(a * b.x, a * b.y, a * b.z);
}
__host__ __device__ inline const float3 operator*(const float3 &a, float b)
{
return make_float3(a.x * b, a.y * b, a.z * b);
}
__host__ __device__ inline const float3 operator/(const float3 &a, float b)
{
return make_float3(a.x / b, a.y / b, a.z / b);
}
__host__ __device__ inline float operator*(const float3 &a, const float3 &b)
{
return a.x * b.x + a.y * b.y + a.z * b.z;
}
__device__ float3 cross(const float3 &a, const float3 &b)
{
return make_float3(a.y * b.z - a.z * b.y,
a.z * b.x - a.x * b.z,
a.x * b.y - a.y * b.x);
}
__host__ __device__ inline void operator*=(float3 &a, float b)
{
a.x *= b;
a.y *= b;
a.z *= b;
}
__host__ __device__ inline void operator/=(float3 &a, float b)
{
a.x /= b;
a.y /= b;
a.z /= b;
}
__host__ __device__ inline void operator+=(float3 &a, const float3 &b)
{
a.x += b.x;
a.y += b.y;
a.z += b.z;
}
__host__ __device__ inline void operator-=(float3 &a, const float3 &b)
{
a.x -= b.x;
a.y -= b.y;
a.z -= b.z;
}
__device__ inline float length(const float3 &v)
{
return sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
}
__device__ inline void normalize(float3 &v)
{
float length_inverse = 1.0 / length(v);
v *= length_inverse;
}
cublasHandle_t cublas_handle = 0;
/* 将 B 样条矩阵载入显存 */
void loadMatrixBSplineD()
{
cudaMemcpyToSymbol(matrix_b_spline_d, matrix_b_spline_f, sizeof(float) * 185);
cublasCreate(&cublas_handle);
}
static __device__ float3 newCtrlPointD[15][15][15][4][4][4]; // 使用皮本上的优化算法之后新生成的控制顶点,每个节点盒都有一个4x4x4的控制顶点
/*
* 计算采样点值的优化算法,事先对每个节点盒分别计算B样条体的控制顶点乘以Mu, Mv, Mw的结果并存储
* 本函数就是进行这个计算,算法具体思路可以看皮本
*/
__global__ void calcNewCtrlPointD(int order_u, int order_v, int order_w,
int ctrlPointNum_u, int ctrlPointNum_v, int ctrlPointNum_w)
{
int ii = blockIdx.x;
int jj = blockIdx.y;
int kk = blockIdx.z;
int leftUIdx = ii + order_u - 1;
int leftVIdx = jj + order_v - 1;
int leftWIdx = kk + order_w - 1;
float *Mu = matrixCase(matrix_b_spline_d, order_u, ctrlPointNum_u, leftUIdx);
float *Mv = matrixCase(matrix_b_spline_d, order_v, ctrlPointNum_v, leftVIdx);
float *Mw = matrixCase(matrix_b_spline_d, order_w, ctrlPointNum_w, leftWIdx);
// 第一个矩阵乘法
int base_i = leftUIdx - order_u + 1;
int base_j = leftVIdx - order_v + 1;
int base_k = leftWIdx - order_w + 1;
for (int k = 0; k < order_w; ++k)
for (int i = 0; i < order_u; ++i)
for (int j = 0; j < order_v; ++j)
{
newCtrlPointD[ii][jj][kk][i][j][k] = make_float3(0.0, 0.0, 0.0);
for (int l = 0; l < order_u; ++l)
{
float3 cp = ctrlPointD[base_i + l][base_j + j][base_k + k];
newCtrlPointD[ii][jj][kk][i][j][k] += Mu[i * order_u + l] * cp;
}
}
// 第二个矩阵乘法
float3 box[4][4][4];
for (int i = 0; i < order_u; ++i)
for (int j = 0; j < order_v; ++j)
for (int k = 0; k < order_w; ++k)
{
box[i][j][k] = make_float3(0.0, 0.0, 0.0);
for (int l = 0; l < order_v; ++l)
{
float3 cp = newCtrlPointD[ii][jj][kk][i][l][k];
box[i][j][k] += Mv[j * order_v + l] * cp;
}
}
// 第三个矩阵乘法
for (int j = 0; j < order_v; ++j)
for (int k = 0; k < order_w; ++k)
for (int i = 0; i < order_u; ++i)
{
newCtrlPointD[ii][jj][kk][i][j][k] = make_float3(0.0, 0.0, 0.0);
for (int l = 0; l < order_w; ++l)
{
float3 cp = box[i][j][l];
newCtrlPointD[ii][jj][kk][i][j][k] += Mw[k * order_w + l] * cp;
}
}
}
/*
* 将B样条体控制顶点拷贝到显存
* 另外,将皮本上新算法中的控制顶点拷贝到显存
*/
void copyCtrlPointD(CommonData *commonData)
{
for (int i = 0; i < ctrlPointNum[U]; ++i)
{
for (int j = 0; j < ctrlPointNum[V]; ++j)
{
for (int k = 0; k < ctrlPointNum[W]; ++k)
{
ctrlPoint[i][j][k].x = (float)commonData->getCtrlPoint(i, j, k).x();
ctrlPoint[i][j][k].y = (float)commonData->getCtrlPoint(i, j, k).y();
ctrlPoint[i][j][k].z = (float)commonData->getCtrlPoint(i, j, k).z();
}
}
}
cudaMemcpyToSymbol(ctrlPointD, &ctrlPoint[0][0][0], sizeof(float3) * 15 * 15 * 15);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
calcNewCtrlPointD<<<dim3(knotIntervalCount[U], knotIntervalCount[V], knotIntervalCount[W]), 1>>>
(order[U], order[V], order[W], ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W]);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
}
/* 预计算,将内存中的数据拷贝到相应的显存空间中 */
void preCalcD(CommonData *commonData)
{
for (int i = 0; i < 3; ++i)
{
order[i] = commonData->order(i);
ctrlPointNum[i] = commonData->ctrlPointCount(i);
knotIntervalCount[i] = commonData->knotIntervalCount(i);
knotCount[i] = order[i] + ctrlPointNum[i];
}
for (int i = 0; i < 3; ++i)
for (int j = 0; j < knotCount[i]; ++j)
knotList[i][j] = (float)commonData->getKnot(i, j);
cudaMemcpyToSymbol(knotListD, &knotList[0][0], sizeof(float) * 3 * 20);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
int vertexCount = commonData->vertexCount();
float3 *vertexParamListAlloc = new float3[vertexCount];
for (int i = 0; i < vertexCount; ++i)
{
vertexParamListAlloc[i].x = (float)commonData->vertexParam(i).u();
vertexParamListAlloc[i].y = (float)commonData->vertexParam(i).v();
vertexParamListAlloc[i].z = (float)commonData->vertexParam(i).w();
}
modelMemD += sizeof(float3) * vertexCount;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float3) * vertexCount, "@原始模型上所有顶点的参数,仅用于FFD");
cudaMalloc((void**)&vertexParamListD, sizeof(float3) * vertexCount);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
cudaMemcpy(vertexParamListD, vertexParamListAlloc, sizeof(float3) * vertexCount, cudaMemcpyHostToDevice);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
delete []vertexParamListAlloc;
vertexParamListAlloc = 0;
modelMemD += sizeof(float3) * vertexCount;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float3) * vertexCount, "@原始模型上所有顶点的坐标,仅用于FFD");
cudaMalloc((void**)&vertexCoordListD, sizeof(float3) * vertexCount);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
copyCtrlPointD(commonData);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
// teapot的顶点数据拷贝到显存
vertexCount_teapot = commonData->vertexCount_teapot();
vertexParamListAlloc = new float3[vertexCount_teapot];
for (int i = 0; i < vertexCount_teapot; ++i)
{
vertexParamListAlloc[i].x = (float)commonData->vertexParam_teapot(i).x();
vertexParamListAlloc[i].y = (float)commonData->vertexParam_teapot(i).y();
vertexParamListAlloc[i].z = (float)commonData->vertexParam_teapot(i).z();
}
modelMemD += sizeof(float3) * vertexCount_teapot;
cudaMalloc((void**)&vertexParamListD_teapot, sizeof(float3) * vertexCount_teapot);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
cudaMemcpy(vertexParamListD_teapot, vertexParamListAlloc, sizeof(float3) * vertexCount_teapot, cudaMemcpyHostToDevice);
for (int i = 0; i < vertexCount_teapot; ++i)
{
vertexParamListAlloc[i].x = (float)commonData->normalParam_teapot(i).i();
vertexParamListAlloc[i].y = (float)commonData->normalParam_teapot(i).j();
vertexParamListAlloc[i].z = (float)commonData->normalParam_teapot(i).k();
}
cudaMalloc((void**)&normalParamListD_teapot, sizeof(float3) * vertexCount_teapot);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
cudaMemcpy(normalParamListD_teapot, vertexParamListAlloc, sizeof(float3) * vertexCount_teapot, cudaMemcpyHostToDevice);
delete []vertexParamListAlloc;
}
int *matrixFittingIdxD;
float *matrixFittingD;
void loadTriangleMatrixD()
{
extern int matrixFittingIdx[100];
cudaMalloc((void**)&matrixFittingIdxD, sizeof(int) * 100);
permanentMemD += sizeof(int) * 100;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(int) * 100, "@拟合矩阵的索引矩阵");
cudaMemcpy(matrixFittingIdxD, matrixFittingIdx, sizeof(int) * 100, cudaMemcpyHostToDevice);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
extern float matrixFitting[39417];
cudaMalloc((void**)&matrixFittingD, sizeof(float) * 39417);
permanentMemD += sizeof(float) * 39417;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * 39417, "@拟合矩阵");
cudaMemcpy(matrixFittingD, matrixFitting, sizeof(float) * 39417, cudaMemcpyHostToDevice);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
}
struct TriangleD
{
float3 v[3], n[3], n_adj_origin[3], n_adj[3];
#ifdef LINE
float3 bary_origin[3];
#endif
int nc[3]; // nc0, nc1, nc2分别代表v2v0, v0v1, v1v2边的法向数量
float2 vt[3];
};
TriangleD *triangleListD;
float *sampleValueD, *triangleCtrlPointD;
float3 *sampleValueD_PN;
float *triangleCtrlPointD_PN, *triangleNormalCtrlPointD_PN;
int *triangle_adjacent_tableD;
int degree, degree_lower, triangleCtrlPointNum, triangleCtrlPointNum_lower, triangleNum, constrait_point_num;
int blockSizeStep0 = 128, activeThreadNumStep0, blockNumStep0;
int blockSizeStep1 = 128, activeThreadNumStep1, blockNumStep1;
int blockSizeAdjNormal = 128, activeThreadNumAdjNormal, blockNumAdjNormal;
int blockSizeStep0_PN = 128, blockNumStep0_PN;
#ifdef TRUTH
float *B_1D_truth, *sampleValueD_truth;
int activeThreadNumStep0_truth, blockNumStep0_truth;
#endif
int matrixStartIdxFitting;
__host__ __device__ inline int index2c(int i, int j, int stride)
{
return j * stride + i;
}
#ifdef DRAW_TRIANGULAR_CTRL_POINTS
float *triangular_ctrl_points;
#endif
void loadTriangleListD(const vector<Triangle> &triangleList, int *triangle_adjacent_table, int deg)
{
triangleNum = triangleList.size();
degree = deg;
/*degree_lower = deg;*/
degree_lower = 3;
triangleCtrlPointNum = (degree + 1) * (degree + 2) / 2;
triangleCtrlPointNum_lower = (degree_lower + 1) * (degree_lower + 2) / 2;
constrait_point_num = 3 * degree_lower;
TriangleD *tempTriangleList = new TriangleD[triangleNum];
for (vector<Triangle>::size_type i = 0; i < triangleNum; ++i)
{
for (int j = 0; j < 3; ++j)
{
tempTriangleList[i].v[j].x = triangleList[i].v[j].x();
tempTriangleList[i].v[j].y = triangleList[i].v[j].y();
tempTriangleList[i].v[j].z = triangleList[i].v[j].z();
tempTriangleList[i].n[j].x = triangleList[i].n[j].i();
tempTriangleList[i].n[j].y = triangleList[i].n[j].j();
tempTriangleList[i].n[j].z = triangleList[i].n[j].k();
tempTriangleList[i].n_adj_origin[j].x = triangleList[i].n_adj[j].i();
tempTriangleList[i].n_adj_origin[j].y = triangleList[i].n_adj[j].j();
tempTriangleList[i].n_adj_origin[j].z = triangleList[i].n_adj[j].k();
tempTriangleList[i].n_adj[j].x = triangleList[i].n_adj[j].i();
tempTriangleList[i].n_adj[j].y = triangleList[i].n_adj[j].j();
tempTriangleList[i].n_adj[j].z = triangleList[i].n_adj[j].k();
#ifdef LINE
tempTriangleList[i].bary_origin[j].x = triangleList[i].bary_origin[j].x();
tempTriangleList[i].bary_origin[j].y = triangleList[i].bary_origin[j].y();
tempTriangleList[i].bary_origin[j].z = triangleList[i].bary_origin[j].z();
#endif
tempTriangleList[i].nc[j] = triangleList[i].n_count[j];
tempTriangleList[i].vt[j].x = triangleList[i].vt[j].u();
tempTriangleList[i].vt[j].y = triangleList[i].vt[j].v();
}
}
cudaMalloc((void**)&triangleListD, sizeof(TriangleD) * triangleNum);
degreeMemD += sizeof(TriangleD) * triangleNum;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(TriangleD) * triangleNum, "@原始模型上所有三角形信息");
cudaMemcpy(triangleListD, tempTriangleList, sizeof(TriangleD) * triangleNum, cudaMemcpyHostToDevice);
delete []tempTriangleList;
cudaMalloc(&sampleValueD, sizeof(float) * (triangleCtrlPointNum + constrait_point_num) * triangleNum * 6);
degreeMemD += sizeof(float) * (triangleCtrlPointNum + constrait_point_num) * triangleNum * 6;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * (triangleCtrlPointNum + constrait_point_num) * triangleNum * 6,
"@为了求Bezier曲面片的控制顶点,需要在其上进行采样,结果放在这里。即第二个矩阵乘法用到的矩阵T");
cudaMalloc(&sampleValueD_PN, sizeof(float3) * triangleNum * 3 * 2);
cudaMalloc(&triangleCtrlPointD_PN, sizeof(float) * (1 + 2 + 3 + 4) * triangleNum * 3);
cudaMalloc(&triangleNormalCtrlPointD_PN, sizeof(float) * (1 + 2 + 3) * triangleNum * 3);
cudaMalloc(&triangleCtrlPointD, sizeof(float) * triangleCtrlPointNum_lower * triangleNum * 6);
cudaMalloc(&triangle_adjacent_tableD, sizeof(int) * triangleNum * 3);
cudaMemcpy(triangle_adjacent_tableD, triangle_adjacent_table, sizeof(int) * triangleNum * 3, cudaMemcpyHostToDevice);
#ifdef TRUTH
cudaMalloc(&sampleValueD_truth, sizeof(float) * triangleCtrlPointNum * triangleNum * 3);
degreeMemD += sizeof(float) * triangleCtrlPointNum * triangleNum * 3;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * triangleCtrlPointNum * triangleNum * 3,
"@为了求精确Bezier曲面片的控制顶点,需要在其上进行采样,结果放在这里。即第二个矩阵乘法用到的矩阵T");
activeThreadNumStep0_truth = triangleCtrlPointNum * triangleNum;
blockNumStep0_truth = ceil(static_cast<double>(activeThreadNumStep0_truth) / blockSizeStep0);
#endif
activeThreadNumStep0 = triangleCtrlPointNum * triangleNum;
blockNumStep0 = ceil(static_cast<double>(activeThreadNumStep0) / blockSizeStep0);
activeThreadNumStep1 = constrait_point_num * triangleNum;
blockNumStep1 = ceil(static_cast<double>(activeThreadNumStep1) / blockSizeStep1);
activeThreadNumAdjNormal = triangleNum * 3;
blockNumAdjNormal = ceil(static_cast<double>(activeThreadNumAdjNormal) / blockSizeAdjNormal);
blockNumStep0_PN = ceil(static_cast<double>(3 * triangleNum) / blockSizeStep0_PN);
#ifdef TRUTH
extern float matrixTriangle[9][55*55];
float *temp = new float[triangleCtrlPointNum * triangleCtrlPointNum];
for (int i = 0; i < triangleCtrlPointNum; ++i)
{
for (int j = 0; j < triangleCtrlPointNum; ++j)
{
temp[index2c(i, j, triangleCtrlPointNum)] = matrixTriangle[degree - 1][i * triangleCtrlPointNum + j];
}
}
cudaMalloc(&B_1D_truth, sizeof(float) * triangleCtrlPointNum * triangleCtrlPointNum);
degreeMemD += sizeof(float) * triangleCtrlPointNum * triangleCtrlPointNum;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * triangleCtrlPointNum * triangleCtrlPointNum, "@第一个矩阵乘法用到的矩阵(B-1)T存放在这里");
cudaMemcpy(B_1D_truth, temp, sizeof(float) * triangleCtrlPointNum * triangleCtrlPointNum, cudaMemcpyHostToDevice);
delete temp;
#endif
/***************************************************************************/
extern int matrixFittingIdx[100];
matrixStartIdxFitting = matrixFittingIdx[degree * 10 + degree_lower];
cout << "triangleNum = " << triangleNum << endl;
cout << "degree = " << degree << ", degree_lower = " << degree_lower << ", constrait_point_num = " << constrait_point_num << endl;
cout << "triangleCtrlPointNum = " << triangleCtrlPointNum << ", triangleCtrlPointNum_lower = " << triangleCtrlPointNum_lower << endl;
cout << "activeThreadNumStep1 = " << activeThreadNumStep1 << ", blockNumStep1 = " << blockNumStep1 << endl;
#ifdef DRAW_TRIANGULAR_CTRL_POINTS
triangular_ctrl_points = new float[3 * triangleCtrlPointNum_lower * triangleNum];
#endif
}
double power(double a, int n)
{
if (n <= 0)
return 1.0;
double result = a;
for (int i = 1; i < n; ++i)
result *= a;
return result;
}
int factorial(int n)
{
int result = 1;
for (int i = 1; i <= n; ++i)
result *= i;
return result;
}
float B(double u, double v, double w, int n, int3 c)
{
return factorial(n) / factorial(c.x) / factorial(c.y) / factorial(c.z) * power(u, c.x) * power(v, c.y) * power(w, c.z);
}
float *BqD, *BqD_PN, *BBD, *RD;
int *my_to_truth_tableD;
#ifdef TRUTH
float *BqD_truth, *BBD_truth, *RD_truth;
#endif
int segmentPerEdge, samplePointPerTriangle;
int blockSizeCopy = 256, activeThreadNumCopy, blockNumCopy;
int *my_to_truth_table;
void generateUVW(int samplePointPerEdge)
{
segmentPerEdge = samplePointPerEdge - 1;
samplePointPerTriangle = (samplePointPerEdge + 1) * samplePointPerEdge / 2;
activeThreadNumCopy = samplePointPerTriangle * triangleNum;
blockNumCopy = ceil(static_cast<double>(activeThreadNumCopy) / blockSizeCopy);
double *a = new double[samplePointPerTriangle * 3];
int idx = 0;
for (int i = segmentPerEdge; i >= 0; --i)
{
for (int j = segmentPerEdge - i; j >= 0; --j)
{
int k = segmentPerEdge - i - j;
a[idx++] = (double)i / segmentPerEdge;
a[idx++] = (double)j / segmentPerEdge;
a[idx++] = (double)k / segmentPerEdge;
}
}
float *b = new float[samplePointPerTriangle * triangleCtrlPointNum_lower];
for (int row = 0; row < samplePointPerTriangle; ++row)
{
int idx = 0;
for (int i = degree_lower; i >= 0; --i)
{
for (int j = degree_lower - i; j >= 0; --j)
{
int k = degree_lower - i - j;
double u = a[row * 3 + 0];
double v = a[row * 3 + 1];
double w = a[row * 3 + 2];
b[index2c(row, idx, samplePointPerTriangle)] = B(u, v, w, degree_lower, make_int3(i, j, k));
++idx;
}
}
}
float *b_PN = new float[samplePointPerTriangle * 6];
for (int row = 0; row < samplePointPerTriangle; ++row)
{
int idx = 0;
for (int i = 2; i >= 0; --i)
{
for (int j = 2 - i; j >= 0; --j)
{
int k = 2 - i - j;
double u = a[row * 3 + 0];
double v = a[row * 3 + 1];
double w = a[row * 3 + 2];
b_PN[index2c(row, idx, samplePointPerTriangle)] = B(u, v, w, 2, make_int3(i, j, k));
++idx;
}
}
}
cudaMalloc(&BqD, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum_lower);
tessMemD += sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum_lower;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum_lower, "@第一个矩阵乘法用到的矩阵Bq存放在这里");
cudaMemcpy(BqD, b, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum_lower, cudaMemcpyHostToDevice);
cudaMalloc(&BqD_PN, sizeof(float) * samplePointPerTriangle * 6);
tessMemD += sizeof(float) * samplePointPerTriangle * 6;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * samplePointPerTriangle * 6, "@第一个矩阵乘法用到的矩阵Bq存放在这里");
cudaMemcpy(BqD_PN, b_PN, sizeof(float) * samplePointPerTriangle * 6, cudaMemcpyHostToDevice);
/***********************************************************************************************************************************/
cudaMalloc(&BBD, sizeof(float) * samplePointPerTriangle * (triangleCtrlPointNum + constrait_point_num));
tessMemD += sizeof(float) * samplePointPerTriangle * (triangleCtrlPointNum + constrait_point_num);
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * samplePointPerTriangle * (triangleCtrlPointNum + constrait_point_num), "@第二个矩阵乘法用到的矩阵BB存放在这里");
cudaMalloc(&RD, sizeof(float) * samplePointPerTriangle * triangleNum * 6);
tessMemD += sizeof(float) * samplePointPerTriangle * triangleNum * 6;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * samplePointPerTriangle * triangleNum * 6, "@第二个矩阵乘法的结果RD存放在这里");
delete []a;
delete []b;
cudaMalloc(&my_to_truth_tableD, sizeof(int) * samplePointPerTriangle * triangleNum);
my_to_truth_table = new int[samplePointPerTriangle * triangleNum];
fill(my_to_truth_table, my_to_truth_table + samplePointPerTriangle * triangleNum, 0);
}
#ifdef TRUTH
void generateUVW_truth(int samplePointPerEdge)
{
double *a = new double[samplePointPerTriangle * 3];
int idx = 0;
for (int i = segmentPerEdge; i >= 0; --i)
{
for (int j = segmentPerEdge - i; j >= 0; --j)
{
int k = segmentPerEdge - i - j;
a[idx++] = (double)i / segmentPerEdge;
a[idx++] = (double)j / segmentPerEdge;
a[idx++] = (double)k / segmentPerEdge;
}
}
float *b = new float[samplePointPerTriangle * triangleCtrlPointNum * 3];
for (int row = 0; row < samplePointPerTriangle; ++row)
{
int idx = 0;
for (int i = degree; i >= 0; --i)
{
for (int j = degree - i; j >= 0; --j)
{
int k = degree - i - j;
double u = a[row * 3 + 0];
double v = a[row * 3 + 1];
double w = a[row * 3 + 2];
b[index2c(row, idx, samplePointPerTriangle * 3)] = B(u, v, w, degree, make_int3(i, j, k));
//b[row * triangleCtrlPointNum + idx] = B(u, v, w, degree, make_int3(i, j, k));
++idx;
}
}
}
/***********************************************************************************************************************************/
for (int row = 0; row < samplePointPerTriangle; ++row)
{
int idx = 0;
for (int i = degree; i >= 0; --i)
{
for (int j = degree - i; j >= 0; --j)
{
int k = degree - i - j;
double u = a[row * 3 + 0];
double v = a[row * 3 + 1];
double w = a[row * 3 + 2];
b[index2c(row + samplePointPerTriangle, idx, samplePointPerTriangle * 3)] = factorial(degree) / (factorial(i) * factorial(j) * factorial(k)) *
(i * power(u, i - 1) * power(v, j) * power(w, k) - k * power(u, i) * power(v, j) * power(w, k - 1));
++idx;
}
}
}
/***********************************************************************************************************************************/
for (int row = 0; row < samplePointPerTriangle; ++row)
//for (int row = samplePointPerTriangle * 2; row < samplePointPerTriangle * 3; ++row)
{
int idx = 0;
for (int i = degree; i >= 0; --i)
{
for (int j = degree - i; j >= 0; --j)
{
int k = degree - i - j;
double u = a[row * 3 + 0];
double v = a[row * 3 + 1];
double w = a[row * 3 + 2];
b[index2c(row + samplePointPerTriangle * 2, idx, samplePointPerTriangle * 3)] = factorial(degree) / (factorial(i) * factorial(j) * factorial(k)) *
(j * power(u, i) * power(v, j - 1) * power(w, k) - k * power(u, i) * power(v, j) * power(w, k - 1));
++idx;
}
}
}
cudaMalloc(&BqD_truth, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum * 3);
tessMemD += sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum * 3;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum * 3, "@第一个矩阵乘法用到的矩阵Bq存放在这里");
cudaMemcpy(BqD_truth, b, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum * 3, cudaMemcpyHostToDevice);
/***********************************************************************************************************************************/
cudaMalloc(&BBD_truth, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum * 3);
tessMemD += sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum * 3;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * samplePointPerTriangle * triangleCtrlPointNum * 3, "@第二个矩阵乘法用到的矩阵BB存放在这里");
cudaMalloc(&RD_truth, sizeof(float) * samplePointPerTriangle * 3 * triangleNum * 3);
tessMemD += sizeof(float) * samplePointPerTriangle * 3 * triangleNum * 3;
printMemD(__FILE__, __FUNCTION__, __LINE__, sizeof(float) * samplePointPerTriangle * 3 * triangleNum * 3, "@第二个矩阵乘法的结果RD存放在这里");
delete []a;
delete []b;
}
#endif
/*
* 使用矩阵乘法求 B 样条体的值,和上面一个类似函数的区别在于不负责 u、v、w 重新参数化的工作,
* 而且也不负责求合适的 B 样条矩阵,这两项工作均需调用函数之前完成,参数列表得到简化
* 目前仅用于求truth
*/
__device__ float3 BSplineVolumeValueMatrixD2(float *Mu, float *Mv, float *Mw,
float u, float v, float w, float *shared_array,
int leftUIdx, int leftVIdx, int leftWIdx,
int orderU, int orderV, int orderW)
{
#define NB // NB表示使用比较好的算法,如果不define NB,则使用最原始的算法,逻辑也相对清晰
#ifdef NB
float *mul1 = (float *)shared_array;
float *mul2 = (float *)&mul1[blockDim.x * 4];
float *temp = (float *)&mul2[blockDim.x * 4];
// 由三维控制顶点算出二维临时控制顶点
temp[3 * threadIdx.x + 0] = w;
temp[3 * threadIdx.x + 1] = w * w;
temp[3 * threadIdx.x + 2] = w * w * w;
for (int i = 0; i < orderW; ++i)
{
mul1[4 * threadIdx.x + i] = Mw[i];
for (int j = 1; j < orderW; ++j)
mul1[4 * threadIdx.x + i] += temp[3 * threadIdx.x + j - 1] * Mw[j * orderW + i];
}
// 由二维临时控制顶点算出一维临时控制顶点
temp[3 * threadIdx.x + 0] = v;
temp[3 * threadIdx.x + 1] = v * v;
temp[3 * threadIdx.x + 2] = v * v * v;
for (int i = 0; i < orderV; ++i)
{
mul2[4 * threadIdx.x + i] = Mv[i];
for (int j = 1; j < orderV; ++j)
mul2[4 * threadIdx.x + i] += temp[3 * threadIdx.x + j - 1] * Mv[j * orderV + i];
}
float3 tempCtrlPoint2[4];
float3 tempCtrlPoint1[4];
for (int i = 0; i < orderU; ++i)
{
for (int j = 0; j < orderV; ++j)
{
tempCtrlPoint2[j] = make_float3(0.0f, 0.0f, 0.0f);
for (int k = 0; k < orderW; ++k)
{
float3 cp = ctrlPointD[leftUIdx - i][leftVIdx - j][leftWIdx - k];
tempCtrlPoint2[j] += cp * mul1[4 * threadIdx.x + orderW - 1 - k];
}
}
tempCtrlPoint1[i] = make_float3(0.0f, 0.0f, 0.0f);
for (int j = 0; j < orderV; ++j)
tempCtrlPoint1[i] += tempCtrlPoint2[j] * mul2[4 * threadIdx.x + orderV - 1 - j];
}
// 由一维临时控制顶点算出结果
temp[3 * threadIdx.x + 0] = u;
temp[3 * threadIdx.x + 1] = u * u;
temp[3 * threadIdx.x + 2] = u * u * u;
for (int i = 0; i < orderU; ++i)
{
mul1[4 * threadIdx.x + i] = Mu[i];
for (int j = 1; j < orderU; ++j)
mul1[4 * threadIdx.x + i] += temp[3 * threadIdx.x + j - 1] * Mu[j * orderU + i];
}
float3 result = make_float3(0.0f, 0.0f, 0.0f);
for (int i = 0; i < orderU; ++i)
result += tempCtrlPoint1[i] * mul1[4 * threadIdx.x + orderU - 1 - i];
return result;
/*-------------------------------------------------------------------------------------------------*/
#else
// 第一个矩阵乘法
int base_i = leftUIdx - orderU + 1;
int base_j = leftVIdx - orderV + 1;
int base_k = leftWIdx - orderW + 1;
float3 box[4][4][4], temp;
for (int k = 0; k < orderW; ++k)
for (int i = 0; i < orderU; ++i)
for (int j = 0; j < orderV; ++j)
{
temp = make_float3(0.0, 0.0, 0.0);
for (int l = 0; l < orderU; ++l)
{
float3 cp = ctrlPointD[base_i + l][base_j + j][base_k + k];
temp += Mu[i * orderU + l] * cp;
}
box[i][j][k] = temp;
}
// 第二个矩阵乘法
float3 box1[4][4][4];
for (int i = 0; i < orderU; ++i)
for (int j = 0; j < orderV; ++j)
for (int k = 0; k < orderW; ++k)
{
temp = make_float3(0.0, 0.0, 0.0);
for (int l = 0; l < orderV; ++l)
{
float3 cp = box[i][l][k];
temp += Mv[j * orderV + l] * cp;
}
box1[i][j][k] = temp;
}
// 第三个矩阵乘法
for (int j = 0; j < orderV; ++j)
for (int k = 0; k < orderW; ++k)
for (int i = 0; i < orderU; ++i)
{
temp = make_float3(0.0, 0.0, 0.0);
for (int l = 0; l < orderW; ++l)
{
float3 cp = box1[i][j][l];
temp += Mw[k * orderW + l] * cp;
}
box[i][j][k] = temp;
}
// 由三维控制顶点算出二维临时控制顶点
float t[4];
t[0] = 1.0f;
t[1] = u;
t[2] = u * u;
t[3] = t[2] * u;
float3 cp2D[4][4];
for (int j = 0; j < orderV; ++j)
for (int k = 0; k < orderW; ++k)
{
cp2D[j][k] = make_float3(0.0f, 0.0f, 0.0f);
for (int i = 0; i < orderU; ++i)
{
cp2D[j][k] += t[i] * box[i][j][k];
}
}
// 由二维临时控制顶点算出一维临时控制顶点
t[1] = v;
t[2] = v * v;
t[3] = t[2] * v;
float3 cp1D[4];
for (int k = 0; k < orderW; ++k)
{
cp1D[k] = make_float3(0.0f, 0.0f, 0.0f);
for (int j = 0; j < orderV; ++j)
cp1D[k] += t[j] * cp2D[j][k];
}
// 由一维临时控制顶点算出结果
t[1] = w;
t[2] = w * w;
t[3] = t[2] * w;
temp = make_float3(0.0f, 0.0f, 0.0f);
for (int k = 0; k < orderW; ++k)
temp += t[k] * cp1D[k];
return temp;
#endif
}
/* 新的合并算法 */
__device__ void BSplineVolumeValueMatrixD_combine(float u, float v, float w, float *shared_array,
int i_idx, int j_idx, int k_idx,
int orderU, int orderV, int orderW,
float3 &f, float3 &fu, float3 &fv)
{
int base2 = 2 * threadIdx.x;
int base3 = 3 * threadIdx.x;
float *tu = &shared_array[base3];
float *tu_ = &shared_array[blockDim.x * 3 + base2];
float *tv = &shared_array[blockDim.x * 5 + base3];
float *tv_ = &shared_array[blockDim.x * 8 + base2];
float *tw = &shared_array[blockDim.x * 10 + base3];
tu[0] = u; tu[1] = u * u, tu[2] = u * tu[1];
tu_[0] = 2 * u; tu_[1] = 3 * tu[1];
tv[0] = v; tv[1] = v * v; tv[2] = v * tv[1];
tv_[0] = 2 * v; tv_[1] = 3 * tv[1];
tw[0] = w; tw[1] = w * w; tw[2] = w * tw[1];
/************* 将i = 0 提到前面,减少tu和tu_数组的大小 ****************/
/******** orderU至少是2,所以这里可以将i = 0的情况提到for之外 *********/
float3 cp2D[4];
for (int j = 0; j < orderV; ++j)
{
cp2D[j] = newCtrlPointD[i_idx][j_idx][k_idx][0][j][0];
for (int k = 1; k < orderW; ++k)
cp2D[j] += tw[k - 1] * newCtrlPointD[i_idx][j_idx][k_idx][0][j][k];
}
// orderV至少是2,所以这里可以将tv[0] * cp2D[1]提到for之外
float3 cp1D = cp2D[0] + tv[0] * cp2D[1], cp1Dv = cp2D[1];
for (int j = 2; j < orderV; ++j)
{
cp1D += tv[j - 1] * cp2D[j];
cp1Dv += tv_[j - 2] * cp2D[j];
}
f = cp1D;
fv = cp1Dv;
/*************** 将i = 1 提到前面,减少tu_数组的大小 ******************/
/******** orderU至少是2,所以这里可以将i = 1的情况提到for之外 *********/
for (int j = 0; j < orderV; ++j)
{
cp2D[j] = newCtrlPointD[i_idx][j_idx][k_idx][1][j][0];
for (int k = 1; k < orderW; ++k)
cp2D[j] += tw[k - 1] * newCtrlPointD[i_idx][j_idx][k_idx][1][j][k];
}
// orderV至少是2,所以这里可以将tv[0] * cp2D[1]提到for之外
cp1D = cp2D[0] + tv[0] * cp2D[1];
cp1Dv = cp2D[1];
for (int j = 2; j < orderV; ++j)
{
cp1D += tv[j - 1] * cp2D[j];
cp1Dv += tv_[j - 2] * cp2D[j];
}
f += tu[0] * cp1D;
fu = cp1D;
fv += tu[0] * cp1Dv;
/*********************************************************************/
for (int i = 2; i < orderU; ++i)
{
for (int j = 0; j < orderV; ++j)
{
cp2D[j] = newCtrlPointD[i_idx][j_idx][k_idx][i][j][0];
for (int k = 1; k < orderW; ++k)
cp2D[j] += tw[k - 1] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];
}
// orderV至少是2,所以这里可以将tv[0] * cp2D[1]提到for之外
cp1D = cp2D[0] + tv[0] * cp2D[1];
cp1Dv = cp2D[1];
for (int j = 2; j < orderV; ++j)
{
cp1D += tv[j - 1] * cp2D[j];
cp1Dv += tv_[j - 2] * cp2D[j];
}
f += tu[i - 1] * cp1D;
fu += tu_[i - 2] * cp1D;
fv += tu[i - 1] * cp1Dv;
}
}
/* 求采样点在u方向的偏导,由优化之后的采样点求值算法改造而来 */
__device__ float3 BSplineVolumeValueMatrixDu(float u, float v, float w, float *shared_array,
int i_idx, int j_idx, int k_idx,
int orderU, int orderV, int orderW)
{
float *tu = (float *)shared_array;
float *tv = (float *)&tu[blockDim.x * 2];
float *tw = (float *)&tv[blockDim.x * 3];
int base2 = 2 * threadIdx.x;
int base3 = 3 * threadIdx.x;
tu[base2] = 2 * u; tu[base2 + 1] = 3 * u * u;
tv[base3] = v; tv[base3 + 1] = v * v; tv[base3 + 2] = v * v * v;
tw[base3] = w; tw[base3 + 1] = w * w, tw[base3 + 2] = w * w * w;
// 一步完成三维控制顶点->二维临时控制顶点->一维临时控制顶点->结果
float3 cp2D[4], cp1D, result;
for (int j = 0; j < orderV; ++j)
{
cp2D[j] = newCtrlPointD[i_idx][j_idx][k_idx][1][j][0];
for (int k = 1; k < orderU; ++k)
cp2D[j] += tw[base3 + k - 1] * newCtrlPointD[i_idx][j_idx][k_idx][1][j][k];
}
cp1D = cp2D[0];
for (int j = 1; j < orderV; ++j)
cp1D += tv[base3 + j - 1] * cp2D[j];
result = cp1D;
// 为了把tu从[3]缩成[2],将i=0的情况提到了前面
for (int i = 2; i < orderU; ++i)
{
for (int j = 0; j < orderV; ++j)
{
cp2D[j] = newCtrlPointD[i_idx][j_idx][k_idx][i][j][0];
for (int k = 1; k < orderW; ++k)
cp2D[j] += tw[base3 + k - 1] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];
}
cp1D = cp2D[0];
for (int j = 1; j < orderV; ++j)
cp1D += tv[base3 + j - 1] * cp2D[j];
result += tu[base2 + i - 2] * cp1D;
}
return result;
/*-----------------------------------------------------*/
/*float tu[4], tv[4], tw[4];*/
/*tu[0] = 0; tu[1] = 1; tu[2] = 2 * u; tu[3] = 3 * u * u;*/
/*tv[0] = 1; tv[1] = v; tv[2] = v * v; tv[3] = v * v * v;*/
/*tw[0] = 1; tw[1] = w; tw[2] = w * w, tw[3] = w * w * w;*/
/*// 一步完成三维控制顶点->二维临时控制顶点->一维临时控制顶点->结果*/
/*float3 cp2D[4], cp1D, result = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int k = 0; k < orderW; ++k)*/
/*{*/
/*for (int j = 0; j < orderV; ++j)*/
/*{*/
/*cp2D[j] = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int i = 0; i < orderU; ++i)*/
/*cp2D[j] += tu[i] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];*/
/*}*/
/*cp1D = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int j = 0; j < orderV; ++j)*/
/*cp1D += tv[j] * cp2D[j];*/
/*result += tw[k] * cp1D;*/
/*}*/
/*return result;*/
}
/* 求采样点在v方向的偏导,由优化之后的采样点求值算法改造而来 */
__device__ float3 BSplineVolumeValueMatrixDv(float u, float v, float w, float *shared_array,
int i_idx, int j_idx, int k_idx,
int orderU, int orderV, int orderW)
{
float *tu = (float *)shared_array;
float *tv = (float *)&tu[blockDim.x * 3];
float *tw = (float *)&tv[blockDim.x * 2];
int base2 = 2 * threadIdx.x;
int base3 = 3 * threadIdx.x;
tu[base3] = u; tu[base3 + 1] = u * u; tu[base3 + 2] = u * u * u;
tv[base2] = 2 * v; tv[base2 + 1] = 3 * v * v;
tw[base3] = w; tw[base3 + 1] = w * w, tw[base3 + 2] = w * w * w;
// 一步完成三维控制顶点->二维临时控制顶点->一维临时控制顶点->结果
float3 cp2D[4], cp1D, result;
for (int k = 0; k < orderW; ++k)
{
cp2D[k] = newCtrlPointD[i_idx][j_idx][k_idx][0][1][k];
for (int i = 1; i < orderU; ++i)
cp2D[k] += tu[base3 + i - 1] * newCtrlPointD[i_idx][j_idx][k_idx][i][1][k];
}
cp1D = cp2D[0];
for (int k = 1; k < orderW; ++k)
cp1D += tw[base3 + k - 1] * cp2D[k];
result = cp1D;
// 为了把tv从[3]缩成[2],将j=0的情况提到了前面
for (int j = 2; j < orderV; ++j)
{
for (int k = 0; k < orderW; ++k)
{
cp2D[k] = newCtrlPointD[i_idx][j_idx][k_idx][0][j][k];
for (int i = 1; i < orderU; ++i)
cp2D[k] += tu[base3 + i - 1] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];
}
cp1D = cp2D[0];
for (int k = 1; k < orderW; ++k)
cp1D += tw[base3 + k - 1] * cp2D[k];
result += tv[base2 + j - 2] * cp1D;
}
return result;
/*-----------------------------------------------------*/
/*float tu[4], tv[4], tw[4];*/
/*tu[0] = 1; tu[1] = u; tu[2] = u * u; tu[3] = u * u * u;*/
/*tv[0] = 0; tv[1] = 1; tv[2] = 2 * v; tv[3] = 3 * v * v;*/
/*tw[0] = 1; tw[1] = w; tw[2] = w * w, tw[3] = w * w * w;*/
/*// 一步完成三维控制顶点->二维临时控制顶点->一维临时控制顶点->结果*/
/*float3 cp2D[4], cp1D, result = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int k = 0; k < orderW; ++k)*/
/*{*/
/*for (int j = 0; j < orderV; ++j)*/
/*{*/
/*cp2D[j] = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int i = 0; i < orderU; ++i)*/
/*cp2D[j] += tu[i] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];*/
/*}*/
/*cp1D = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int j = 0; j < orderV; ++j)*/
/*cp1D += tv[j] * cp2D[j];*/
/*result += tw[k] * cp1D;*/
/*}*/
/*return result;*/
}
/* 求采样点在w方向的偏导,由优化之后的采样点求值算法改造而来 */
__device__ float3 BSplineVolumeValueMatrixDw(float u, float v, float w, float *shared_array,
int i_idx, int j_idx, int k_idx,
int orderU, int orderV, int orderW)
{
int base2 = 2 * threadIdx.x;
int base3 = 3 * threadIdx.x;
float *tu = &shared_array[base3];
float *tv = &shared_array[blockDim.x * 3 + base3];
float *tw = &shared_array[blockDim.x * 6 + base2];
tu[0] = u; tu[1] = u * u; tu[2] = u * tu[1];
tv[0] = v; tv[1] = v * v; tv[2] = v * tv[1];
tw[0] = 2 * w; tw[1] = 3 * w * w;
float3 cp2D[4], cp1D, result;
for (int i = 0; i < orderU; ++i)
{
cp2D[i] = newCtrlPointD[i_idx][j_idx][k_idx][i][0][1];
for (int j = 1; j < orderV; ++j)
cp2D[i] += tv[j - 1] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][1];
}
cp1D = cp2D[0];
for (int i = 1; i < orderU; ++i)
cp1D += tu[i - 1] * cp2D[i];
result = cp1D;
// 为了把tw从[3]缩成[2],将k = 1的情况提到了前面
for (int k = 2; k < orderW; ++k)
{
for (int i = 0; i < orderU; ++i)
{
cp2D[i] = newCtrlPointD[i_idx][j_idx][k_idx][i][0][k];
for (int j = 1; j < orderV; ++j)
cp2D[i] += tv[j - 1] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];
}
cp1D = cp2D[0];
for (int i = 1; i < orderU; ++i)
cp1D += tu[i - 1] * cp2D[i];
result += tw[k - 2] * cp1D;
}
return result;
/*-----------------------------------------------------*/
//float tu[4], tv[4], tw[4];
//tu[0] = 1; tu[1] = u; tu[2] = u * u; tu[3] = u * u * u;
//tv[0] = 1; tv[1] = v; tv[2] = v * v, tv[3] = v * v * v;
//tw[0] = 0; tw[1] = 1; tw[2] = 2 * w; tw[3] = 3 * w * w;
//float3 cp2D[4], cp1D, result = make_float3(0.0f, 0.0f, 0.0f);
//for (int i = 0; i < orderU; ++i)
//{
//for (int j = 0; j < orderV; ++j)
//{
//cp2D[j] = make_float3(0.0f, 0.0f, 0.0f);
//for (int k = 0; k < orderW; ++k)
//cp2D[j] += tw[k] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];
//}
//cp1D = make_float3(0.0f, 0.0f, 0.0f);
//for (int j = 0; j < orderV; ++j)
//cp1D += tv[j] * cp2D[j];
//result += tu[i] * cp1D;
//}
//return result;
/*-----------------------------------------------------*/
// 最原始的算法
/*float tu[4], tv[4], tw[4];*/
/*tu[0] = 1; tu[1] = u; tu[2] = u * u; tu[3] = u * u * u;*/
/*tv[0] = 1; tv[1] = v; tv[2] = v * v, tv[3] = v * v * v;*/
/*tw[0] = 0; tw[1] = 1; tw[2] = 2 * w; tw[3] = 3 * w * w;*/
/*float3 cp2D[4], cp1D, result = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int k = 0; k < orderW; ++k)*/
/*{*/
/*for (int j = 0; j < orderV; ++j)*/
/*{*/
/*cp2D[j] = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int i = 0; i < orderU; ++i)*/
/*cp2D[j] += tu[i] * newCtrlPointD[i_idx][j_idx][k_idx][i][j][k];*/
/*}*/
/*cp1D = make_float3(0.0f, 0.0f, 0.0f);*/
/*for (int j = 0; j < orderV; ++j)*/
/*cp1D += tv[j] * cp2D[j];*/
/*result += tw[k] * cp1D;*/
/*}*/
/*return result;*/
}
__global__ void calcCtrlPoint_PN(TriangleD *triangleListD, int *triangle_adjacent_tableD, float3 *sampleValueD_PN, float *triangleCtrlPointD_PN, float *triangleNormalCtrlPointD_PN, int f, int m_)
{
int triangleIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (triangleIdx >= f)
return;
int adj_face_idx[3];
adj_face_idx[0] = triangle_adjacent_tableD[triangleIdx * 3];
adj_face_idx[1] = triangle_adjacent_tableD[triangleIdx * 3 + 1];
adj_face_idx[2] = triangle_adjacent_tableD[triangleIdx * 3 + 2];
//int adj_edge_idx[3] = { -1, -1, -1 }; // 实际上应该使用这一句,以此判断有没有相邻三角形
int adj_edge_idx[3] = { 0, 0, 0 }; // 但是对于某些模型使用上一句会出现内存越界,所以暂且使用这一句,权宜之计
//bool handle[3] = { false, false, false }; // 在该边有一个以上法向时,是否处理这条边。当这条边有相邻面片才需要处理
int adj_corner_ctrlpoint_idx[3][2] = { { 0, 2 }, { 1, 0 }, { 2, 1 } }; // 相邻三角形0, 1, 2号边上的控制顶点编号(仅有角点,没有边点)
for (int i = 0; i < 3; ++i)
if (adj_face_idx[i] >= 0)
{
adj_edge_idx[i] = adj_face_idx[i] & 0x3;
adj_face_idx[i] = adj_face_idx[i] >> 2;
//handle[i] = true;
}
//printf("edge_id = (%d, %d, %d), face_id = (%d, %d, %d)\n", adj_edge_idx[0], adj_edge_idx[1], adj_edge_idx[2],
//adj_face_idx[0], adj_face_idx[1], adj_face_idx[2]);
int n_count[3];
n_count[0] = triangleListD[triangleIdx].nc[0];
n_count[1] = triangleListD[triangleIdx].nc[1];
n_count[2] = triangleListD[triangleIdx].nc[2];
float *p_x = &triangleCtrlPointD_PN[m_ * triangleIdx];
float *p_y = &triangleCtrlPointD_PN[m_ * (f + triangleIdx)];
float *p_z = &triangleCtrlPointD_PN[m_ * (f * 2 + triangleIdx)];
float3 v0 = sampleValueD_PN[triangleIdx * 3];
float3 v1 = sampleValueD_PN[triangleIdx * 3 + 1];
float3 v2 = sampleValueD_PN[triangleIdx * 3 + 2];
float3 n0 = sampleValueD_PN[(f + triangleIdx) * 3];
float3 n1 = sampleValueD_PN[(f + triangleIdx) * 3 + 1];
float3 n2 = sampleValueD_PN[(f + triangleIdx) * 3 + 2];
normalize(n0);
normalize(n1);
normalize(n2);
/*********************** 计算几何控制顶点 **********************/
p_x[0] = v0.x; p_y[0] = v0.y; p_z[0] = v0.z; // 控制顶点0
p_x[6] = v1.x; p_y[6] = v1.y; p_z[6] = v1.z; // 控制顶点6
p_x[9] = v2.x; p_y[9] = v2.y; p_z[9] = v2.z; // 控制顶点9
float3 e = make_float3(0.0f, 0.0f, 0.0f);
float3 v01 = v1 - v0;
float3 result;
if (n_count[1] < 2) // 该条边只有一个法向
{
result = (v0 * 2 + v1 - n0 * (v01 * n0)) / 3;
}
else
{
float3 n_oppo = triangleListD[adj_face_idx[1]].n_adj[adj_corner_ctrlpoint_idx[adj_edge_idx[1]][0]];
float3 n_ave = cross(n0, n_oppo);
normalize(n_ave);
result = v0 + v01 * n_ave / 3 * n_ave;
}
e += result;
p_x[1] = result.x; p_y[1] = result.y; p_z[1] = result.z; // 控制顶点1
float3 v02 = v2 - v0;
if (n_count[0] < 2) // 该条边只有一个法向
{
result = (v0 * 2 + v2 - n0 * (v02 * n0)) / 3;
}
else
{
float3 n_oppo = triangleListD[adj_face_idx[0]].n_adj[adj_corner_ctrlpoint_idx[adj_edge_idx[0]][1]];
float3 n_ave = cross(n0, n_oppo);
normalize(n_ave);
result = v0 + v02 * n_ave / 3 * n_ave;
}
e += result;
p_x[2] = result.x; p_y[2] = result.y; p_z[2] = result.z; // 控制顶点2
float3 v10 = v0 - v1;
if (n_count[1] < 2) // 该条边只有一个法向
{
result = (v1 * 2 + v0 - n1 * (v10 * n1)) / 3;
}
else
{
float3 n_oppo = triangleListD[adj_face_idx[1]].n_adj[adj_corner_ctrlpoint_idx[adj_edge_idx[1]][1]];
float3 n_ave = cross(n1, n_oppo);
normalize(n_ave);
result = v1 + v10 * n_ave / 3 * n_ave;
}
e += result;
p_x[3] = result.x; p_y[3] = result.y; p_z[3] = result.z; // 控制顶点3
float3 v12 = v2 - v1;
if (n_count[2] < 2) // 该条边只有一个法向
{
result = (v1 * 2 + v2 - n1 * (v12 * n1)) / 3;
}
else
{
float3 n_oppo = triangleListD[adj_face_idx[2]].n_adj[adj_corner_ctrlpoint_idx[adj_edge_idx[2]][0]];
float3 n_ave = cross(n1, n_oppo);
normalize(n_ave);
result = v1 + v12 * n_ave / 3 * n_ave;
}
e += result;
p_x[7] = result.x; p_y[7] = result.y; p_z[7] = result.z; // 控制顶点7
float3 v20 = v0 - v2;
if (n_count[0] < 2) // 该条边只有一个法向
{
result = (v2 * 2 + v0 - n2 * (v20 * n2)) / 3;
}
else
{
float3 n_oppo = triangleListD[adj_face_idx[0]].n_adj[adj_corner_ctrlpoint_idx[adj_edge_idx[0]][0]];
float3 n_ave = cross(n2, n_oppo);
normalize(n_ave);
result = v2 + v20 * n_ave / 3 * n_ave;
}
e += result;
p_x[5] = result.x; p_y[5] = result.y; p_z[5] = result.z; // 控制顶点5
float3 v21 = v1 - v2;
if (n_count[2] < 2) // 该条边只有一个法向
{
result = (v2 * 2 + v1 - n2 * (v21 * n2)) / 3;
}
else
{
float3 n_oppo = triangleListD[adj_face_idx[2]].n_adj[adj_corner_ctrlpoint_idx[adj_edge_idx[2]][1]];
float3 n_ave = cross(n2, n_oppo);
normalize(n_ave);
result = v2 + v21 * n_ave / 3 * n_ave;
}
e += result;
p_x[8] = result.x; p_y[8] = result.y; p_z[8] = result.z; // 控制顶点8
e /= 6;
float3 v_total = (v0 + v1 + v2) / 3;
result = e + (e - v_total) / 2;
p_x[4] = result.x; p_y[4] = result.y; p_z[4] = result.z; // 控制顶点4
/*********************** 计算法向控制顶点 **********************/
p_x = &triangleNormalCtrlPointD_PN[6 * triangleIdx];
p_y = &triangleNormalCtrlPointD_PN[6 * (f + triangleIdx)];
p_z = &triangleNormalCtrlPointD_PN[6 * (f * 2 + triangleIdx)];
p_x[0] = n0.x; p_y[0] = n0.y; p_z[0] = n0.z; // 控制顶点0
p_x[3] = n1.x; p_y[3] = n1.y; p_z[3] = n1.z; // 控制顶点3
p_x[5] = n2.x; p_y[5] = n2.y; p_z[5] = n2.z; // 控制顶点5
float value01 = 2 * v01 * (n0 + n1) / (v01 * v01);
result = n0 + n1 - value01 * v01;
normalize(result);
p_x[1] = result.x; p_y[1] = result.y; p_z[1] = result.z; // 控制顶点1
float value12 = 2 * v12 * (n1 + n2) / (v12 * v12);
result = n1 + n2 - value12 * v12;
normalize(result);
p_x[4] = result.x; p_y[4] = result.y; p_z[4] = result.z; // 控制顶点4
float value20 = 2 * v20 * (n2 + n0) / (v20 * v20);
result = n2 + n0 - value20 * v20;
normalize(result);
p_x[2] = result.x; p_y[2] = result.y; p_z[2] = result.z; // 控制顶点2
}
__global__ void calcSampleValueThread_PN(TriangleD *triangleListD, float3 *sampleValueD_PN,
int f, int n, int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW)
{
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (globalIdx >= 3 * f)
return;
int triangleIdx = globalIdx / 3;
TriangleD &triangle = triangleListD[triangleIdx];
int localIdx = globalIdx % 3;
float3 vertex = triangle.v[localIdx];
float u = vertex.x;
float v = vertex.y;
float w = vertex.z;
//float tempFloorFloat = (sqrtf((float)localIdx * 8 + 9) - 3) * 0.5;
//int floor = rintf(tempFloorFloat);
//if ((floor * 2 + 3) * (floor * 2 + 3) != localIdx * 8 + 9)
//floor = ceilf(tempFloorFloat);
//int room = localIdx - (floor + 1) * floor * 0.5;
//float3 barycentric_coord;
//barycentric_coord.x = (float)(n - floor) / n;
//barycentric_coord.y = (float)(floor - room) / n;
//barycentric_coord.z = 1.0f - barycentric_coord.x - barycentric_coord.y;
//float3 v0 = triangle.v[0];
//float3 v1 = triangle.v[1];
//float3 v2 = triangle.v[2];
//// u, v, w 表示经过重心坐标插值之后的采样点的x, y, z分量
//float u = v0.x * barycentric_coord.x + v1.x * barycentric_coord.y + v2.x * barycentric_coord.z;
//float v = v0.y * barycentric_coord.x + v1.y * barycentric_coord.y + v2.y * barycentric_coord.z;
//float w = v0.z * barycentric_coord.x + v1.z * barycentric_coord.y + v2.z * barycentric_coord.z;
// u, v, w方向节点区间数量
int knot_interval_count_u = orderU + ctrlPointNumU - (orderU - 1) * 2 - 1;
int knot_interval_count_v = orderV + ctrlPointNumV - (orderV - 1) * 2 - 1;
int knot_interval_count_w = orderW + ctrlPointNumW - (orderW - 1) * 2 - 1;
// 预先将其值设为最大,将末端点归入最后一段
int left_idx_u = orderU - 1 + knot_interval_count_u - 1;
int left_idx_v = orderV - 1 + knot_interval_count_v - 1;
int left_idx_w = orderW - 1 + knot_interval_count_w - 1;
// 沿 U 方向查找当前点所在的节点区间
for (int ii = orderU - 1; ii <= orderU - 1 + knot_interval_count_u - 1; ++ii)
{
if (u >= knotListD[ii] && u < knotListD[ii + 1])
{
left_idx_u = ii;
break;
}
}
// 沿 V 方向查找当前点所在的节点区间
for (int jj = orderV - 1; jj <= orderV - 1 + knot_interval_count_v - 1; ++jj)
{
if (v >= knotListD[20 + jj] && v < knotListD[20 + jj + 1])
{
left_idx_v = jj;
break;
}
}
// 沿 W 方向查找当前点所在的节点区间
for (int kk = orderW - 1; kk <= orderW - 1 + knot_interval_count_w - 1; ++kk)
{
if (w >= knotListD[40 + kk] && w < knotListD[40 + kk + 1])
{
left_idx_w = kk;
break;
}
}
float tmpKnot = knotListD[left_idx_u];
float tmpKnot1 = knotListD[left_idx_u + 1];
float x_stride = tmpKnot1 - tmpKnot;
u = (u - tmpKnot) / x_stride;
tmpKnot = knotListD[20 + left_idx_v];
tmpKnot1 = knotListD[20 + left_idx_v + 1];
float y_stride = tmpKnot1 - tmpKnot;
v = (v - tmpKnot) / y_stride;
tmpKnot = knotListD[40 + left_idx_w];
tmpKnot1 = knotListD[40 + left_idx_w + 1];
float z_stride = tmpKnot1 - tmpKnot;
w = (w - tmpKnot) / z_stride;
extern __shared__ float shared_array[];
// 算出该线程负责的采样点的 B 样条体值
// fu 表示J_bar矩阵第一列三个元素:偏F_bar_x偏u、偏F_bar_y偏u、偏F_bar_z偏u
// fv 表示J_bar矩阵第二列三个元素:偏F_bar_x偏v、偏F_bar_y偏v、偏F_bar_z偏v
float3 result, fu, fv;
BSplineVolumeValueMatrixD_combine(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW,
result, fu, fv);
__syncthreads();
//sampleValueD[index2c(localIdx, triangleIdx , 3)] = result.x;
//sampleValueD[index2c(localIdx, triangleIdx + f , 3)] = result.y;
//sampleValueD[index2c(localIdx, triangleIdx + f * 2, 3)] = result.z;
sampleValueD_PN[3 * triangleIdx + localIdx].x = result.x;
sampleValueD_PN[3 * triangleIdx + localIdx].y = result.y;
sampleValueD_PN[3 * triangleIdx + localIdx].z = result.z;
//printf("%d: result = (%f, %f, %f)\n", globalIdx, result.x, result.y, result.z);
//printf("%d: result = (%f, %f, %f)\n", threadIdx.x, result.x, result.y, result.z);
///////////////////////////////////////////////////////////////////////////////
// fw 表示J_bar矩阵第三列三个元素:偏F_bar_x偏w、偏F_bar_y偏w、偏F_bar_z偏w
float3 fw = BSplineVolumeValueMatrixDw(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW);
//__syncthreads();
//v0 = triangle.n[0];
//v1 = triangle.n[1];
//v2 = triangle.n[2];
//// u, v, w 表示经过重心坐标插值之后的法向的x, y, z分量
//u = v0.x * barycentric_coord.x + v1.x * barycentric_coord.y + v2.x * barycentric_coord.z;
//v = v0.y * barycentric_coord.x + v1.y * barycentric_coord.y + v2.y * barycentric_coord.z;
//w = v0.z * barycentric_coord.x + v1.z * barycentric_coord.y + v2.z * barycentric_coord.z;
vertex = triangle.n[localIdx];
u = vertex.x;
v = vertex.y;
w = vertex.z;
float3 *sampleNormalD_PN = sampleValueD_PN + 3 * f;
// J_bar_star_T_[012]表示J_bar的伴随矩阵的转置(即J_bar*T)的第一行三个元素
float J_bar_star_T_0 = fv.y * fw.z - fw.y * fv.z;
float J_bar_star_T_1 = fw.y * fu.z - fu.y * fw.z;
float J_bar_star_T_2 = fu.y * fv.z - fv.y * fu.z;
//sampleNormalD[index2c(localIdx, triangleIdx, 3)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
sampleNormalD_PN[3 * triangleIdx + localIdx].x = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]表示J_bar的伴随矩阵的转置(即J_bar*T)的第二行三个元素
J_bar_star_T_0 = fv.z * fw.x - fw.z * fv.x;
J_bar_star_T_1 = fw.z * fu.x - fu.z * fw.x;
J_bar_star_T_2 = fu.z * fv.x - fv.z * fu.x;
sampleNormalD_PN[3 * triangleIdx + localIdx].y = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]表示J_bar的伴随矩阵的转置(即J_bar*T)的第三行三个元素
J_bar_star_T_0 = fv.x * fw.y - fw.x * fv.y;
J_bar_star_T_1 = fw.x * fu.y - fu.x * fw.y;
J_bar_star_T_2 = fu.x * fv.y - fv.x * fu.y;
sampleNormalD_PN[3 * triangleIdx + localIdx].z = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
}
__global__ void calcSampleValueThread(TriangleD *triangleListD, float *sampleValueD,
int activeThreadNum, int m, int f, int c, int n,
int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW)
{
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (globalIdx >= activeThreadNum)
return;
int triangleIdx = globalIdx / m;
TriangleD &triangle = triangleListD[triangleIdx];
int localIdx = globalIdx % m;
float tempFloorFloat = (sqrtf((float)localIdx * 8 + 9) - 3) * 0.5;
int floor = rintf(tempFloorFloat);
if ((floor * 2 + 3) * (floor * 2 + 3) != localIdx * 8 + 9)
floor = ceilf(tempFloorFloat);
int room = localIdx - ((floor + 1) * floor >> 1);
float3 barycentric_coord;
barycentric_coord.x = (float)(n - floor) / n;
barycentric_coord.y = (float)(floor - room) / n;
barycentric_coord.z = 1.0f - barycentric_coord.x - barycentric_coord.y;
float3 v0 = triangle.v[0];
float3 v1 = triangle.v[1];
float3 v2 = triangle.v[2];
// u, v, w 表示经过重心坐标插值之后的采样点的x, y, z分量
float u = v0.x * barycentric_coord.x + v1.x * barycentric_coord.y + v2.x * barycentric_coord.z;
float v = v0.y * barycentric_coord.x + v1.y * barycentric_coord.y + v2.y * barycentric_coord.z;
float w = v0.z * barycentric_coord.x + v1.z * barycentric_coord.y + v2.z * barycentric_coord.z;
// u, v, w方向节点区间数量
int knot_interval_count_u = orderU + ctrlPointNumU - (orderU - 1) * 2 - 1;
int knot_interval_count_v = orderV + ctrlPointNumV - (orderV - 1) * 2 - 1;
int knot_interval_count_w = orderW + ctrlPointNumW - (orderW - 1) * 2 - 1;
// 预先将其值设为最大,将末端点归入最后一段
int left_idx_u = orderU - 1 + knot_interval_count_u - 1;
int left_idx_v = orderV - 1 + knot_interval_count_v - 1;
int left_idx_w = orderW - 1 + knot_interval_count_w - 1;
// 沿 U 方向查找当前点所在的节点区间
for (int ii = orderU - 1; ii <= orderU - 1 + knot_interval_count_u - 1; ++ii)
{
if (u >= knotListD[ii] && u < knotListD[ii + 1])
{
left_idx_u = ii;
break;
}
}
// 沿 V 方向查找当前点所在的节点区间
for (int jj = orderV - 1; jj <= orderV - 1 + knot_interval_count_v - 1; ++jj)
{
if (v >= knotListD[20 + jj] && v < knotListD[20 + jj + 1])
{
left_idx_v = jj;
break;
}
}
// 沿 W 方向查找当前点所在的节点区间
for (int kk = orderW - 1; kk <= orderW - 1 + knot_interval_count_w - 1; ++kk)
{
if (w >= knotListD[40 + kk] && w < knotListD[40 + kk + 1])
{
left_idx_w = kk;
break;
}
}
float tmpKnot = knotListD[left_idx_u];
float tmpKnot1 = knotListD[left_idx_u + 1];
float x_stride = tmpKnot1 - tmpKnot;
u = (u - tmpKnot) / x_stride;
tmpKnot = knotListD[20 + left_idx_v];
tmpKnot1 = knotListD[20 + left_idx_v + 1];
float y_stride = tmpKnot1 - tmpKnot;
v = (v - tmpKnot) / y_stride;
tmpKnot = knotListD[40 + left_idx_w];
tmpKnot1 = knotListD[40 + left_idx_w + 1];
float z_stride = tmpKnot1 - tmpKnot;
w = (w - tmpKnot) / z_stride;
extern __shared__ float shared_array[];
// 算出该线程负责的采样点的 B 样条体值
// fu 表示J_bar矩阵第一列三个元素:偏F_bar_x偏u、偏F_bar_y偏u、偏F_bar_z偏u
// fv 表示J_bar矩阵第二列三个元素:偏F_bar_x偏v、偏F_bar_y偏v、偏F_bar_z偏v
float3 result, fu, fv;
BSplineVolumeValueMatrixD_combine(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW,
result, fu, fv);
__syncthreads();
sampleValueD[index2c(localIdx, triangleIdx , m + c)] = result.x;
sampleValueD[index2c(localIdx, triangleIdx + f , m + c)] = result.y;
sampleValueD[index2c(localIdx, triangleIdx + f * 2, m + c)] = result.z;
///////////////////////////////////////////////////////////////////////////////
// fw 表示J_bar矩阵第三列三个元素:偏F_bar_x偏w、偏F_bar_y偏w、偏F_bar_z偏w
float3 fw = BSplineVolumeValueMatrixDw(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW);
//__syncthreads();
v0 = triangle.n[0];
v1 = triangle.n[1];
v2 = triangle.n[2];
// u, v, w 表示经过重心坐标插值之后的法向的x, y, z分量
u = v0.x * barycentric_coord.x + v1.x * barycentric_coord.y + v2.x * barycentric_coord.z;
v = v0.y * barycentric_coord.x + v1.y * barycentric_coord.y + v2.y * barycentric_coord.z;
w = v0.z * barycentric_coord.x + v1.z * barycentric_coord.y + v2.z * barycentric_coord.z;
float *sampleNormalD = sampleValueD + 3 * f * (m + c);
// J_bar_star_T_[012]表示J_bar的伴随矩阵的转置(即J_bar*T)的第一行三个元素
float J_bar_star_T_0 = fv.y * fw.z - fw.y * fv.z;
float J_bar_star_T_1 = fw.y * fu.z - fu.y * fw.z;
float J_bar_star_T_2 = fu.y * fv.z - fv.y * fu.z;
sampleNormalD[index2c(localIdx, triangleIdx, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]表示J_bar的伴随矩阵的转置(即J_bar*T)的第二行三个元素
J_bar_star_T_0 = fv.z * fw.x - fw.z * fv.x;
J_bar_star_T_1 = fw.z * fu.x - fu.z * fw.x;
J_bar_star_T_2 = fu.z * fv.x - fv.z * fu.x;
sampleNormalD[index2c(localIdx, triangleIdx + f, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]表示J_bar的伴随矩阵的转置(即J_bar*T)的第三行三个元素
J_bar_star_T_0 = fv.x * fw.y - fw.x * fv.y;
J_bar_star_T_1 = fw.x * fu.y - fu.x * fw.y;
J_bar_star_T_2 = fu.x * fv.y - fv.x * fu.y;
sampleNormalD[index2c(localIdx, triangleIdx + f * 2, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
}
__global__ void calcConstraitSampleValueThread(TriangleD *triangleListD, float *sampleValueD,
int activeThreadNum, int m, int f, int c, int n_,
int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW)
{
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (globalIdx >= activeThreadNum)
return;
int triangleIdx = globalIdx / c;
TriangleD &triangle = triangleListD[triangleIdx];
int localIdx = globalIdx % c;
int floor = -1, room = -1;
if (localIdx < 2 * n_ - 1)
{
floor = (localIdx + 1) / 2;
if (localIdx % 2 == 1)
room = 0;
else
room = floor;
}
else
{
floor = n_;
room = localIdx - (2 * n_ - 1);
}
float3 barycentric_coord;
barycentric_coord.x = (float)(n_ - floor) / n_;
barycentric_coord.y = (float)(floor - room) / n_;
barycentric_coord.z = 1.0f - barycentric_coord.x - barycentric_coord.y;
float3 v0 = triangle.v[0];
float3 v1 = triangle.v[1];
float3 v2 = triangle.v[2];
// u, v, w 表示经过重心坐标插值之后的采样点的x, y, z分量
float u = v0.x * barycentric_coord.x + v1.x * barycentric_coord.y + v2.x * barycentric_coord.z;
float v = v0.y * barycentric_coord.x + v1.y * barycentric_coord.y + v2.y * barycentric_coord.z;
float w = v0.z * barycentric_coord.x + v1.z * barycentric_coord.y + v2.z * barycentric_coord.z;
// u, v, w方向节点区间数量
int knot_interval_count_u = orderU + ctrlPointNumU - (orderU - 1) * 2 - 1;
int knot_interval_count_v = orderV + ctrlPointNumV - (orderV - 1) * 2 - 1;
int knot_interval_count_w = orderW + ctrlPointNumW - (orderW - 1) * 2 - 1;
// 预先将其值设为最大,将末端点归入最后一段
int left_idx_u = orderU - 1 + knot_interval_count_u - 1;
int left_idx_v = orderV - 1 + knot_interval_count_v - 1;
int left_idx_w = orderW - 1 + knot_interval_count_w - 1;
// 沿 U 方向查找当前点所在的节点区间
for (int ii = orderU - 1; ii <= orderU - 1 + knot_interval_count_u - 1; ++ii)
{
if (u >= knotListD[ii] && u < knotListD[ii + 1])
{
left_idx_u = ii;
break;
}
}
// 沿 V 方向查找当前点所在的节点区间
for (int jj = orderV - 1; jj <= orderV - 1 + knot_interval_count_v - 1; ++jj)
{
if (v >= knotListD[20 + jj] && v < knotListD[20 + jj + 1])
{
left_idx_v = jj;
break;
}
}
// 沿 W 方向查找当前点所在的节点区间
for (int kk = orderW - 1; kk <= orderW - 1 + knot_interval_count_w - 1; ++kk)
{
if (w >= knotListD[40 + kk] && w < knotListD[40 + kk + 1])
{
left_idx_w = kk;
break;
}
}
float tmpKnot = knotListD[left_idx_u];
float tmpKnot1 = knotListD[left_idx_u + 1];
float x_stride = tmpKnot1 - tmpKnot;
u = (u - tmpKnot) / x_stride;
tmpKnot = knotListD[20 + left_idx_v];
tmpKnot1 = knotListD[20 + left_idx_v + 1];
float y_stride = tmpKnot1 - tmpKnot;
v = (v - tmpKnot) / y_stride;
tmpKnot = knotListD[40 + left_idx_w];
tmpKnot1 = knotListD[40 + left_idx_w + 1];
float z_stride = tmpKnot1 - tmpKnot;
w = (w - tmpKnot) / z_stride;
extern __shared__ float shared_array[];
// 算出该线程负责的采样点的 B 样条体值f
// fu 表示J_bar矩阵第一列三个元素:偏F_bar_x偏u、偏F_bar_y偏u、偏F_bar_z偏u
// fv 表示J_bar矩阵第二列三个元素:偏F_bar_x偏v、偏F_bar_y偏v、偏F_bar_z偏v
float3 result, fu, fv;
BSplineVolumeValueMatrixD_combine(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW,
result, fu, fv);
__syncthreads();
sampleValueD[index2c(localIdx + m, triangleIdx , m + c)] = result.x;
sampleValueD[index2c(localIdx + m, triangleIdx + f , m + c)] = result.y;
sampleValueD[index2c(localIdx + m, triangleIdx + f * 2, m + c)] = result.z;
////////////////////////////////////////////////////////////////////////////
// fw 表示J_bar矩阵第三列三个元素:偏F_bar_x偏w、偏F_bar_y偏w、偏F_bar_z偏w
float3 fw = BSplineVolumeValueMatrixDw(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW);
//__syncthreads();
v0 = triangle.n[0];
v1 = triangle.n[1];
v2 = triangle.n[2];
// u, v, w 表示经过重心坐标插值之后的法向的x, y, z分量
u = v0.x * barycentric_coord.x + v1.x * barycentric_coord.y + v2.x * barycentric_coord.z;
v = v0.y * barycentric_coord.x + v1.y * barycentric_coord.y + v2.y * barycentric_coord.z;
w = v0.z * barycentric_coord.x + v1.z * barycentric_coord.y + v2.z * barycentric_coord.z;
float *sampleNormalD = sampleValueD + 3 * f * (m + c);
// J_bar_star_T_[012]表示J_bar的伴随矩阵的转置(即J_bar*T)的第一行三个元素
float J_bar_star_T_0 = fv.y * fw.z - fw.y * fv.z;
float J_bar_star_T_1 = fw.y * fu.z - fu.y * fw.z;
float J_bar_star_T_2 = fu.y * fv.z - fv.y * fu.z;
sampleNormalD[index2c(localIdx + m, triangleIdx, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]表示J_bar的伴随矩阵的转置(即J_bar*T)的第二行三个元素
J_bar_star_T_0 = fv.z * fw.x - fw.z * fv.x;
J_bar_star_T_1 = fw.z * fu.x - fu.z * fw.x;
J_bar_star_T_2 = fu.z * fv.x - fv.z * fu.x;
sampleNormalD[index2c(localIdx + m, triangleIdx + f, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]表示J_bar的伴随矩阵的转置(即J_bar*T)的第三行三个元素
J_bar_star_T_0 = fv.x * fw.y - fw.x * fv.y;
J_bar_star_T_1 = fw.x * fu.y - fu.x * fw.y;
J_bar_star_T_2 = fu.x * fv.y - fv.x * fu.y;
sampleNormalD[index2c(localIdx + m, triangleIdx + f * 2, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
}
__global__ void calcAdjustNormal(TriangleD *triangleListD, int f,
int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW)
{
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
int triangleIdx = globalIdx / 3;
if (triangleIdx >= f)
return;
int i = globalIdx % 3;
float3 vertex = triangleListD[triangleIdx].v[i];
float u = vertex.x;
float v = vertex.y;
float w = vertex.z;
// u, v, w方向节点区间数量
int knot_interval_count_u = orderU + ctrlPointNumU - (orderU - 1) * 2 - 1;
int knot_interval_count_v = orderV + ctrlPointNumV - (orderV - 1) * 2 - 1;
int knot_interval_count_w = orderW + ctrlPointNumW - (orderW - 1) * 2 - 1;
// 预先将其值设为最大,将末端点归入最后一段
int left_idx_u = orderU - 1 + knot_interval_count_u - 1;
int left_idx_v = orderV - 1 + knot_interval_count_v - 1;
int left_idx_w = orderW - 1 + knot_interval_count_w - 1;
// 沿 U 方向查找当前点所在的节点区间
for (int ii = orderU - 1; ii <= orderU - 1 + knot_interval_count_u - 1; ++ii)
{
if (u >= knotListD[ii] && u < knotListD[ii + 1])
{
left_idx_u = ii;
break;
}
}
// 沿 V 方向查找当前点所在的节点区间
for (int jj = orderV - 1; jj <= orderV - 1 + knot_interval_count_v - 1; ++jj)
{
if (v >= knotListD[20 + jj] && v < knotListD[20 + jj + 1])
{
left_idx_v = jj;
break;
}
}
// 沿 W 方向查找当前点所在的节点区间
for (int kk = orderW - 1; kk <= orderW - 1 + knot_interval_count_w - 1; ++kk)
{
if (w >= knotListD[40 + kk] && w < knotListD[40 + kk + 1])
{
left_idx_w = kk;
break;
}
}
float tmpKnot = knotListD[left_idx_u];
float tmpKnot1 = knotListD[left_idx_u + 1];
float x_stride = tmpKnot1 - tmpKnot;
u = (u - tmpKnot) / x_stride;
tmpKnot = knotListD[20 + left_idx_v];
tmpKnot1 = knotListD[20 + left_idx_v + 1];
float y_stride = tmpKnot1 - tmpKnot;
v = (v - tmpKnot) / y_stride;
tmpKnot = knotListD[40 + left_idx_w];
tmpKnot1 = knotListD[40 + left_idx_w + 1];
float z_stride = tmpKnot1 - tmpKnot;
w = (w - tmpKnot) / z_stride;
extern __shared__ float shared_array[];
// fu 表示J_bar矩阵第一列三个元素:偏F_bar_x偏u、偏F_bar_y偏u、偏F_bar_z偏u
float3 fu = BSplineVolumeValueMatrixDu(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW);
__syncthreads();
// fv 表示J_bar矩阵第二列三个元素:偏F_bar_x偏v、偏F_bar_y偏v、偏F_bar_z偏v
float3 fv = BSplineVolumeValueMatrixDv(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW);
__syncthreads();
// fw 表示J_bar矩阵第三列三个元素:偏F_bar_x偏w、偏F_bar_y偏w、偏F_bar_z偏w
float3 fw = BSplineVolumeValueMatrixDw(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW);
__syncthreads();
vertex = triangleListD[triangleIdx].n_adj_origin[i];
u = vertex.x;
v = vertex.y;
w = vertex.z;
// J_bar_star_T_[012]表示J_bar的伴随矩阵的转置(即J_bar*T)的第一行三个元素
float J_bar_star_T_0 = fv.y * fw.z - fw.y * fv.z;
float J_bar_star_T_1 = fw.y * fu.z - fu.y * fw.z;
float J_bar_star_T_2 = fu.y * fv.z - fv.y * fu.z;
triangleListD[triangleIdx].n_adj[i].x = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]表示J_bar的伴随矩阵的转置(即J_bar*T)的第二行三个元素
J_bar_star_T_0 = fv.z * fw.x - fw.z * fv.x;
J_bar_star_T_1 = fw.z * fu.x - fu.z * fw.x;
J_bar_star_T_2 = fu.z * fv.x - fv.z * fu.x;
triangleListD[triangleIdx].n_adj[i].y = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]表示J_bar的伴随矩阵的转置(即J_bar*T)的第三行三个元素
J_bar_star_T_0 = fv.x * fw.y - fw.x * fv.y;
J_bar_star_T_1 = fw.x * fu.y - fu.x * fw.y;
J_bar_star_T_2 = fu.x * fv.y - fv.x * fu.y;
triangleListD[triangleIdx].n_adj[i].z = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
}
void calcSampleValue(AlgorithmType algo_type)
{
if (algo_type == CYM)
{
// 计算采样点的值和法向
//calcSampleValueThread<<<blockNumStep0, blockSizeStep0, sizeof(float) * blockSizeStep0 * 9>>>
calcSampleValueThread<<<blockNumStep0, blockSizeStep0, sizeof(float) * blockSizeStep0 * 13>>>
(triangleListD, sampleValueD,
activeThreadNumStep0, triangleCtrlPointNum, triangleNum, constrait_point_num,
degree, order[U], order[V], order[W],
ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W]);
// 计算约束点的值和法向
//calcConstraitSampleValueThread<<<blockNumStep1, blockSizeStep1, sizeof(float) * blockSizeStep1 * 9>>>
calcConstraitSampleValueThread<<<blockNumStep1, blockSizeStep1, sizeof(float) * blockSizeStep1 * 13>>>
(triangleListD, sampleValueD,
activeThreadNumStep1, triangleCtrlPointNum, triangleNum, constrait_point_num,
degree_lower, order[U], order[V], order[W],
ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W]);
}
else
{
calcSampleValueThread_PN<<<blockNumStep0_PN, blockSizeStep0_PN, sizeof(float) * blockSizeStep1 * 13>>>
(triangleListD, sampleValueD_PN,
triangleNum, degree, order[U], order[V], order[W],
ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W]);
}
//cudaError_t error = cudaGetLastError();
//if (error != 0)
//{
//cout << "第0步出错\t";
//printf("CUDA error: %s\n", cudaGetErrorString(error));
//}
//error = cudaGetLastError();
//if (error != 0)
//{
//cout << "第一步出错\t";
//printf("CUDA error: %s\n", cudaGetErrorString(error));
//}
//float3 *test = new float3[triangleNum * 3];
//for (int i = 0; i < triangleNum * 3; ++i)
//test[i] = make_float3(1.0f, 2.0f, 3.0f);
//cudaMemcpy(test, sampleValueD_PN, sizeof(float3) * triangleNum * 3, cudaMemcpyDeviceToHost);
//for (int i = 0; i < triangleNum; ++i)
//{
//cout << test[i * 3].x << ", " << test[i * 3].y << ", " << test[i * 3].z << endl;
//cout << test[i * 3 + 1].x << ", " << test[i * 3 + 1].y << ", " << test[i * 3 + 1].z << endl;
//cout << test[i * 3 + 2].x << ", " << test[i * 3 + 2].y << ", " << test[i * 3 + 2].z << endl;
//cout << "==============" << endl;
//}
//delete []test;
//float *test = new float[(triangleCtrlPointNum + constrait_point_num) * triangleNum * 6];
//cudaMemcpy(test, sampleValueD, sizeof(float) * (triangleCtrlPointNum + constrait_point_num) * triangleNum * 6, cudaMemcpyDeviceToHost);
//float *n = test + (triangleCtrlPointNum + constrait_point_num) * triangleNum * 3;
//for (int i = 0; i < triangleNum; ++i)
//{
//cout << "i = " << i << endl;
//for (int j = 0; j < constrait_point_num; ++j)
//{
//cout << "\t" << j << " " << test[i * (triangleCtrlPointNum + constrait_point_num) + triangleCtrlPointNum + j] << " "
//<< test[(i + triangleNum) * (triangleCtrlPointNum + constrait_point_num) + triangleCtrlPointNum + j] << " "
//<< test[(i + triangleNum * 2) * (triangleCtrlPointNum + constrait_point_num) + triangleCtrlPointNum + j] << "\t";
//cout << n[i * (triangleCtrlPointNum + constrait_point_num) + triangleCtrlPointNum + j] << " "
//<< n[(i + triangleNum) * (triangleCtrlPointNum + constrait_point_num) + triangleCtrlPointNum + j] << " "
//<< n[(i + triangleNum * 2) * (triangleCtrlPointNum + constrait_point_num) + triangleCtrlPointNum + j] << endl;
//}
//}
//delete []test;
}
#ifdef TRUTH
__global__ void calcSampleValueThread_truth(TriangleD *triangleListD, float *sampleValueD_truth,
int activeThreadNum, int m, int f, int n,
int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW)
{
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (globalIdx >= activeThreadNum)
return;
int triangleIdx = globalIdx / m;
int localIdx = globalIdx % m;
float tempFloorFloat = (sqrtf((float)localIdx * 8 + 9) - 3) / 2;
int floor = rintf(tempFloorFloat);
if ((floor * 2 + 3) * (floor * 2 + 3) != localIdx * 8 + 9)
floor = ceilf(tempFloorFloat);
int room = localIdx - (floor + 1) * floor / 2;
float3 barycentric_coord;
barycentric_coord.x = (float)(n - floor) / n;
barycentric_coord.y = (float)(floor - room) / n;
barycentric_coord.z = 1.0f - barycentric_coord.x - barycentric_coord.y;
TriangleD &triangle = triangleListD[triangleIdx];
float3 v0 = triangle.v[0];
float3 v1 = triangle.v[1];
float3 v2 = triangle.v[2];
float u = v0.x * barycentric_coord.x + v1.x * barycentric_coord.y + v2.x * barycentric_coord.z;
float v = v0.y * barycentric_coord.x + v1.y * barycentric_coord.y + v2.y * barycentric_coord.z;
float w = v0.z * barycentric_coord.x + v1.z * barycentric_coord.y + v2.z * barycentric_coord.z;
int i = (u - knotListD[0]) / (knotListD[orderU] - knotListD[0]);
int j = (v - knotListD[20 + 0]) / (knotListD[20 + orderV] - knotListD[20 + 0]);
int k = (w - knotListD[40 + 0]) / (knotListD[40 + orderW] - knotListD[40 + 0]);
if (i >= ctrlPointNumU + orderU - 2 * (orderU - 1) - 1)
--i;
if (j >= ctrlPointNumV + orderV - 2 * (orderV - 1) - 1)
--j;
if (k >= ctrlPointNumW + orderW - 2 * (orderW - 1) - 1)
--k;
/* 确定此 block 需要的 u、v、w 三个方向的 B 样条矩阵 */
float *Mu = matrixCase(matrix_b_spline_d, orderU, ctrlPointNumU, i + orderU - 1);
float *Mv = matrixCase(matrix_b_spline_d, orderV, ctrlPointNumV, j + orderV - 1);
float *Mw = matrixCase(matrix_b_spline_d, orderW, ctrlPointNumW, k + orderW - 1);
float tmpKnot = knotListD[i + orderU - 1];
float tmpKnot1 = knotListD[i + orderU];
u = (u - tmpKnot) / (tmpKnot1 - tmpKnot);
tmpKnot = knotListD[20 + j + orderV - 1];
tmpKnot1 = knotListD[20 + j + orderV];
v = (v - tmpKnot) / (tmpKnot1 - tmpKnot);
tmpKnot = knotListD[40 + k + orderW - 1];
tmpKnot1 = knotListD[40 + k + orderW];
w = (w - tmpKnot) / (tmpKnot1 - tmpKnot);
extern __shared__ float shared_array[];
/* 算出该线程负责的采样点的 B 样条体值 */
float3 result = BSplineVolumeValueMatrixD2(Mu, Mv, Mw,
u, v, w, shared_array,
i + orderU - 1, j + orderV - 1, k + orderW - 1,
orderU, orderV, orderW);
sampleValueD_truth[index2c(localIdx, triangleIdx, m)] = result.x;
sampleValueD_truth[index2c(localIdx, triangleIdx + f, m)] = result.y;
sampleValueD_truth[index2c(localIdx, triangleIdx + f * 2, m)] = result.z;
}
void calcSampleValue_truth()
{
calcSampleValueThread_truth<<<blockNumStep0_truth, blockSizeStep0, sizeof(float) * blockSizeStep0 * 11>>>
(triangleListD, sampleValueD_truth,
activeThreadNumStep0_truth, triangleCtrlPointNum, triangleNum,
degree, order[U], order[V], order[W],
ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W]);
}
#endif
/************************************************************************************************************/
#define NEW_MOVE // 定义这个表示使用最终的move函数,否则使用最原始的PN算法
#ifdef NEW_MOVE
// 调整拟合出来的控制顶点
__global__ void move(TriangleD *triangleListD, float *triangleCtrlPointD, int *triangle_adjacent_tableD,
int m_, int f, float center_factor, bool use_pn)
{
int triangleIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (triangleIdx >= f)
return;
int adj_face_idx[3];
adj_face_idx[0] = triangle_adjacent_tableD[triangleIdx * 3];
adj_face_idx[1] = triangle_adjacent_tableD[triangleIdx * 3 + 1];
adj_face_idx[2] = triangle_adjacent_tableD[triangleIdx * 3 + 2];
//int adj_edge_idx[3] = { -1, -1, -1 }; // 实际上应该使用这一句,以此判断有没有相邻三角形
int adj_edge_idx[3] = { 0, 0, 0 }; // 但是对于某些模型使用上一句会出现内存越界,所以暂且使用这一句,权宜之计
//bool handle[3] = { false, false, false }; // 在该边有一个以上法向时,是否处理这条边。当这条边有相邻面片才需要处理
for (int i = 0; i < 3; ++i)
if (adj_face_idx[i] >= 0)
{
adj_edge_idx[i] = adj_face_idx[i] & 0x3;
adj_face_idx[i] = adj_face_idx[i] >> 2;
//handle[i] = true;
}
int n_count[3];
n_count[0] = triangleListD[triangleIdx].nc[0];
n_count[1] = triangleListD[triangleIdx].nc[1];
n_count[2] = triangleListD[triangleIdx].nc[2];
//printf("ncount = (%d, %d, %d): triangleIdx = %d\n", n_count[0], n_count[1], n_count[2], triangleIdx);
float *p_x = &triangleCtrlPointD[m_ * triangleIdx];
float *p_y = &triangleCtrlPointD[m_ * (f + triangleIdx)];
float *p_z = &triangleCtrlPointD[m_ * (f * 2 + triangleIdx)];
int edge_ctrlpoint_idx[6] = { 5, 2, 1, 3, 7, 8 }; // 依次处理的边控制顶点的序号(0,1号属于0号边, 2,3号属于1号边, 4,5号属于2号边)
int corner_ctrlpoint_idx[6] = { 9, 0, 0, 6, 6, 9 }; // 上面每个边控制顶点对应的角控制顶点序号(0,1号属于0号边, 2,3号属于1号边, 4,5号属于2号边)
int oppo_corner_ctrlpoint_idx[6] = { 0, 9, 6, 0, 9, 6 }; // 上面每个角控制顶点所在边的另一个角控制顶点编号(0,1号属于0号边, 2,3号属于1号边, 4,5号属于2号边)
//int adj_corner_ctrlpoint_idx[3][2] = { { 0, 9 }, { 6, 0 }, { 9, 6 } }; // 相邻三角形0, 1, 2号边上的控制顶点编号(仅有角点,没有边点)
//int adj_edge_ctrlpoint_idx[3][2] = { { 2, 5 }, { 3, 1 }, { 8, 7 } }; // 相邻三角形0, 1, 2号边上的控制顶点编号(仅有边点,没有角点)
int adjust_normal_idx[6] = { 2, 0, 0, 1, 1, 2 };
int adj_corner_ctrlpoint_idx[3][2] = { { 0, 2 }, { 1, 0 }, { 2, 1 } }; // 相邻三角形0, 1, 2号边上的控制顶点编号(仅有角点,没有边点)
//const float ZERO = 10e-6;
float3 delta = make_float3(0.0f, 0.0f, 0.0f), sum = make_float3(0.0f, 0.0f, 0.0f);
// 六个边点,按0 1 2号边的顺序处理,即5, 2, 1, 3, 7, 8号控制顶点
//printf("for开始, triangleIdx = %d\n", triangleIdx);
for (int i = 0; i < 6; ++i)
{
float3 v_ctrlpoint_corner = make_float3(*(p_x + corner_ctrlpoint_idx[i]), *(p_y + corner_ctrlpoint_idx[i]), *(p_z + corner_ctrlpoint_idx[i]));
float3 v_ctrlpoint_corner_oppo = make_float3(*(p_x + oppo_corner_ctrlpoint_idx[i]), *(p_y + oppo_corner_ctrlpoint_idx[i]), *(p_z + oppo_corner_ctrlpoint_idx[i]));
float3 v01 = v_ctrlpoint_corner_oppo - v_ctrlpoint_corner;
float3 v_mid = 0.5 * (v_ctrlpoint_corner + v_ctrlpoint_corner_oppo);
//float3 n_ctrlpoint_corner = make_float3(*(pn_x + corner_ctrlpoint_idx[i]), *(pn_y + corner_ctrlpoint_idx[i]), *(pn_z + corner_ctrlpoint_idx[i]));
float3 n_ctrlpoint_corner = triangleListD[triangleIdx].n_adj[adjust_normal_idx[i]];
normalize(n_ctrlpoint_corner);
// p 是要处理的边控制顶点
float3 p = make_float3(*(p_x + edge_ctrlpoint_idx[i]), *(p_y + edge_ctrlpoint_idx[i]), *(p_z + edge_ctrlpoint_idx[i]));
if (n_count[i / 2] < 2) // 该条边只有一个法向
{
//if (adj_face_idx[i / 2] >= 0) // 只有当这条边的另一侧有面片时才会处理
//{
float3 result = p - ((p - v_ctrlpoint_corner) * n_ctrlpoint_corner) * n_ctrlpoint_corner;
#ifdef RE_LENGTH
float len0 = length(result);
float3 result_vector = result - v_ctrlpoint_corner;
float l_origin = length(p - v_ctrlpoint_corner);
float l_current = length(result_vector);
result_vector *= l_origin / l_current;
result = v_ctrlpoint_corner + result_vector;
float len1 = length(result);
printf("delta_leng_1_normal = %f\n", len1 - len0);
#endif
delta += (result - p);
*(p_x + edge_ctrlpoint_idx[i]) = result.x;
*(p_y + edge_ctrlpoint_idx[i]) = result.y;
*(p_z + edge_ctrlpoint_idx[i]) = result.z;
sum += result;
//}
//printf("only one : result_%d = (%f, %f, %f)\n", edge_ctrlpoint_idx[i], result.x, result.y, result.z);
}
//else if (handle[i / 2]) // 该条边有一个以上法向,且需要处理
else // 该条边有一个以上法向
{
float3 n1 = triangleListD[adj_face_idx[i / 2]].n_adj[adj_corner_ctrlpoint_idx[adj_edge_idx[i / 2]][i % 2]];
//printf("else开始, triangleIdx = %d, adj_face = %d, cp = %d, n1 = (%f, %f, %f)\n", triangleIdx, adj_face_idx[i / 2], edge_ctrlpoint_idx[i], n1.x, n1.y, n1.z);
normalize(n1);
//if (use_pn)
//{
float3 n_ave = cross(n_ctrlpoint_corner, n1);
normalize(n_ave);
//printf("t = %d, n_cross = %f, %f, %f\n", triangleIdx, n_ave.x, n_ave.y, n_ave.z);
//float3 result = v_ctrlpoint_corner + v01 * n_ave * 0.333333 * n_ave; // 原始的pn尖锐边算法,将1/3点往法向上投影,效果不佳
float3 result = v_ctrlpoint_corner + ((p - v_ctrlpoint_corner) * n_ave) * n_ave; // 由我的算法改良而来,将差的控制顶点往法向上投影,效果很好
#ifdef RE_LENGTH
float len0 = length(result);
float3 result_vector = result - v_ctrlpoint_corner;
float l_origin = length(p - v_ctrlpoint_corner);
float l_current = length(result_vector);
result_vector *= l_origin / l_current;
result = v_ctrlpoint_corner + result_vector;
float len1 = length(result);
printf("delta_leng_pn = %f\n", len1 - len0);
#endif
delta += (result - p);
*(p_x + edge_ctrlpoint_idx[i]) = result.x;
*(p_y + edge_ctrlpoint_idx[i]) = result.y;
*(p_z + edge_ctrlpoint_idx[i]) = result.z;
sum += result;
//printf("2 : result_%d = (%f, %f, %f)\n", edge_ctrlpoint_idx[i], result.x, result.y, result.z);
//}
//else
//{
//float t0 = 1.2345f, t1 = 2.3456f;
//float3 center0, center1;
//bool t0_exist = false, t1_exist = false;
//if (fabs(n_ctrlpoint_corner * v01) > ZERO)
//{
//t0 = (v_mid - v_ctrlpoint_corner) * v01 / (n_ctrlpoint_corner * v01);
//center0 = v_ctrlpoint_corner + t0 * n_ctrlpoint_corner;
//t0_exist = true;
////if (triangleIdx == 10 && i == 5)
////{
////printf("n0 = (%f, %f, %f), t0 = %f, center0 = (%f, %f, %f)\n",
////n_ctrlpoint_corner.x, n_ctrlpoint_corner.y, n_ctrlpoint_corner.z, t0, center0.x, center0.y, center0.z);
////}
//}
//if (fabs(n1 * v01) > ZERO)
//{
//t1 = (v_mid - v_ctrlpoint_corner) * v01 / (n1 * v01);
//center1 = v_ctrlpoint_corner + t1 * n1;
//t1_exist = true;
////if (triangleIdx == 10 && i == 5)
////{
////printf("n1 = (%f, %f, %f), t1 = %f, center1 = (%f, %f, %f)\n",
////n1.x, n1.y, n1.z, t1, center1.x, center1.y, center1.z);
////}
//}
////printf("t0 = %f, t1 = %f, triangleIdx = %d, cp = %d\n", t0, t1, triangleIdx, edge_ctrlpoint_idx[i]);
//float3 center_mid;
//if (t0_exist && t1_exist) // 当前三角形和相邻三角形都不精确
//{
//float3 center_delta = center0 - center1;
//float t = (v_ctrlpoint_corner - center0) * center_delta / (center_delta * center_delta);
//center_mid = center0 + t * center_delta;
//float3 rad0 = v_ctrlpoint_corner - center0;
//float r0 = sqrt(rad0.x * rad0.x + rad0.y * rad0.y + rad0.z * rad0.z);
//float3 rad1 = v_ctrlpoint_corner - center1;
//float r1 = sqrt(rad1.x * rad1.x + rad1.y * rad1.y + rad1.z * rad1.z);
////printf("都不精确, 三角形=%d, cp=%d, t = %f, r0 = %f, r1 = %f\n", triangleIdx, edge_ctrlpoint_idx[i], t, r0, r1);
//}
//else if (t0_exist) // 当前三角形不精确,相邻三角形精确
//{
//float t = (v_ctrlpoint_corner - center0) * n1 / (n1 * n1);
//center_mid = center0 + t * n1;
////printf("当前三角形不精确,相邻三角形精确, 三角形=%d, cp=%d, n1 = (%f, %f, %f), t = %f\n", triangleIdx, edge_ctrlpoint_idx[i], n1.x, n1.y, n1.z, t);
//}
//else if (t1_exist) // 当前三角形精确,相邻三角形不精确
//{
//float t = (v_ctrlpoint_corner - center1) * n_ctrlpoint_corner / (n_ctrlpoint_corner * n_ctrlpoint_corner);
//center_mid = center1 + t * n_ctrlpoint_corner;
////printf("当前三角形精确,相邻三角形不精确, 三角形=%d, cp=%d, t = %f\n", triangleIdx, edge_ctrlpoint_idx[i], t);
//}
//else // 当前三角形和相邻三角形都精确
//{
////printf("两个都精确, 三角形=%d, cp=%d\n", triangleIdx, edge_ctrlpoint_idx[i]);
//continue;
//}
//float3 n_ave = v_ctrlpoint_corner - center_mid;
//normalize(n_ave);
//float3 result = p - ((p - v_ctrlpoint_corner) * n_ave) * n_ave;
////printf("t = %d, n_ave = %f, %f, %f\tp = %f, %f, %f\t, result=%f, %f, %f\n", triangleIdx, n_ave.x, n_ave.y, n_ave.z, p.x, p.y, p.z, result.x, result.y, result.z);
//#ifdef RE_LENGTH
//float len0 = length(result);
//float3 result_vector = result - v_ctrlpoint_corner;
//float l_origin = length(p - v_ctrlpoint_corner);
//float l_current = length(result_vector);
//result_vector *= l_origin / l_current;
//result = v_ctrlpoint_corner + result_vector;
//float len1 = length(result);
//printf("delta_leng_my = %f\n", len1 - len0);
//#endif
//delta += (result - p);
//*(p_x + edge_ctrlpoint_idx[i]) = result.x;
//*(p_y + edge_ctrlpoint_idx[i]) = result.y;
//*(p_z + edge_ctrlpoint_idx[i]) = result.z;
//sum += result;
//float3 n_pn = cross(n_ctrlpoint_corner, n1);
//normalize(n_pn);
//float3 result_pn = v_ctrlpoint_corner + ((p - v_ctrlpoint_corner) * n_pn) * n_pn;
//float3 del = result_pn - result;
//float dot = n_ave * n_pn;
////printf("del = %f, %f, %f\t\tdot = %f\n", del.x, del.y, del.z, dot);
////printf("2 : result_%d = (%f, %f, %f)\n", edge_ctrlpoint_idx[i], result.x, result.y, result.z);
//}
}
}
// 中间控制顶点,即4号控制顶点
#ifdef LESS_THAN_2
if (n_count[0] < 2 && n_count[1] < 2 && n_count[2] < 2)
#endif
{
float3 p = make_float3(*(p_x + 4), *(p_y + 4), *(p_z + 4));
/******** 平均顶点位置,PN-Triangle方法 *********/
//sum *= 1.0 / 6;
//float3 result = sum + (sum - p) * 0.5;
/******** 平均delta *********/
delta *= center_factor / 6;
//delta *= 1.5 / 6;
float3 result = p + delta;
/******** 写结果 *********/
*(p_x + 4) = result.x;
*(p_y + 4) = result.y;
*(p_z + 4) = result.z;
//printf("result_4 = (%f, %f, %f)\n", result.x, result.y, result.z);
}
}
#else
__global__ void move(TriangleD *triangleListD, float *triangleCtrlPointD, float *triangleNormalCtrlPointD, int m_, int f)
{
int triangleIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (triangleIdx >= f)
return;
float *p_x = &triangleCtrlPointD[m_ * triangleIdx];
float *p_y = &triangleCtrlPointD[m_ * (f + triangleIdx)];
float *p_z = &triangleCtrlPointD[m_ * (f * 2 + triangleIdx)];
float *pn_x = &triangleNormalCtrlPointD[m_ * triangleIdx];
float *pn_y = &triangleNormalCtrlPointD[m_ * (f + triangleIdx)];
float *pn_z = &triangleNormalCtrlPointD[m_ * (f * 2 + triangleIdx)];
/******* 点1 *******/
/*#define MOVE1*/ // MOVE1被定义表示永远使用原始三角形的信息进行调整,理论上是错误的,只有初始情况下正确,仅供调试时用
#ifdef MOVE1
float3 v = triangleListD[triangleIdx].v[0];
float3 n = triangleListD[triangleIdx].n[0];
#else
float3 v = make_float3(*p_x, *p_y, *p_z);
float3 n = make_float3(*pn_x, *pn_y, *pn_z);
float length = sqrt(n.x * n.x + n.y * n.y + n.z * n.z);
normalize(n);
#endif
float3 p = make_float3(*(p_x + 1), *(p_y + 1), *(p_z + 1));
float3 result = p - ((p - v) * n) * n;
float3 delta = result - p;
/*if (threadIdx.x == 0)*/
/*{*/
/*printf("triangleIdx = %d\n", triangleIdx);*/
/*printf("待投点 = (%f, %f, %f), 法向 = (%f, %f, %f), 角点 = (%f, %f, %f),\n结果 = (%f, %f, %f), 差值 = (%f, %f, %f)\n",*/
/*p.x, p.y, p.z, n.x, n.y, n.z, v.x, v.y, v.z, result.x, result.y, result.z, delta.x, delta.y, delta.z);*/
/*}*/
*(p_x + 1) = result.x;
*(p_y + 1) = result.y;
*(p_z + 1) = result.z;
float3 sum = result;
// 点2
p = make_float3(*(p_x + 2), *(p_y + 2), *(p_z + 2));
result = p - ((p - v) * n) * n;
*(p_x + 2) = result.x;
*(p_y + 2) = result.y;
*(p_z + 2) = result.z;
sum += result;
/******* 点3 *******/
#ifdef MOVE1
v = triangleListD[triangleIdx].v[1];
n = triangleListD[triangleIdx].n[1];
#else
v = make_float3(*(p_x + 6), *(p_y + 6), *(p_z + 6));
n = make_float3(*(pn_x + 6), *(pn_y + 6), *(pn_z + 6));
normalize(n);
#endif
p = make_float3(*(p_x + 3), *(p_y + 3), *(p_z + 3));
result = p - ((p - v) * n) * n;
*(p_x + 3) = result.x;
*(p_y + 3) = result.y;
*(p_z + 3) = result.z;
sum += result;
// 点7
p = make_float3(*(p_x + 7), *(p_y + 7), *(p_z + 7));
result = p - ((p - v) * n) * n;
*(p_x + 7) = result.x;
*(p_y + 7) = result.y;
*(p_z + 7) = result.z;
sum += result;
/******* 点8 *******/
#ifdef MOVE1
v = triangleListD[triangleIdx].v[2];
n = triangleListD[triangleIdx].n[2];
#else
v = make_float3(*(p_x + 9), *(p_y + 9), *(p_z + 9));
n = make_float3(*(pn_x + 9), *(pn_y + 9), *(pn_z + 9));
normalize(n);
#endif
p = make_float3(*(p_x + 8), *(p_y + 8), *(p_z + 8));
result = p - ((p - v) * n) * n;
*(p_x + 8) = result.x;
*(p_y + 8) = result.y;
*(p_z + 8) = result.z;
sum += result;
// 点5
p = make_float3(*(p_x + 5), *(p_y + 5), *(p_z + 5));
result = p - ((p - v) * n) * n;
*(p_x + 5) = result.x;
*(p_y + 5) = result.y;
*(p_z + 5) = result.z;
sum += result;
/******* 点4 *******/
p = make_float3(*(p_x + 4), *(p_y + 4), *(p_z + 4));
sum *= 1.0 / 6;
result = sum + (sum - p) * 0.5;
*(p_x + 4) = result.x;
*(p_y + 4) = result.y;
*(p_z + 4) = result.z;
}
#endif
float center_factor = 1.5f;
void calcTriangleCtrlPoint(bool adjust_silhouette, bool use_pn, AlgorithmType algo_type)
{
if (algo_type == CYM)
{
float alpha = 1.0f, beta = 0.0f;
/* 计算面片和法向的控制顶点*/
cublasStatus_t stat = cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N,
triangleCtrlPointNum_lower, triangleNum * 6, triangleCtrlPointNum + constrait_point_num,
&alpha,
matrixFittingD + matrixStartIdxFitting, triangleCtrlPointNum_lower,
sampleValueD, triangleCtrlPointNum + constrait_point_num,
&beta,
triangleCtrlPointD, triangleCtrlPointNum_lower);
if (stat != CUBLAS_STATUS_SUCCESS)
{
cout << "triangleCtrlPointD fail!!!!!!!!!!!!!\tstat = " << stat << endl;
printCudaError(__FILE__, __FUNCTION__, __LINE__);
return;
}
}
// 计算每个三角片的三个用于调整控制顶点的法向
calcAdjustNormal<<<blockNumAdjNormal, blockSizeAdjNormal, sizeof(float) * blockSizeAdjNormal * 8>>>
(triangleListD, triangleNum,
order[U], order[V], order[W],
ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W]);
if (algo_type == CYM)
{
// 调整侧影轮廓线
if (adjust_silhouette)
{
const int move_block_size = 256;
int move_block_num = ceil(static_cast<double>(triangleNum) / move_block_size);
//cout << "move 开始" << endl;
#ifdef NEW_MOVE
move<<<move_block_num, move_block_size>>>(triangleListD, triangleCtrlPointD, triangle_adjacent_tableD,
triangleCtrlPointNum_lower, triangleNum, center_factor, use_pn);
#else
move<<<move_block_num, move_block_size>>>(triangleListD, triangleCtrlPointD, triangleCtrlPointD + 3 * triangleNum * triangleCtrlPointNum_lower, triangleCtrlPointNum_lower, triangleNum);
#endif
//cudaThreadSynchronize();
//move<<<move_block_num, move_block_size>>>(triangleListD, triangleCtrlPointD, triangleCtrlPointD + 3 * triangleNum * triangleCtrlPointNum_lower, triangle_adjacent_tableD,
//triangleCtrlPointNum_lower, triangleNum, center_factor);
#ifndef MORPH
cout << "center_factor = " << center_factor << endl;
#endif
printCudaError(__FILE__, __FUNCTION__, __LINE__);
}
#ifdef DRAW_TRIANGULAR_CTRL_POINTS
// 将计算好的控制顶点传回内存,仅用于调试,在最终结果上显示控制顶点,测效率时需删除
cudaMemcpy(triangular_ctrl_points, triangleCtrlPointD, sizeof(float) * 3 * triangleNum * triangleCtrlPointNum_lower, cudaMemcpyDeviceToHost);
#endif
}
else
{
int blockNum = ceil(static_cast<double>(triangleNum) / 128);
calcCtrlPoint_PN<<<blockNum, 128>>>(triangleListD, triangle_adjacent_tableD, sampleValueD_PN, triangleCtrlPointD_PN, triangleNormalCtrlPointD_PN, triangleNum, triangleCtrlPointNum_lower);
#ifdef DRAW_TRIANGULAR_CTRL_POINTS
// 将计算好的控制顶点传回内存,仅用于调试,在最终结果上显示控制顶点,测效率时需删除
cudaMemcpy(triangular_ctrl_points, triangleCtrlPointD_PN, sizeof(float) * 3 * triangleNum * triangleCtrlPointNum_lower, cudaMemcpyDeviceToHost);
#endif
}
#ifndef MORPH
cout << "triangleNum = " << triangleNum << endl;
#endif
//float *test = new float[6 * triangleNum * 3];
//cudaMemcpy(test, triangleNormalCtrlPointD_PN, sizeof(float) * 6 * triangleNum * 3, cudaMemcpyDeviceToHost);
//for (int i = 0; i < triangleNum; ++i)
//{
//for (int j = 0; j < 6; ++j)
//{
////cout << i * 10 + j << ", " << (i + triangleNum) * 10 + j << ", " << (i + triangleNum * 2) * 10 + j << endl;
//cout
//<< test[i * 6 + j] << ", "
//<< test[(i + triangleNum) * 6 + j] << ", "
//<< test[(i + triangleNum * 2) * 6 + j] << endl;
//}
//cout << "================" << endl;
//}
//float *test = new float[triangleCtrlPointNum_lower * triangleNum * 6];
//cudaMemcpy(test, triangleCtrlPointD, sizeof(float) * triangleCtrlPointNum_lower * triangleNum * 6, cudaMemcpyDeviceToHost);
//float *v = test, *n = test + triangleCtrlPointNum_lower * triangleNum * 3;
//for (int i = 0; i < triangleNum; ++i)
////for (int i = 24; i < 25; ++i)
//{
//cout << "i = " << i << endl;
//for (int j = 0; j < triangleCtrlPointNum_lower; ++j)
//{
////if (j != 0 && j != 6) continue;
////cout << i * 10 + j << ", " << (i + triangleNum) * 10 + j << ", " << (i + triangleNum * 2) * 10 + j << endl;
//cout << "\t" << j << ": " << v[i * triangleCtrlPointNum_lower + j] << ", "
//<< v[(i + triangleNum) * triangleCtrlPointNum_lower + j] << ", "
//<< v[(i + triangleNum * 2) * triangleCtrlPointNum_lower + j] << "\t";
//double x = n[i * triangleCtrlPointNum_lower + j];
//double y = n[(i + triangleNum) * triangleCtrlPointNum_lower + j];
//double z = n[(i + triangleNum * 2) * triangleCtrlPointNum_lower + j];
//double length = sqrt(x * x + y * y + z * z);
//cout << "\t" << x / length << ", " << y / length << ", " << z / length << endl;
//}
//cout << "================" << endl;
//}
//delete []test;
}
#ifdef TRUTH
void matrixMul1_truth()
{
float alpha = 1.0f, beta = 0.0f;
cublasStatus_t stat = cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N,
samplePointPerTriangle * 3, triangleCtrlPointNum, triangleCtrlPointNum,
&alpha,
BqD_truth, samplePointPerTriangle * 3,
B_1D_truth, triangleCtrlPointNum,
&beta,
BBD_truth, samplePointPerTriangle * 3);
if (stat != CUBLAS_STATUS_SUCCESS)
{
cout << "CtrlPoint_truth fail!!!!!!!!!!!!!\tstat = " << stat << endl;
printCudaError(__FILE__, __FUNCTION__, __LINE__);
return;
}
}
#endif
/************************************************************************************************************/
__global__ void copy(float *RD,
int activeThreadNumCopy, bool firstLoad, float maxX, float maxY, float maxZ,
TriangleD *triangleListD, int segmentPerEdge, int f, int q,
float *normalPtrVBO, float *texCoordPtrVBO, float *texCoord3DPtrVBO, float *vertexPtrVBO)
{
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (globalIdx >= activeThreadNumCopy)
return;
int triangleIdx = globalIdx / q;
int localIdx = globalIdx % q;
vertexPtrVBO[globalIdx * 3 + 0] = RD[triangleIdx * q + localIdx];
vertexPtrVBO[globalIdx * 3 + 1] = RD[(triangleIdx + f) * q + localIdx];
vertexPtrVBO[globalIdx * 3 + 2] = RD[(triangleIdx + f * 2) * q + localIdx];
float *ND = RD + 3 * f * q;
normalPtrVBO[globalIdx * 3 + 0] = ND[triangleIdx * + q + localIdx];
normalPtrVBO[globalIdx * 3 + 1] = ND[(triangleIdx + f) * + q + localIdx];
normalPtrVBO[globalIdx * 3 + 2] = ND[(triangleIdx + f * 2) * + q + localIdx];
//if (firstLoad)
//{
//// 计算纹理坐标
//float2 vt0 = triangleListD[triangleIdx].vt[0];
//float2 vt1 = triangleListD[triangleIdx].vt[1];
//float2 vt2 = triangleListD[triangleIdx].vt[2];
//float tempFloorFloat = (sqrtf((float)(localIdx) * 8 + 9) - 3) / 2;
//int floor = rintf(tempFloorFloat);
//if ((floor * 2 + 3) * (floor * 2 + 3) != localIdx * 8 + 9)
//floor = ceilf(tempFloorFloat);
//int room = localIdx - (floor + 1) * floor / 2;
//float3 barycentric_coord;
//barycentric_coord.x = (float)(segmentPerEdge - floor) / segmentPerEdge;
//barycentric_coord.y = (float)(floor - room) / segmentPerEdge;
//barycentric_coord.z = 1.0f - barycentric_coord.x - barycentric_coord.y;
//float u = vt0.x * barycentric_coord.x + vt1.x * barycentric_coord.y + vt2.x * barycentric_coord.z;
//float v = vt0.y * barycentric_coord.x + vt1.y * barycentric_coord.y + vt2.y * barycentric_coord.z;
//// 存储二维纹理坐标
//texCoordPtrVBO[globalIdx * 2 + 0] = u;
//texCoordPtrVBO[globalIdx * 2 + 1] = v;
//// 存储三维纹理坐标
//float minMax = maxX;
//if (minMax > maxY)
//minMax = maxY;
//if (minMax > maxZ)
//minMax = maxZ;
////texCoord3DPtrVBO[globalIdx * 3 + 0] = vertexPtrVBO[globalIdx * 3 + 0] / maxX;
////texCoord3DPtrVBO[globalIdx * 3 + 1] = vertexPtrVBO[globalIdx * 3 + 1] / maxY;
////texCoord3DPtrVBO[globalIdx * 3 + 2] = vertexPtrVBO[globalIdx * 3 + 2] / maxZ;
//texCoord3DPtrVBO[globalIdx * 3 + 0] = vertexPtrVBO[globalIdx * 3 + 0] / minMax;
//texCoord3DPtrVBO[globalIdx * 3 + 1] = vertexPtrVBO[globalIdx * 3 + 1] / minMax;
//texCoord3DPtrVBO[globalIdx * 3 + 2] = vertexPtrVBO[globalIdx * 3 + 2] / minMax;
//}
}
#ifdef LINE
__global__ void make_bary(TriangleD *triangleListD, float *baryPtrVBO, float *oriBaryPtrVBO, int n, int q)
{
/***************************** 生成切割后每个顶点的重心坐标 ******************************/
int localIdx = threadIdx.x;
float tempFloorFloat = (sqrtf((float)localIdx * 8 + 9) - 3) / 2;
int floor = rintf(tempFloorFloat);
if ((floor * 2 + 3) * (floor * 2 + 3) != localIdx * 8 + 9)
floor = ceilf(tempFloorFloat);
int room = localIdx - (floor + 1) * floor / 2;
float3 barycentric_coord;
barycentric_coord.x = (float)(n - floor) / n;
barycentric_coord.y = (float)(floor - room) / n;
barycentric_coord.z = 1.0f - barycentric_coord.x - barycentric_coord.y;
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
baryPtrVBO[globalIdx * 3 + 0] = (float)(n - floor) / n;
baryPtrVBO[globalIdx * 3 + 1] = barycentric_coord.y;
baryPtrVBO[globalIdx * 3 + 2] = barycentric_coord.z;
/***************************** 生成切割前每个顶点的重心坐标 ******************************/
int triangleIdx = blockIdx.x;
// 切割后三角形三个顶点在原始三角形中的重心坐标
float3 bary_origin0 = triangleListD[triangleIdx].bary_origin[0];
float3 bary_origin1 = triangleListD[triangleIdx].bary_origin[1];
float3 bary_origin2 = triangleListD[triangleIdx].bary_origin[2];
// 当前点在原始三角形上的重心坐标
float3 bary_origin = bary_origin0 * barycentric_coord.x + bary_origin1 * barycentric_coord.y + bary_origin2 * barycentric_coord.z;
// 存储目前处理的采样点在原始三角片上的重心坐标
oriBaryPtrVBO[globalIdx * 3 + 0] = bary_origin.x;
oriBaryPtrVBO[globalIdx * 3 + 1] = bary_origin.y;
oriBaryPtrVBO[globalIdx * 3 + 2] = bary_origin.z;
}
#endif
#ifdef TRUTH
__global__ void copy_truth(float *RD_truth,
int activeThreadNumCopy, bool firstLoad,
TriangleD *triangleListD, int segmentPerEdge, int f, int q,
float *normalPtrVBO_truth, float *vertexPtrVBO_truth)
{
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
if (globalIdx >= activeThreadNumCopy)
return;
int triangleIdx = globalIdx / q;
int localIdx = globalIdx % q;
vertexPtrVBO_truth[globalIdx * 3 + 0] = RD_truth[triangleIdx * q * 3 + localIdx];
vertexPtrVBO_truth[globalIdx * 3 + 1] = RD_truth[(triangleIdx + f) * q * 3 + localIdx];
vertexPtrVBO_truth[globalIdx * 3 + 2] = RD_truth[(triangleIdx + f * 2) * q * 3 + localIdx];
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
float ux = RD_truth[triangleIdx * q * 3 + q + localIdx];
float uy = RD_truth[(triangleIdx + f) * q * 3 + q + localIdx];
float uz = RD_truth[(triangleIdx + f * 2) * q * 3 + q + localIdx];
float vx = RD_truth[triangleIdx * q * 3 + q * 2 + localIdx];
float vy = RD_truth[(triangleIdx + f) * q * 3 + q * 2 + localIdx];
float vz = RD_truth[(triangleIdx + f * 2) * q * 3 + q * 2 + localIdx];
float nx = uy * vz - uz * vy;
float ny = uz * vx - ux * vz;
float nz = ux * vy - uy * vx;
float l = sqrtf(nx * nx + ny * ny + nz * nz);
nx /= l;
ny /= l;
nz /= l;
normalPtrVBO_truth[globalIdx * 3 + 0] = nx;
normalPtrVBO_truth[globalIdx * 3 + 1] = ny;
normalPtrVBO_truth[globalIdx * 3 + 2] = nz;
}
#endif
bool registered = false;
GLuint normalVBO = 0, texCoordVBO = 0, texCoord3DVBO = 0, vertexVBO = 0;
#ifdef LINE
GLuint baryVBO = 0, oriBaryVBO = 0;
#endif
float *normalPtrVBO; // 读写缓冲区对象所用的指针
float *texCoordPtrVBO; // 读写缓冲区对象所用的指针
float *texCoord3DPtrVBO; // 读写缓冲区对象所用的指针
float *vertexPtrVBO; // 读写缓冲区对象所用的指针
#ifdef LINE
float *baryPtrVBO, *oriBaryPtrVBO; // 读写缓冲区对象所用的指针
#endif
struct cudaGraphicsResource *normalVBO_CUDA, *texCoordVBO_CUDA, *texCoord3DVBO_CUDA, *vertexVBO_CUDA;
#ifdef LINE
struct cudaGraphicsResource *baryVBO_CUDA, *oriBaryVBO_CUDA;
#endif
void tessellateD(bool firstLoad, float maxX, float maxY, float maxZ, AlgorithmType algo_type)
{
float alpha = 1.0f, beta = 0.0f;
// 计算三角化点的坐标和法向
cublasStatus_t stat;
if (algo_type == CYM)
{
stat = cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N,
samplePointPerTriangle, triangleNum * 6, triangleCtrlPointNum_lower,
&alpha,
BqD, samplePointPerTriangle,
triangleCtrlPointD, triangleCtrlPointNum_lower,
&beta,
RD, samplePointPerTriangle);
}
//else if (algo_type == PN_CUTTING)
else
{
stat = cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N,
samplePointPerTriangle, triangleNum * 3, triangleCtrlPointNum_lower,
&alpha,
BqD, samplePointPerTriangle,
triangleCtrlPointD_PN, triangleCtrlPointNum_lower,
&beta,
RD, samplePointPerTriangle);
stat = cublasSgemm(cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N,
samplePointPerTriangle, triangleNum * 3, 6,
&alpha,
BqD_PN, samplePointPerTriangle,
triangleNormalCtrlPointD_PN, 6,
&beta,
RD + samplePointPerTriangle * triangleNum * 3, samplePointPerTriangle);
}
if (stat != CUBLAS_STATUS_SUCCESS)
{
cout << "RD fail!!!!!!!!!!!!!\tstat = " << stat << "\t\t";
printCudaError(__FILE__, __FUNCTION__, __LINE__);
cout << endl;
return;
}
cudaGraphicsMapResources(1, &normalVBO_CUDA, 0);
cudaGraphicsMapResources(1, &texCoordVBO_CUDA, 0);
cudaGraphicsMapResources(1, &texCoord3DVBO_CUDA, 0);
cudaGraphicsMapResources(1, &vertexVBO_CUDA, 0);
#ifdef LINE
cudaGraphicsMapResources(1, &baryVBO_CUDA, 0);
cudaGraphicsMapResources(1, &oriBaryVBO_CUDA, 0);
#endif
size_t size2 = sizeof(float) * samplePointPerTriangle * triangleNum * 2;
size_t size3 = sizeof(float) * samplePointPerTriangle * triangleNum * 3;
cudaGraphicsResourceGetMappedPointer((void**)&normalPtrVBO, &size3, normalVBO_CUDA);
cudaGraphicsResourceGetMappedPointer((void**)&texCoordPtrVBO, &size2, texCoordVBO_CUDA);
cudaGraphicsResourceGetMappedPointer((void**)&texCoord3DPtrVBO, &size2, texCoord3DVBO_CUDA);
cudaGraphicsResourceGetMappedPointer((void**)&vertexPtrVBO, &size3, vertexVBO_CUDA);
#ifdef LINE
cudaGraphicsResourceGetMappedPointer((void**)&baryPtrVBO, &size3, baryVBO_CUDA);
cudaGraphicsResourceGetMappedPointer((void**)&oriBaryPtrVBO, &size3, oriBaryVBO_CUDA);
#endif
copy<<<blockNumCopy, blockSizeCopy>>>(RD,
activeThreadNumCopy, firstLoad, maxX, maxY, maxZ, triangleListD, segmentPerEdge, triangleNum, samplePointPerTriangle,
normalPtrVBO, texCoordPtrVBO, texCoord3DPtrVBO, vertexPtrVBO);
#ifdef LINE
make_bary<<<triangleNum, samplePointPerTriangle>>>(triangleListD, baryPtrVBO, oriBaryPtrVBO, segmentPerEdge, samplePointPerTriangle);
#endif
cudaGraphicsUnmapResources(1, &normalVBO_CUDA, 0);
cudaGraphicsUnmapResources(1, &texCoordVBO_CUDA, 0);
cudaGraphicsUnmapResources(1, &texCoord3DVBO_CUDA, 0);
cudaGraphicsUnmapResources(1, &vertexVBO_CUDA, 0);
#ifdef LINE
cudaGraphicsUnmapResources(1, &baryVBO_CUDA, 0);
cudaGraphicsUnmapResources(1, &oriBaryVBO_CUDA, 0);
#endif
}
//#ifdef TRUTH
GLuint normalVBO_truth = 0, vertexVBO_truth = 0;
float *normalPtrVBO_truth; // 读写缓冲区对象所用的指针
float *vertexPtrVBO_truth; // 读写缓冲区对象所用的指针
struct cudaGraphicsResource* normalVBO_CUDA_truth;
struct cudaGraphicsResource* vertexVBO_CUDA_truth;
//double vertex_error_ave_max = 0.0, vertex_error_max_max = 0.0;
//double normal_error_ave_max = 0.0, normal_error_max_max = 0.0;
int triangleCoord(int floor, int room)
{
return (1 + floor) * floor / 2 + room;
}
__global__ void my_to_truth(int f, int q, int point_per_real_face, float *myV, float *realV, int *my_to_truth_tableD, int *belongs_to_originD)
{
int globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
int triangleIdx = globalIdx / q;
if (triangleIdx >= f)
return;
//if (blockIdx.x == 0 && threadIdx.x == 0)
//{
//printf("test = %f ,, %f ,, %f\n", realV[3 * 3], realV[3 * 3 + 1], realV[3 * 3 + 2]);
//}
float dist_min = 999999, idx_min = -1;
realV += belongs_to_originD[triangleIdx] * point_per_real_face * 3;
//printf("shift = %d, belongto = %d, pointper = %d\n", belongs_to_originD[triangleIdx] * point_per_real_face, belongs_to_originD[triangleIdx], point_per_real_face);
//printf("f = %d, q = %d, belongs_to = %d\n", f, q, belongs_to_originD[triangleIdx]);
//for (int i = 0; i < point_per_real_face * 36; ++i)
for (int i = 0; i < point_per_real_face; ++i)
{
float dx = myV[globalIdx * 3] - realV[i * 3];
float dy = myV[globalIdx * 3 + 1] - realV[i * 3 + 1];
float dz = myV[globalIdx * 3 + 2] - realV[i * 3 + 2];
float dist = sqrt(dx * dx + dy * dy + dz * dz);
if (dist < dist_min)
{
dist_min = dist;
idx_min = i;
}
//if (blockIdx.x == 0 && threadIdx.x == 0)
//{
//printf("i = %d, dx = %f, dy = %f, fz = %f, dist = %f, origin = %d, now_in_face = %d\n", i, dx, dy, dz, dist, belongs_to_originD[triangleIdx], i / point_per_real_face);
//}
}
//printf("idx_min = %d\n", idx_min);
//my_to_truth_tableD[globalIdx] = idx_min;
my_to_truth_tableD[globalIdx] = idx_min + belongs_to_originD[triangleIdx] * point_per_real_face;
}
__global__ void deformTeapot(float3 *vertexParamListD_teapot, float3 *normalParamListD_teapot,
int activeThreadNum,
float *normalPtrVBO_truth, float *vertexPtrVBO_truth,
int orderU, int orderV, int orderW,
int ctrlPointNumU, int ctrlPointNumV, int ctrlPointNumW)
{
// u, v, w 表示经过重心坐标插值之后的采样点的x, y, z分量
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= activeThreadNum)
return;
float u = vertexParamListD_teapot[i].x;
float v = vertexParamListD_teapot[i].y;
float w = vertexParamListD_teapot[i].z;
//if (blockIdx.x == 0 && threadIdx.x == 3)
//printf("%f, %f, %f\n", u, v, w);
// u, v, w方向节点区间数量
int knot_interval_count_u = orderU + ctrlPointNumU - (orderU - 1) * 2 - 1;
int knot_interval_count_v = orderV + ctrlPointNumV - (orderV - 1) * 2 - 1;
int knot_interval_count_w = orderW + ctrlPointNumW - (orderW - 1) * 2 - 1;
// 预先将其值设为最大,将末端点归入最后一段
int left_idx_u = orderU - 1 + knot_interval_count_u - 1;
int left_idx_v = orderV - 1 + knot_interval_count_v - 1;
int left_idx_w = orderW - 1 + knot_interval_count_w - 1;
// 沿 U 方向查找当前点所在的节点区间
for (int ii = orderU - 1; ii <= orderU - 1 + knot_interval_count_u - 1; ++ii)
{
if (u >= knotListD[ii] && u < knotListD[ii + 1])
{
left_idx_u = ii;
break;
}
}
// 沿 V 方向查找当前点所在的节点区间
for (int jj = orderV - 1; jj <= orderV - 1 + knot_interval_count_v - 1; ++jj)
{
if (v >= knotListD[20 + jj] && v < knotListD[20 + jj + 1])
{
left_idx_v = jj;
break;
}
}
// 沿 W 方向查找当前点所在的节点区间
for (int kk = orderW - 1; kk <= orderW - 1 + knot_interval_count_w - 1; ++kk)
{
if (w >= knotListD[40 + kk] && w < knotListD[40 + kk + 1])
{
left_idx_w = kk;
break;
}
}
float tmpKnot = knotListD[left_idx_u];
float tmpKnot1 = knotListD[left_idx_u + 1];
float x_stride = tmpKnot1 - tmpKnot;
u = (u - tmpKnot) / x_stride;
tmpKnot = knotListD[20 + left_idx_v];
tmpKnot1 = knotListD[20 + left_idx_v + 1];
float y_stride = tmpKnot1 - tmpKnot;
v = (v - tmpKnot) / y_stride;
tmpKnot = knotListD[40 + left_idx_w];
tmpKnot1 = knotListD[40 + left_idx_w + 1];
float z_stride = tmpKnot1 - tmpKnot;
w = (w - tmpKnot) / z_stride;
extern __shared__ float shared_array[];
// 算出该线程负责的采样点的 B 样条体值
// fu 表示J_bar矩阵第一列三个元素:偏F_bar_x偏u、偏F_bar_y偏u、偏F_bar_z偏u
// fv 表示J_bar矩阵第二列三个元素:偏F_bar_x偏v、偏F_bar_y偏v、偏F_bar_z偏v
float3 result, fu, fv;
BSplineVolumeValueMatrixD_combine(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW,
result, fu, fv);
__syncthreads();
//sampleValueD[index2c(localIdx, triangleIdx , m + c)] = result.x;
//sampleValueD[index2c(localIdx, triangleIdx + f , m + c)] = result.y;
//sampleValueD[index2c(localIdx, triangleIdx + f * 2, m + c)] = result.z;
vertexPtrVBO_truth[i * 3 + 0] = result.x;
vertexPtrVBO_truth[i * 3 + 1] = result.y;
vertexPtrVBO_truth[i * 3 + 2] = result.z;
//printf("vertexPtrVBO = %f, %f, %f\n", vertexPtrVBO_truth[i * 3], vertexPtrVBO_truth[i * 3 + 1], vertexPtrVBO_truth[i * 3 + 2]);
///////////////////////////////////////////////////////////////////////////////
// fw 表示J_bar矩阵第三列三个元素:偏F_bar_x偏w、偏F_bar_y偏w、偏F_bar_z偏w
float3 fw = BSplineVolumeValueMatrixDw(u, v, w, shared_array,
left_idx_u - (orderU - 1), left_idx_v - (orderV - 1), left_idx_w - (orderW - 1),
orderU, orderV, orderW);
//__syncthreads();
u = normalParamListD_teapot[i].x;
v = normalParamListD_teapot[i].y;
w = normalParamListD_teapot[i].z;
//float *sampleNormalD = sampleValueD + 3 * f * (m + c);
// J_bar_star_T_[012]表示J_bar的伴随矩阵的转置(即J_bar*T)的第一行三个元素
float J_bar_star_T_0 = fv.y * fw.z - fw.y * fv.z;
float J_bar_star_T_1 = fw.y * fu.z - fu.y * fw.z;
float J_bar_star_T_2 = fu.y * fv.z - fv.y * fu.z;
normalPtrVBO_truth[i * 3 + 0] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
//sampleNormalD[index2c(localIdx, triangleIdx, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]表示J_bar的伴随矩阵的转置(即J_bar*T)的第二行三个元素
J_bar_star_T_0 = fv.z * fw.x - fw.z * fv.x;
J_bar_star_T_1 = fw.z * fu.x - fu.z * fw.x;
J_bar_star_T_2 = fu.z * fv.x - fv.z * fu.x;
normalPtrVBO_truth[i * 3 + 1] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
//sampleNormalD[index2c(localIdx, triangleIdx + f, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
// J_bar_star_T_[012]表示J_bar的伴随矩阵的转置(即J_bar*T)的第三行三个元素
J_bar_star_T_0 = fv.x * fw.y - fw.x * fv.y;
J_bar_star_T_1 = fw.x * fu.y - fu.x * fw.y;
J_bar_star_T_2 = fu.x * fv.y - fv.x * fu.y;
normalPtrVBO_truth[i * 3 + 2] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
//sampleNormalD[index2c(localIdx, triangleIdx + f * 2, m + c)] = u * J_bar_star_T_0 * x_stride + v * J_bar_star_T_1 * y_stride + w * J_bar_star_T_2 * z_stride;
//float tx = normalPtrVBO_truth[i * 3 + 0];
//float ty = normalPtrVBO_truth[i * 3 + 1];
//float tz = normalPtrVBO_truth[i * 3 + 2];
//float length = sqrt(tx * tx + ty * ty + tz * tz);
//tx /= length;
//ty /= length;
//tz /= length;
//printf("ori = %f, %f, %f\tdeformed = %f, %f, %f\n", u, v, w, tx, ty, tz);
}
using namespace objdata;
float color_map_vertex(const VertexCoord &v0, const VertexCoord v1, float range)
{
return (v0 - v1).norm() / range;
}
float color_map_normal(const NormalCoord &n0, const NormalCoord &n1, float range)
{
float result = 2 * asin((n0 - n1).norm() * 0.5) / range;
if (result < 0)
result = 0;
else if (result > 1)
result = 1;
return result;
//return 2 * asin((n0 - n1).norm() * 0.5) / range;
}
float *texture_coord;
void tessellateD_truth(bool adjust_silhouette, bool firstLoad, vector<int> &teapotFaceList, vector<int> &belongs_to_origin, int u_seg, int v_seg)
{
cudaError_t cymError;
//cymError = cudaMemcpy(my_to_truth_table, my_to_truth_tableD, sizeof(int) * samplePointPerTriangle * triangleNum, cudaMemcpyDeviceToHost);
//if (cymError)
//cout << __FILE__ << "第" << __LINE__ << "行, 错误代码" << cymError << ": " << cudaGetErrorString(cymError) << endl;
size_t size2 = sizeof(float) * samplePointPerTriangle * triangleNum * 2;
size_t size3 = sizeof(float) * samplePointPerTriangle * triangleNum * 3;
size_t size3_truth = sizeof(float) * vertexCount_teapot * 3;
cymError = cudaGetLastError();
if (cymError)
cout << __FILE__ << "第" << __LINE__ << "行, 错误代码" << cymError << ": " << cudaGetErrorString(cymError) << endl;
cudaGraphicsMapResources(1, &normalVBO_CUDA, 0);
cudaGraphicsMapResources(1, &vertexVBO_CUDA, 0);
cudaGraphicsMapResources(1, &texCoordVBO_CUDA, 0);
cudaGraphicsMapResources(1, &texCoord3DVBO_CUDA, 0);
cudaGraphicsMapResources(1, &normalVBO_CUDA_truth, 0);
cudaGraphicsMapResources(1, &vertexVBO_CUDA_truth, 0);
cudaGraphicsResourceGetMappedPointer((void**)&normalPtrVBO, &size3, normalVBO_CUDA);
cudaGraphicsResourceGetMappedPointer((void**)&vertexPtrVBO, &size3, vertexVBO_CUDA);
cudaGraphicsResourceGetMappedPointer((void**)&texCoordPtrVBO, &size2, texCoordVBO_CUDA);
cudaGraphicsResourceGetMappedPointer((void**)&texCoord3DPtrVBO, &size2, texCoord3DVBO_CUDA);
cudaGraphicsResourceGetMappedPointer((void**)&normalPtrVBO_truth, &size3_truth, normalVBO_CUDA_truth);
cudaGraphicsResourceGetMappedPointer((void**)&vertexPtrVBO_truth, &size3_truth, vertexVBO_CUDA_truth);
// 变形基准茶壶
int block_size = 128;
int block_num = ceil(static_cast<double>(vertexCount_teapot) / block_size);
deformTeapot<<<block_num, block_size, sizeof(float) * block_size * 13>>>
(vertexParamListD_teapot, normalParamListD_teapot,
vertexCount_teapot,
normalPtrVBO_truth, vertexPtrVBO_truth,
order[U], order[V], order[W],
ctrlPointNum[U], ctrlPointNum[V], ctrlPointNum[W]);
cymError = cudaGetLastError();
if (cymError)
cout << __FILE__ << "第" << __LINE__ << "行, 错误代码" << cymError << ": " << cudaGetErrorString(cymError) << endl;
// 找对应
if (firstLoad)
{
texture_coord = new float[size2 / sizeof(float)];
cout << "第一次,找对应" << endl;
cout << "block_num = " << block_num << endl;
int size = belongs_to_origin.size();
int *belongs_to_originD;
cudaMalloc((void**)&belongs_to_originD, sizeof(int) * size);
//cout << "belongsize = " << size << endl;
cymError = cudaMemcpy(belongs_to_originD, &belongs_to_origin[0], sizeof(int) * size, cudaMemcpyHostToDevice);
if (cymError)
cout << __FILE__ << "第" << __LINE__ << "行, 错误代码" << cymError << ": " << cudaGetErrorString(cymError) << endl;
int block_size = 128;
int block_num = ceil(static_cast<double>(triangleNum * samplePointPerTriangle) / block_size);
cout << "my_to_truth.blockNum = " << block_num << endl;
my_to_truth<<<block_num, block_size>>>(triangleNum, samplePointPerTriangle,
(u_seg + 1) * (v_seg + 1), vertexPtrVBO, vertexPtrVBO_truth,
my_to_truth_tableD, belongs_to_originD);
cymError = cudaGetLastError();
if (cymError)
cout << __FILE__ << "第" << __LINE__ << "行, 错误代码" << cymError << ": " << cudaGetErrorString(cymError) << endl;
cymError = cudaMemcpy(my_to_truth_table, my_to_truth_tableD, sizeof(int) * samplePointPerTriangle * triangleNum, cudaMemcpyDeviceToHost);
if (cymError)
cout << __FILE__ << "第" << __LINE__ << "行, 错误代码" << cymError << ": " << cudaGetErrorString(cymError) << endl;
//cout << "triangleNum = " << triangleNum << ", samplePointPerTriangle = "
//<< samplePointPerTriangle << endl;
//for (int i = 0; i < samplePointPerTriangle * triangleNum; ++i)
//{
//cout << my_to_truth_table[i] << " ";
//if (i % 20 == 19)
//cout << endl;
//}
//int *tttt = new int[size];
//cudaMemcpy(tttt, belongs_to_originD, sizeof(int) * size, cudaMemcpyDeviceToHost);
//for (int i = 0; i < size; ++i)
//{
//cout << tttt[i] << " ";
//if (i % 20 == 19)
//cout << endl;
//}
//delete []tttt;
}
/*------------------------ 测量误差 ----------------------------*/
/* 顶点误差 */
float *result = new float[size3 / sizeof(float)];
float *result_truth = new float[size3_truth / sizeof(float)];
cymError = cudaMemcpy(result, vertexPtrVBO, size3, cudaMemcpyDeviceToHost);
if (cymError)
cout << __FILE__ << "第" << __LINE__ << "行, 错误代码" << cymError << ": " << cudaGetErrorString(cymError) << endl;
cymError = cudaMemcpy(result_truth, vertexPtrVBO_truth, size3_truth, cudaMemcpyDeviceToHost);
if (cymError)
cout << __FILE__ << "第" << __LINE__ << "行, 错误代码" << cymError << ": " << cudaGetErrorString(cymError) << endl;
double vertex_error_ave_max = 0.0, vertex_error_max_max = 0.0;
double normal_error_ave_max = 0.0, normal_error_max_max = 0.0;
double error_ave = 0.0, error_max = 0.0;
for (int i = 0; i < samplePointPerTriangle * triangleNum; ++i)
{
double x0 = result[i * 3];
double y0 = result[i * 3 + 1];
double z0 = result[i * 3 + 2];
int real_idx = my_to_truth_table[i];
double x1 = result_truth[real_idx * 3];
double y1 = result_truth[real_idx * 3 + 1];
double z1 = result_truth[real_idx * 3 + 2];
double error = sqrt((x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1) + (z0 - z1) * (z0 - z1));
//cout << "error = " << error << endl;
error_ave += error;
if (error_max < error)
error_max = error;
float vertex_diff = color_map_vertex(VertexCoord(x0, y0, z0), VertexCoord(x1, y1, z1), 0.04);
texture_coord[i * 2] = vertex_diff;
texture_coord[i * 2 + 1] = 0.5;
}
cudaMemcpy(texCoordPtrVBO, texture_coord, size2, cudaMemcpyHostToDevice);
/*cout << "eeeeee samplePonitPerTriangle = " << samplePointPerTriangle << endl;*/
/*cout << "eeeeee triangleNum = " << triangleNum << endl;*/
/*cout << "eeeeee samplePonitPerTriangle * triangleNum = " << samplePointPerTriangle * triangleNum << endl;*/
/*cout << "eeeeee error = " << error_ave / (samplePointPerTriangle * triangleNum)*/
/*<< ", error_max = " << error_max << endl;*/
if (error_ave > vertex_error_ave_max)
vertex_error_ave_max = error_ave;
if (error_max > vertex_error_max_max)
vertex_error_max_max = error_max;
if (adjust_silhouette)
cout << "调整过,误差大" << endl;
else
cout << "未调整,误差小" << endl;
cout << "eeeeee 平均顶点误差 = " << vertex_error_ave_max / (samplePointPerTriangle * triangleNum) << ", 最大顶点误差 = " << vertex_error_max_max << endl;
/* 体积误差 */
double volume = 0.0;
for (int f = 0; f < triangleNum; ++f)
{
for (int i = 0; i < segmentPerEdge; ++i)
{
for (int j = 0; j <= i; ++j)
{
// smooth FFD算法结果
double v0x = result[samplePointPerTriangle * 3 * f + triangleCoord(i, j) * 3 + 0];
double v0y = result[samplePointPerTriangle * 3 * f + triangleCoord(i, j) * 3 + 1];
double v0z = result[samplePointPerTriangle * 3 * f + triangleCoord(i, j) * 3 + 2];
double v1x = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j) * 3 + 0];
double v1y = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j) * 3 + 1];
double v1z = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j) * 3 + 2];
double v2x = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j + 1) * 3 + 0];
double v2y = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j + 1) * 3 + 1];
double v2z = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j + 1) * 3 + 2];
volume += (v0z + v1z + v2z) * ((v1x - v0x) * (v2y - v0y) - (v2x - v0x) * (v1y - v0y));
if (i < segmentPerEdge - 1)
{
double v0x = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j) * 3 + 0];
double v0y = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j) * 3 + 1];
double v0z = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j) * 3 + 2];
double v1x = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 2, j + 1) * 3 + 0];
double v1y = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 2, j + 1) * 3 + 1];
double v1z = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 2, j + 1) * 3 + 2];
double v2x = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j + 1) * 3 + 0];
double v2y = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j + 1) * 3 + 1];
double v2z = result[samplePointPerTriangle * 3 * f + triangleCoord(i + 1, j + 1) * 3 + 2];
volume += (v0z + v1z + v2z) * ((v1x - v0x) * (v2y - v0y) - (v2x - v0x) * (v1y - v0y));
}
}
}
}
volume /= 6;
double volume_truth = 0.0;
for (vector<int>::size_type i = 0; i < teapotFaceList.size() / 3; ++i)
{
int id0 = teapotFaceList[i * 3];
int id1 = teapotFaceList[i * 3 + 1];
int id2 = teapotFaceList[i * 3 + 2];
double v0x = result_truth[id0 * 3];
double v0y = result_truth[id0 * 3 + 1];
double v0z = result_truth[id0 * 3 + 2];
double v1x = result_truth[id1 * 3];
double v1y = result_truth[id1 * 3 + 1];
double v1z = result_truth[id1 * 3 + 2];
double v2x = result_truth[id2 * 3];
double v2y = result_truth[id2 * 3 + 1];
double v2z = result_truth[id2 * 3 + 2];
volume_truth += (v0z + v1z + v2z) * ((v1x - v0x) * (v2y - v0y) - (v2x - v0x) * (v1y - v0y));
}
volume_truth /= 6;
cout << "eeeeee 近似体积 = " << volume << ", 真实体积 = " << volume_truth << endl;
cout << "eeeeee 体积误差 = " << volume - volume_truth << ", 误差率 = " << fabs(volume - volume_truth) / volume_truth << endl;
/* 法向误差 */
cymError = cudaMemcpy(result, normalPtrVBO, size3, cudaMemcpyDeviceToHost);
if (cymError)
cout << __FILE__ << "第" << __LINE__ << "行, 错误代码" << cymError << ": " << cudaGetErrorString(cymError) << endl;
cymError = cudaMemcpy(result_truth, normalPtrVBO_truth, size3_truth, cudaMemcpyDeviceToHost);
if (cymError)
cout << __FILE__ << "第" << __LINE__ << "行, 错误代码" << cymError << ": " << cudaGetErrorString(cymError) << endl;
error_ave = 0.0, error_max = 0.0;
float x0_max, y0_max, z0_max, x1_max, y1_max, z1_max;
const float PI = 3.14159265358979;
for (int i = 0; i < samplePointPerTriangle * triangleNum; ++i)
{
double x0 = result[i * 3];
double y0 = result[i * 3 + 1];
double z0 = result[i * 3 + 2];
int real_idx = my_to_truth_table[i];
double x1 = result_truth[real_idx * 3];
double y1 = result_truth[real_idx * 3 + 1];
double z1 = result_truth[real_idx * 3 + 2];
double length = sqrt(x0 * x0 + y0 * y0 + z0 * z0);
x0 /= length; y0 /= length; z0 /= length;
length = sqrt(x1 * x1 + y1 * y1 + z1 * z1);
x1 /= length; y1 /= length; z1 /= length;
//cout << "ori = " << x0 << ", " << y0 << ", " << z0 << "\t"
//<< "deform = " << x1 << ", " << y1 << ", " << z1 << endl;
double error = sqrt((x0 - x1) * (x0 - x1) + (y0 - y1) * (y0 - y1) + (z0 - z1) * (z0 - z1));
error = 2 * asin(error * 0.5);
//error = 1 * asin(error / 1);
error_ave += error;
if (error_max < error)
{
error_max = error;
x0_max = x0;
y0_max = y0;
z0_max = z0;
x1_max = x1;
y1_max = y1;
z1_max = z1;
}
//float normal_diff = color_map_normal(VertexCoord(x0, y0, z0), VertexCoord(x1, y1, z1), PI / 3);
float normal_diff = color_map_normal(VertexCoord(x0, y0, z0), VertexCoord(x1, y1, z1), PI / 20);
texture_coord[i * 2] = normal_diff;
texture_coord[i * 2 + 1] = 0.5;
}
cudaMemcpy(texCoord3DPtrVBO, texture_coord, size2, cudaMemcpyHostToDevice);
if (error_ave > normal_error_ave_max)
normal_error_ave_max = error_ave;
if (error_max > normal_error_max_max)
normal_error_max_max = error_max;
cout << "max0 = " << x0_max << ", " << y0_max << ", " << z0_max;
cout << "\tmax1 = " << x1_max << ", " << y1_max << ", " << z1_max << endl;
cout << "eeeeee 平均法向误差(角度) = " << normal_error_ave_max / (samplePointPerTriangle * triangleNum) * 180 / PI
<< ", 最大法向误差(角度) = " << normal_error_max_max * 180 / PI << endl << endl;
cudaGraphicsUnmapResources(1, &normalVBO_CUDA, 0);
cudaGraphicsUnmapResources(1, &vertexVBO_CUDA, 0);
cudaGraphicsUnmapResources(1, &texCoordVBO_CUDA, 0);
cudaGraphicsUnmapResources(1, &texCoord3DVBO_CUDA, 0);
cudaGraphicsUnmapResources(1, &normalVBO_CUDA_truth, 0);
cudaGraphicsUnmapResources(1, &vertexVBO_CUDA_truth, 0);
cymError = cudaGetLastError();
if (cymError)
cout << __FILE__ << "第" << __LINE__ << "行, 错误代码" << cymError << ": " << cudaGetErrorString(cymError) << endl;
delete []result;
delete []result_truth;
/*---------------------- 测量误差完成 --------------------------*/
}
//#endif
/************************************************************************************************************/
void setGLDevice()
{
cudaGLSetGLDevice(0);
}
/* 使用缓冲区对象进行 cuda 和 OpenGL 协同工作之前,需要进行一些初始化 */
void regGLBuffer()
{
printCudaError(__FILE__, __FUNCTION__, __LINE__);
if (registered)
{
cudaGraphicsUnregisterResource(normalVBO_CUDA);
cudaGraphicsUnregisterResource(texCoordVBO_CUDA);
cudaGraphicsUnregisterResource(texCoord3DVBO_CUDA);
cudaGraphicsUnregisterResource(vertexVBO_CUDA);
#ifdef LINE
cudaGraphicsUnregisterResource(baryVBO_CUDA);
cudaGraphicsUnregisterResource(oriBaryVBO_CUDA);
#endif
//#ifdef TRUTH
cudaGraphicsUnregisterResource(normalVBO_CUDA_truth);
cudaGraphicsUnregisterResource(vertexVBO_CUDA_truth);
//#endif
registered = false;
}
printCudaError(__FILE__, __FUNCTION__, __LINE__);
cudaGraphicsGLRegisterBuffer(&normalVBO_CUDA, normalVBO, cudaGraphicsMapFlagsWriteDiscard);
printCudaError(__FILE__, __FUNCTION__, __LINE__);
cudaGraphicsGLRegisterBuffer(&texCoordVBO_CUDA, texCoordVBO, cudaGraphicsMapFlagsWriteDiscard);
cudaGraphicsGLRegisterBuffer(&texCoord3DVBO_CUDA, texCoord3DVBO, cudaGraphicsMapFlagsWriteDiscard);
cudaGraphicsGLRegisterBuffer(&vertexVBO_CUDA, vertexVBO, cudaGraphicsMapFlagsWriteDiscard);
#ifdef LINE
cudaGraphicsGLRegisterBuffer(&baryVBO_CUDA, baryVBO, cudaGraphicsMapFlagsWriteDiscard);
cudaGraphicsGLRegisterBuffer(&oriBaryVBO_CUDA, oriBaryVBO, cudaGraphicsMapFlagsWriteDiscard);
#endif
//#ifdef TRUTH
cudaGraphicsGLRegisterBuffer(&normalVBO_CUDA_truth, normalVBO_truth, cudaGraphicsMapFlagsWriteDiscard);
cudaGraphicsGLRegisterBuffer(&vertexVBO_CUDA_truth, vertexVBO_truth, cudaGraphicsMapFlagsWriteDiscard);
//#endif
registered = true;
printCudaError(__FILE__, __FUNCTION__, __LINE__);
}
/************************************************************************************************************/
void cudaFreeNonZero(void **ptr)
{
if (*ptr)
{
cudaFree(*ptr);
*ptr = 0;
}
}
void freeTessMemD()
{
cudaFreeNonZero((void**)&BqD);
cudaFreeNonZero((void**)&BqD_PN);
cudaFreeNonZero((void**)&RD);
cudaFreeNonZero((void**)&my_to_truth_tableD);
delete []my_to_truth_table;
#ifdef TRUTH
cudaFreeNonZero((void**)&BqD_truth);
cudaFreeNonZero((void**)&BBD_truth);
cudaFreeNonZero((void**)&RD_truth);
#endif
}
void freeModelMemD()
{
cudaFreeNonZero((void**)&vertexParamListD);
cudaFreeNonZero((void**)&vertexCoordListD);
cudaFreeNonZero((void**)&vertexParamListD_teapot);
//cudaFreeNonZero((void**)&vertexCoordListD_teapot);
cudaFreeNonZero((void**)&triangleListD);
cudaFreeNonZero((void**)&sampleValueD);
cudaFreeNonZero((void**)&sampleValueD_PN);
cudaFreeNonZero((void**)&triangleCtrlPointD);
cudaFreeNonZero((void**)&triangleCtrlPointD_PN);
cudaFreeNonZero((void**)&triangleNormalCtrlPointD_PN);
cudaFreeNonZero((void**)&triangle_adjacent_tableD);
#ifdef TRUTH
cudaFreeNonZero((void**)&sampleValueD_truth);
cudaFreeNonZero((void**)&B_1D_truth);
#endif
degreeMemD = 0;
modelMemD = 0;
#ifdef DRAW_TRIANGULAR_CTRL_POINTS
delete []triangular_ctrl_points;
#endif
freeTessMemD();
}
void freeMemD()
{
if (registered)
{
cudaGraphicsUnregisterResource(normalVBO_CUDA);
cudaGraphicsUnregisterResource(texCoordVBO_CUDA);
cudaGraphicsUnregisterResource(texCoord3DVBO_CUDA);
cudaGraphicsUnregisterResource(vertexVBO_CUDA);
#ifdef LINE
cudaGraphicsUnregisterResource(baryVBO_CUDA);
cudaGraphicsUnregisterResource(oriBaryVBO_CUDA);
#endif
//#ifdef TRUTH
cudaGraphicsUnregisterResource(normalVBO_CUDA_truth);
cudaGraphicsUnregisterResource(vertexVBO_CUDA_truth);
//#endif
registered = false;
}
if (cublas_handle)
{
cublasDestroy(cublas_handle);
}
cudaFreeNonZero((void**)&matrixFittingIdxD);
cudaFreeNonZero((void**)&matrixFittingD);
permanentMemD = 0;
freeModelMemD();
}
|
9958c25ba7327ea09897f1f4cef70a74810f65e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>
#define MASK_WIDTH 5
#define O_TILE_WIDTH 16
#define clamp(x) (min(max((x), 0.0), 1.0))
#define BLOCK_WIDTH (O_TILE_WIDTH + MASK_WIDTH - 1)
#define mask_radius (MASK_WIDTH / 2)
//@@ INSERT CODE HERE
//implement the tiled 2D convolution kernel with adjustments for channels
//use shared memory to reduce the number of global accesses, handle the boundary conditions when loading input list elements into the shared memory
//clamp your output values
__global__ void convolution_2D_kernel(float *P, const float* __restrict__ N, int height, int width, int channels, const float* __restrict__ M)
{
__shared__ float Ns[BLOCK_WIDTH][BLOCK_WIDTH];
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y*O_TILE_WIDTH + ty;
int col_o = blockIdx.x*O_TILE_WIDTH + tx;
int row_i = row_o - mask_radius;
int col_i = col_o - mask_radius;
if ((row_i >= 0) && (row_i < height) && (col_i >= 0) && (col_i < width))
{
Ns[ty][tx] = N[(row_i * width + col_i) * channels + blockIdx.z];
}
else {
Ns[ty][tx] = 0.0f;
}
__syncthreads();
float output = 0.0f;
if (ty < O_TILE_WIDTH && tx < O_TILE_WIDTH)
{
for (int i = 0; i < MASK_WIDTH; i++) {
for (int j = 0; j < MASK_WIDTH; j++) {
output += M[i * MASK_WIDTH + j] * Ns[i + ty][j + tx];
}
}
if (row_o < height && col_o < width)
P[(row_o*width + col_o) * channels + blockIdx.z] = clamp(output);
}
}
int main(int argc, char *argv[]) {
wbArg_t arg;
int maskRows;
int maskColumns;
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
char *inputMaskFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *hostMaskData;
float *deviceInputImageData;
float *deviceOutputImageData;
float *deviceMaskData;
arg = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(arg, 0);
inputMaskFile = wbArg_getInputFile(arg, 1);
inputImage = wbImport(inputImageFile);
hostMaskData = (float *)wbImport(inputMaskFile, &maskRows, &maskColumns);
assert(maskRows == MASK_WIDTH); /* mask height is fixed to 5 */
assert(maskColumns == MASK_WIDTH); /* mask width is fixed to 5 */
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//@@ INSERT CODE HERE
//allocate device memory
int sizeMask = sizeof(float) * maskColumns * maskRows;
int sizeInputImage = sizeof(float) * imageWidth * imageHeight * imageChannels;
hipMalloc((void**)&deviceMaskData, sizeMask);
hipMalloc((void**)&deviceInputImageData, sizeInputImage);
hipMalloc((void**)&deviceOutputImageData, sizeInputImage);
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//@@ INSERT CODE HERE
//copy host memory to device
hipMemcpy(deviceMaskData, hostMaskData, sizeMask, hipMemcpyHostToDevice);
hipMemcpy(deviceInputImageData, hostInputImageData, sizeInputImage, hipMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ INSERT CODE HERE
//initialize thread block and kernel grid dimensions
dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH);
dim3 dimGrid(((imageWidth - 1) / O_TILE_WIDTH) + 1, ((imageHeight - 1) / O_TILE_WIDTH) + 1, imageChannels);
//invoke CUDA kernel
convolution_2D_kernel << < dimGrid, dimBlock >> > (deviceOutputImageData, deviceInputImageData, imageHeight, imageWidth, imageChannels, deviceMaskData);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//@@ INSERT CODE HERE
//copy results from device to host
hipMemcpy(hostOutputImageData, deviceOutputImageData, sizeInputImage, hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(arg, outputImage);
//@@ INSERT CODE HERE
//deallocate device memory
hipFree(deviceMaskData);
hipFree(deviceInputImageData);
hipFree(deviceOutputImageData);
free(hostMaskData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
| 9958c25ba7327ea09897f1f4cef70a74810f65e3.cu | #include <wb.h>
#define MASK_WIDTH 5
#define O_TILE_WIDTH 16
#define clamp(x) (min(max((x), 0.0), 1.0))
#define BLOCK_WIDTH (O_TILE_WIDTH + MASK_WIDTH - 1)
#define mask_radius (MASK_WIDTH / 2)
//@@ INSERT CODE HERE
//implement the tiled 2D convolution kernel with adjustments for channels
//use shared memory to reduce the number of global accesses, handle the boundary conditions when loading input list elements into the shared memory
//clamp your output values
__global__ void convolution_2D_kernel(float *P, const float* __restrict__ N, int height, int width, int channels, const float* __restrict__ M)
{
__shared__ float Ns[BLOCK_WIDTH][BLOCK_WIDTH];
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y*O_TILE_WIDTH + ty;
int col_o = blockIdx.x*O_TILE_WIDTH + tx;
int row_i = row_o - mask_radius;
int col_i = col_o - mask_radius;
if ((row_i >= 0) && (row_i < height) && (col_i >= 0) && (col_i < width))
{
Ns[ty][tx] = N[(row_i * width + col_i) * channels + blockIdx.z];
}
else {
Ns[ty][tx] = 0.0f;
}
__syncthreads();
float output = 0.0f;
if (ty < O_TILE_WIDTH && tx < O_TILE_WIDTH)
{
for (int i = 0; i < MASK_WIDTH; i++) {
for (int j = 0; j < MASK_WIDTH; j++) {
output += M[i * MASK_WIDTH + j] * Ns[i + ty][j + tx];
}
}
if (row_o < height && col_o < width)
P[(row_o*width + col_o) * channels + blockIdx.z] = clamp(output);
}
}
int main(int argc, char *argv[]) {
wbArg_t arg;
int maskRows;
int maskColumns;
int imageChannels;
int imageWidth;
int imageHeight;
char *inputImageFile;
char *inputMaskFile;
wbImage_t inputImage;
wbImage_t outputImage;
float *hostInputImageData;
float *hostOutputImageData;
float *hostMaskData;
float *deviceInputImageData;
float *deviceOutputImageData;
float *deviceMaskData;
arg = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(arg, 0);
inputMaskFile = wbArg_getInputFile(arg, 1);
inputImage = wbImport(inputImageFile);
hostMaskData = (float *)wbImport(inputMaskFile, &maskRows, &maskColumns);
assert(maskRows == MASK_WIDTH); /* mask height is fixed to 5 */
assert(maskColumns == MASK_WIDTH); /* mask width is fixed to 5 */
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
wbTime_start(GPU, "Doing GPU Computation (memory + compute)");
wbTime_start(GPU, "Doing GPU memory allocation");
//@@ INSERT CODE HERE
//allocate device memory
int sizeMask = sizeof(float) * maskColumns * maskRows;
int sizeInputImage = sizeof(float) * imageWidth * imageHeight * imageChannels;
cudaMalloc((void**)&deviceMaskData, sizeMask);
cudaMalloc((void**)&deviceInputImageData, sizeInputImage);
cudaMalloc((void**)&deviceOutputImageData, sizeInputImage);
wbTime_stop(GPU, "Doing GPU memory allocation");
wbTime_start(Copy, "Copying data to the GPU");
//@@ INSERT CODE HERE
//copy host memory to device
cudaMemcpy(deviceMaskData, hostMaskData, sizeMask, cudaMemcpyHostToDevice);
cudaMemcpy(deviceInputImageData, hostInputImageData, sizeInputImage, cudaMemcpyHostToDevice);
wbTime_stop(Copy, "Copying data to the GPU");
wbTime_start(Compute, "Doing the computation on the GPU");
//@@ INSERT CODE HERE
//initialize thread block and kernel grid dimensions
dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH);
dim3 dimGrid(((imageWidth - 1) / O_TILE_WIDTH) + 1, ((imageHeight - 1) / O_TILE_WIDTH) + 1, imageChannels);
//invoke CUDA kernel
convolution_2D_kernel << < dimGrid, dimBlock >> > (deviceOutputImageData, deviceInputImageData, imageHeight, imageWidth, imageChannels, deviceMaskData);
wbTime_stop(Compute, "Doing the computation on the GPU");
wbTime_start(Copy, "Copying data from the GPU");
//@@ INSERT CODE HERE
//copy results from device to host
cudaMemcpy(hostOutputImageData, deviceOutputImageData, sizeInputImage, cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying data from the GPU");
wbTime_stop(GPU, "Doing GPU Computation (memory + compute)");
wbSolution(arg, outputImage);
//@@ INSERT CODE HERE
//deallocate device memory
cudaFree(deviceMaskData);
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
free(hostMaskData);
wbImage_delete(outputImage);
wbImage_delete(inputImage);
return 0;
}
|
2c362f154f5042ec59586fcb3a0fee3c59a322f9.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to run matrix multiplication kernels using functions and
data structures provided by CUTLASS using tensor cores; which we run on a NVIDIA
Volta GPU.
Writing a single high performance matrix multiplication kernel is hard but
do-able. Whereas writing high performance kernels at scale which works for
multiple problem sizes with good abstractions is really hard. CUTLASS solves
this problem by providing simplified abstractions to compose multiple sections
of gemm kernel. When used properly, the kernels can hit peak performance of GPU
easily.
CUTLASS divides a kernel into hierarchical composable sections. Which means, at
each thread, warp and thread-block level, they compute on their own tile-size
with higher level of tile sizes being composed from lower level ones. Multiple
thread-tiles (tile size each thread computes) can be used to form warp-tiles
(tile size each warp computes) and multiple warp tiles can be used to compute
threadblock-tile (tile size computed by a threadblock).
In thie example, we split variable initialization into
1. Setting up data properties : describes how matrices are laid out in the
memory and how the kernel can view them (logical to physical mapping)
2. Setting up computation properties : describes how the above set matrices will
be used to compute output of matrix multiplication.
First, we setup the data types of matrices A, B, C and D along with alpha, beta
as the equation for GEMM is D = alpha * A * B + beta * C. In CUTLASS, the
kernels first compute A * B and leaves the rest of the computation to end of the
kernel as alpha * X + beta * C is a simple element-wise operation on X (A * B)
and C. We call this as epilogue of kernel. Hence, we setup data types for alpha
and beta to be equal to ElementComputeEpilogue = float. As we want to MMA
instructions on Volta and they support only half-precision floating point (fp16
or half), we use data type for elements in input matrix A and B as
cutlass::half_t. Volta also supports accumulation of partial dot product to
fp32, which can store wider range of numbers, we use it as data type of output
matrix elements and accumulation. We convey this to CUTLASS kernel by
initializing template variables ElementAccumulator (float),
ElementComputeEpilogue (float), ElementInputA (cutlass::half_t), ElementInputB
(cutlass::half_t), ElementOutput (float). Communicating just the data type is
not enough. As the data is laid out linearly in memory, we have to convey the
layout of matrices. We do that by initializing template variable LayoutInputA to
column major cutlass variable, LayoutInputB to row major and LayoutOutput to row
major. Next, we setup rules to comptue alpha * X + beta * C which is called
epilogue of the kernel. We initialize template variable EpilogueOp, which takes
the data type of output ElementOutput (int32_t), the number of elements per
vector memory access (16), data type of accumulator (int32_t) and data type of
computation of linear combination (alpha * X + beta * C).
Now that we setup the properties of data, we have to setup properties of
computation.
Second, we create template variables of tile sizes for thread-block, warp and
mma-op to 128x128x32, 64x64x4, 8x8x4 (MxNxK) respectively. When passed to
instantiate CUTLASS GEMM kernel, it internally deduce the amount of threads
needed per thread-block, amount of shared memory, storing data in bank-conflict
free manner, and ton of other variables required to compose, intialize and
launch a high performance GEMM kernel. This is the beauty of CUTLASS, it
relieves developer from understanding and coding complicated hardware
optimizations which can easily go wrong.
CUTLASS also supports multiple MMA pipelines in a CTA. What are MMA pipelines?
MMA pipelines constitute the whole process of loading input data from global
memory to shared memory, loading data from shared memory to registers, doing
matrix multiplication, store to global memory. The below flow sequence shows a
typical mma pipeline.
matrix in global memory -> registers -> tile in shared memory -> registers ->
mma -> registers -> output to global memory
The problem with single pipeline is, each stage is synchronous which means, each
stage has to wait until the previous finished executing. There are stages in the
pipeline which do not have fixed latency, for example, the loads from global
memory and shared memory. Therefore, we can add one more pipeline with a phase
shift in mma kernel to hide latency from global and shared memory loads.
Finally, the pipeline in a kernel looks like
(1) matrix in global memory -> (2) registers -> (3) tile in shared memory -> (4)
registers -> (5) mma -> (6) registers -> (7) output to global memory (1) <null>
-> (2) <null> -> (3) matrix in global memory -> (4) registers -> (5) tile in
shared memory -> (6) registers -> (7) mma -> (8) registers -> (9) output to
global memory
This way, you can hide the second global memoroy load latency by doing
computation on already loaded input data.
There are few more template variables initialized such as, which threadblock
tile of output matrix is done which threadblock launched on an SM, CUDA SM
architecture of GPU you want to run on.
These are all put together to create a template variable which describes CUTLASS
GEMM kernel using cutlass::gemm::device::Gemm template.
The next step is to intialize physical data, instantiate and initialize CUTLASS
kernel and run it. We use CUTLASS utilities to initialize, fill, compare
matrices as they are simple and doesn't come in the way of learning CUTLASS.
Once all the matrices are initialized and filled with data, create arguments
tuple to launch CUTLASS kernel which takes problem size (M = 5120, N = 4096 and
K = 4096), matrices, alpha, beta and the important one, split k-dimension
factor. Along with that, we query CUTLASS if any scratch-space memory required
by the kernel we instantiated. If yes, we create it and pass it along with other
arguments created to intialize CUTLASS kernel then, the kernel is launched.
In this example, we later on launch a reference gemm kernel (from CUTLASS
utilities) to compare if the output from CUTLASS kernel is same as reference
GEMM kernel.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output matrices and
// computation between elements in input matrices.
using ElementAccumulator = float; // <- data type of accumulator
using ElementComputeEpilogue =
ElementAccumulator; // <- data type of epilogue operations
using ElementInputA =
cutlass::half_t; // <- data type of elements in input matrix A
using ElementInputB =
cutlass::half_t; // <- data type of elements in input matrix B
using ElementOutput = float; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices.
// Column Major for Matrix A, Row Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::ColumnMajor;
using LayoutInputB = cutlass::layout::RowMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular
// SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm70;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 32>; // <- threadblock tile M = 128,
// N = 128, K = 32
// This code section describes tile size a warp will compute
using ShapeMMAWarp =
cutlass::gemm::GemmShape<64, 64,
32>; // <- warp tile M = 64, N = 64, K = 32
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 4>; // <- MMA Op tile M = 8,
// N = 8, K = 4
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock =
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ??
// This code section describes ?
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::
value, // <- this is the number of elements per
// vectorized memory access. For half
// precision, it's 8 elements. This becomes
// the vector width of math instructions in
// epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear
// combination function
// Number of pipelines you want to use
constexpr int NumStages = 2;
using Gemm = cutlass::gemm::device::Gemm<
ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput,
LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock,
ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>;
int run() {
hipDeviceProp_t props;
hipError_t error = hipGetDeviceProperties(&props, 0);
if (error != hipSuccess) {
std::cerr << "hipGetDeviceProperties() returned an error: "
<< hipGetErrorString(error) << std::endl;
return -1;
}
if (props.major != 7) {
std::cerr << "Volta Tensor Ops must be run on a machine with compute "
"capability of 70, 72, or 75."
<< std::endl;
// Return 0 so tests are considered passing if run on unsupported
// architectures or CUDA Toolkits.
return 0;
}
const int length_m = 5120;
const int length_n = 4096;
const int length_k = 4096;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N
// used to store output from CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N
// used to store output from reference kernel
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4),
0); // <- Fill matrix A on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4),
0); // <- Fill matrix B on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random
// data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on
// host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(0);
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as
// arguments to launch instantiated CUTLASS kernel
typename Gemm::Arguments arguments{
problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix
// multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Initialize CUTLASS kernel with arguments and workspace pointer
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
// Create instantiation for device reference gemm kernel
cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB,
LayoutInputB, ElementOutput, LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue>
gemm_device;
// Launch device reference gemm kernel
gemm_device(problem_size, alpha, tensor_a.device_ref(),
tensor_b.device_ref(), beta, tensor_c.device_ref(),
tensor_ref_d.device_ref());
// Wait for kernels to finish
hipDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(), tensor_ref_d.host_view());
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main() {
// Volta Tensor Core operations exposed with mma.sync are first available in
// CUDA 10.1.
//
// CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 10 ||
(__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))) {
std::cerr << "Volta Tensor Core operations must be compiled with CUDA "
"10.1 Toolkit or later."
<< std::endl;
// Returning zero when built on older Toolkits so tests pass. The
// actions of this SDK example are no-op.
return 0;
} else {
return run();
}
}
| 2c362f154f5042ec59586fcb3a0fee3c59a322f9.cu | /***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice,
*this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
*notice, this list of conditions and the following disclaimer in the
*documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its
*contributors may be used to endorse or promote products derived from this
*software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
*AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
*IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT,
*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
*DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
*OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
*EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
This example shows how to run matrix multiplication kernels using functions and
data structures provided by CUTLASS using tensor cores; which we run on a NVIDIA
Volta GPU.
Writing a single high performance matrix multiplication kernel is hard but
do-able. Whereas writing high performance kernels at scale which works for
multiple problem sizes with good abstractions is really hard. CUTLASS solves
this problem by providing simplified abstractions to compose multiple sections
of gemm kernel. When used properly, the kernels can hit peak performance of GPU
easily.
CUTLASS divides a kernel into hierarchical composable sections. Which means, at
each thread, warp and thread-block level, they compute on their own tile-size
with higher level of tile sizes being composed from lower level ones. Multiple
thread-tiles (tile size each thread computes) can be used to form warp-tiles
(tile size each warp computes) and multiple warp tiles can be used to compute
threadblock-tile (tile size computed by a threadblock).
In thie example, we split variable initialization into
1. Setting up data properties : describes how matrices are laid out in the
memory and how the kernel can view them (logical to physical mapping)
2. Setting up computation properties : describes how the above set matrices will
be used to compute output of matrix multiplication.
First, we setup the data types of matrices A, B, C and D along with alpha, beta
as the equation for GEMM is D = alpha * A * B + beta * C. In CUTLASS, the
kernels first compute A * B and leaves the rest of the computation to end of the
kernel as alpha * X + beta * C is a simple element-wise operation on X (A * B)
and C. We call this as epilogue of kernel. Hence, we setup data types for alpha
and beta to be equal to ElementComputeEpilogue = float. As we want to MMA
instructions on Volta and they support only half-precision floating point (fp16
or half), we use data type for elements in input matrix A and B as
cutlass::half_t. Volta also supports accumulation of partial dot product to
fp32, which can store wider range of numbers, we use it as data type of output
matrix elements and accumulation. We convey this to CUTLASS kernel by
initializing template variables ElementAccumulator (float),
ElementComputeEpilogue (float), ElementInputA (cutlass::half_t), ElementInputB
(cutlass::half_t), ElementOutput (float). Communicating just the data type is
not enough. As the data is laid out linearly in memory, we have to convey the
layout of matrices. We do that by initializing template variable LayoutInputA to
column major cutlass variable, LayoutInputB to row major and LayoutOutput to row
major. Next, we setup rules to comptue alpha * X + beta * C which is called
epilogue of the kernel. We initialize template variable EpilogueOp, which takes
the data type of output ElementOutput (int32_t), the number of elements per
vector memory access (16), data type of accumulator (int32_t) and data type of
computation of linear combination (alpha * X + beta * C).
Now that we setup the properties of data, we have to setup properties of
computation.
Second, we create template variables of tile sizes for thread-block, warp and
mma-op to 128x128x32, 64x64x4, 8x8x4 (MxNxK) respectively. When passed to
instantiate CUTLASS GEMM kernel, it internally deduce the amount of threads
needed per thread-block, amount of shared memory, storing data in bank-conflict
free manner, and ton of other variables required to compose, intialize and
launch a high performance GEMM kernel. This is the beauty of CUTLASS, it
relieves developer from understanding and coding complicated hardware
optimizations which can easily go wrong.
CUTLASS also supports multiple MMA pipelines in a CTA. What are MMA pipelines?
MMA pipelines constitute the whole process of loading input data from global
memory to shared memory, loading data from shared memory to registers, doing
matrix multiplication, store to global memory. The below flow sequence shows a
typical mma pipeline.
matrix in global memory -> registers -> tile in shared memory -> registers ->
mma -> registers -> output to global memory
The problem with single pipeline is, each stage is synchronous which means, each
stage has to wait until the previous finished executing. There are stages in the
pipeline which do not have fixed latency, for example, the loads from global
memory and shared memory. Therefore, we can add one more pipeline with a phase
shift in mma kernel to hide latency from global and shared memory loads.
Finally, the pipeline in a kernel looks like
(1) matrix in global memory -> (2) registers -> (3) tile in shared memory -> (4)
registers -> (5) mma -> (6) registers -> (7) output to global memory (1) <null>
-> (2) <null> -> (3) matrix in global memory -> (4) registers -> (5) tile in
shared memory -> (6) registers -> (7) mma -> (8) registers -> (9) output to
global memory
This way, you can hide the second global memoroy load latency by doing
computation on already loaded input data.
There are few more template variables initialized such as, which threadblock
tile of output matrix is done which threadblock launched on an SM, CUDA SM
architecture of GPU you want to run on.
These are all put together to create a template variable which describes CUTLASS
GEMM kernel using cutlass::gemm::device::Gemm template.
The next step is to intialize physical data, instantiate and initialize CUTLASS
kernel and run it. We use CUTLASS utilities to initialize, fill, compare
matrices as they are simple and doesn't come in the way of learning CUTLASS.
Once all the matrices are initialized and filled with data, create arguments
tuple to launch CUTLASS kernel which takes problem size (M = 5120, N = 4096 and
K = 4096), matrices, alpha, beta and the important one, split k-dimension
factor. Along with that, we query CUTLASS if any scratch-space memory required
by the kernel we instantiated. If yes, we create it and pass it along with other
arguments created to intialize CUTLASS kernel then, the kernel is launched.
In this example, we later on launch a reference gemm kernel (from CUTLASS
utilities) to compare if the output from CUTLASS kernel is same as reference
GEMM kernel.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/device/gemm.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output matrices and
// computation between elements in input matrices.
using ElementAccumulator = float; // <- data type of accumulator
using ElementComputeEpilogue =
ElementAccumulator; // <- data type of epilogue operations
using ElementInputA =
cutlass::half_t; // <- data type of elements in input matrix A
using ElementInputB =
cutlass::half_t; // <- data type of elements in input matrix B
using ElementOutput = float; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices.
// Column Major for Matrix A, Row Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::ColumnMajor;
using LayoutInputB = cutlass::layout::RowMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular
// SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm70;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 32>; // <- threadblock tile M = 128,
// N = 128, K = 32
// This code section describes tile size a warp will compute
using ShapeMMAWarp =
cutlass::gemm::GemmShape<64, 64,
32>; // <- warp tile M = 64, N = 64, K = 32
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<8, 8, 4>; // <- MMA Op tile M = 8,
// N = 8, K = 4
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock =
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ??
// This code section describes ?
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::
value, // <- this is the number of elements per
// vectorized memory access. For half
// precision, it's 8 elements. This becomes
// the vector width of math instructions in
// epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear
// combination function
// Number of pipelines you want to use
constexpr int NumStages = 2;
using Gemm = cutlass::gemm::device::Gemm<
ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput,
LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock,
ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>;
int run() {
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: "
<< cudaGetErrorString(error) << std::endl;
return -1;
}
if (props.major != 7) {
std::cerr << "Volta Tensor Ops must be run on a machine with compute "
"capability of 70, 72, or 75."
<< std::endl;
// Return 0 so tests are considered passing if run on unsupported
// architectures or CUDA Toolkits.
return 0;
}
const int length_m = 5120;
const int length_n = 4096;
const int length_k = 4096;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
problem_size.mk()); // <- Create matrix A with dimensions M x K
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N
// used to store output from CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N
// used to store output from reference kernel
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4),
0); // <- Fill matrix A on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4),
0); // <- Fill matrix B on host with uniform-distribution random
// data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4),
0); // <- Fill matrix C on host with uniform-distribution random
// data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on
// host with zeros
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(0);
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as
// arguments to launch instantiated CUTLASS kernel
typename Gemm::Arguments arguments{
problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix
// multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Initialize CUTLASS kernel with arguments and workspace pointer
cutlass::Status status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
// Create instantiation for device reference gemm kernel
cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB,
LayoutInputB, ElementOutput, LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue>
gemm_device;
// Launch device reference gemm kernel
gemm_device(problem_size, alpha, tensor_a.device_ref(),
tensor_b.device_ref(), beta, tensor_c.device_ref(),
tensor_ref_d.device_ref());
// Wait for kernels to finish
cudaDeviceSynchronize();
// Copy output data from CUTLASS and reference kernel to host for comparison
tensor_d.sync_host();
tensor_ref_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(), tensor_ref_d.host_view());
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main() {
// Volta Tensor Core operations exposed with mma.sync are first available in
// CUDA 10.1.
//
// CUTLASS must be compiled with CUDA 10.1 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 10 ||
(__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 1))) {
std::cerr << "Volta Tensor Core operations must be compiled with CUDA "
"10.1 Toolkit or later."
<< std::endl;
// Returning zero when built on older Toolkits so tests pass. The
// actions of this SDK example are no-op.
return 0;
} else {
return run();
}
}
|
14c1cb04a5a9404feb1dbf002a9c21c60f646fe4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2020 Stanford University, NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "realm_saxpy.h"
__global__
void gpu_saxpy(const float alpha,
//const int num_elements,
Rect<1> bounds,
AffineAccessor<float, 1> ra_x,
AffineAccessor<float, 1> ra_y,
AffineAccessor<float, 1> ra_z)
// const float *x, const float *y, float *z)
{
int p = bounds.lo + (blockIdx.x * blockDim.x) + threadIdx.x;
if (p <= bounds.hi)
ra_z[p] += alpha * ra_x[p] + ra_y[p];
}
__host__
void gpu_saxpy_task(const void *args, size_t arglen,
const void *userdata, size_t userlen, Processor p)
{
assert(arglen == sizeof(SaxpyArgs));
const SaxpyArgs *saxpy_args = (const SaxpyArgs*)args;
printf("Running GPU Saxpy Task\n\n");
// get affine accessors for each of our three instances
AffineAccessor<float, 1> ra_x = AffineAccessor<float, 1>(saxpy_args->x_inst,
FID_X);
AffineAccessor<float, 1> ra_y = AffineAccessor<float, 1>(saxpy_args->y_inst,
FID_Y);
AffineAccessor<float, 1> ra_z = AffineAccessor<float, 1>(saxpy_args->z_inst,
FID_Z);
size_t num_elements = saxpy_args->bounds.volume();
size_t cta_threads = 256;
size_t total_ctas = (num_elements + (cta_threads-1))/cta_threads;
hipLaunchKernelGGL(( gpu_saxpy), dim3(total_ctas), dim3(cta_threads), 0, 0, saxpy_args->alpha, saxpy_args->bounds,
ra_x, ra_y, ra_z);
// LOOK: NO WAIT! :)
}
| 14c1cb04a5a9404feb1dbf002a9c21c60f646fe4.cu | /* Copyright 2020 Stanford University, NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "realm_saxpy.h"
__global__
void gpu_saxpy(const float alpha,
//const int num_elements,
Rect<1> bounds,
AffineAccessor<float, 1> ra_x,
AffineAccessor<float, 1> ra_y,
AffineAccessor<float, 1> ra_z)
// const float *x, const float *y, float *z)
{
int p = bounds.lo + (blockIdx.x * blockDim.x) + threadIdx.x;
if (p <= bounds.hi)
ra_z[p] += alpha * ra_x[p] + ra_y[p];
}
__host__
void gpu_saxpy_task(const void *args, size_t arglen,
const void *userdata, size_t userlen, Processor p)
{
assert(arglen == sizeof(SaxpyArgs));
const SaxpyArgs *saxpy_args = (const SaxpyArgs*)args;
printf("Running GPU Saxpy Task\n\n");
// get affine accessors for each of our three instances
AffineAccessor<float, 1> ra_x = AffineAccessor<float, 1>(saxpy_args->x_inst,
FID_X);
AffineAccessor<float, 1> ra_y = AffineAccessor<float, 1>(saxpy_args->y_inst,
FID_Y);
AffineAccessor<float, 1> ra_z = AffineAccessor<float, 1>(saxpy_args->z_inst,
FID_Z);
size_t num_elements = saxpy_args->bounds.volume();
size_t cta_threads = 256;
size_t total_ctas = (num_elements + (cta_threads-1))/cta_threads;
gpu_saxpy<<<total_ctas, cta_threads>>>(saxpy_args->alpha, saxpy_args->bounds,
ra_x, ra_y, ra_z);
// LOOK: NO WAIT! :)
}
|
f4eed9bf10d8e815f9564740a1fffd73430e9db3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/image/transform_gpu.h"
#include "caffe2/utils/conversions.h"
/**
*
* Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved
* Distributed under 2-clause BSD license; see accompanying LICENSE file
*
**/
namespace caffe2 {
namespace {
// input in (int8, NHWC), output in (fp32, NCHW)
template <typename In, typename Out>
__global__ void transform_kernel(
const int N,
const int C,
const int H,
const int W,
const float* mean,
const float* std,
const In* in,
Out* out) {
const int n = blockIdx.x;
const int nStride = C*H*W;
// pointers to data for this image
const In* input_ptr = &in[n*nStride];
Out* output_ptr = &out[n*nStride];
// either read or write uncoalesced - try reading
for (int c=0; c < C; ++c) {
for (int h=threadIdx.y; h < H; h += blockDim.y) {
for (int w=threadIdx.x; w < W; w += blockDim.x) {
int in_idx = c + C*w + C*W*h; // HWC
int out_idx = c*H*W + h*W + w; // CHW
output_ptr[out_idx] = convert::To<float,Out>(
(convert::To<In,float>(input_ptr[in_idx])-mean[c]) * std[c]);
}
}
}
}
}
template <typename T_IN, typename T_OUT, class Context>
bool TransformOnGPU(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
Context* context) {
const int N = X.dim32(0), C = X.dim32(3), H = X.dim32(1), W = X.dim32(2);
auto* input_data = X.template data<T_IN>();
auto* output_data = Y->template mutable_data<T_OUT>();
hipLaunchKernelGGL(( transform_kernel<
T_IN, T_OUT>), dim3(N), dim3(dim3(16, 16)), 0, context->cuda_stream(),
N, C, H, W, mean.template data<float>(), std.template data<float>(),
input_data, output_data);
return true;
};
template bool TransformOnGPU<uint8_t, float, CUDAContext>(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
CUDAContext* context);
template bool TransformOnGPU<uint8_t, at::Half, CUDAContext>(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
CUDAContext* context);
} // namespace caffe2
| f4eed9bf10d8e815f9564740a1fffd73430e9db3.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/image/transform_gpu.h"
#include "caffe2/utils/conversions.h"
/**
*
* Copyright (c) 2016, NVIDIA CORPORATION, All rights reserved
* Distributed under 2-clause BSD license; see accompanying LICENSE file
*
**/
namespace caffe2 {
namespace {
// input in (int8, NHWC), output in (fp32, NCHW)
template <typename In, typename Out>
__global__ void transform_kernel(
const int N,
const int C,
const int H,
const int W,
const float* mean,
const float* std,
const In* in,
Out* out) {
const int n = blockIdx.x;
const int nStride = C*H*W;
// pointers to data for this image
const In* input_ptr = &in[n*nStride];
Out* output_ptr = &out[n*nStride];
// either read or write uncoalesced - try reading
for (int c=0; c < C; ++c) {
for (int h=threadIdx.y; h < H; h += blockDim.y) {
for (int w=threadIdx.x; w < W; w += blockDim.x) {
int in_idx = c + C*w + C*W*h; // HWC
int out_idx = c*H*W + h*W + w; // CHW
output_ptr[out_idx] = convert::To<float,Out>(
(convert::To<In,float>(input_ptr[in_idx])-mean[c]) * std[c]);
}
}
}
}
}
template <typename T_IN, typename T_OUT, class Context>
bool TransformOnGPU(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
Context* context) {
const int N = X.dim32(0), C = X.dim32(3), H = X.dim32(1), W = X.dim32(2);
auto* input_data = X.template data<T_IN>();
auto* output_data = Y->template mutable_data<T_OUT>();
transform_kernel<
T_IN, T_OUT><<<N, dim3(16, 16), 0, context->cuda_stream()>>>(
N, C, H, W, mean.template data<float>(), std.template data<float>(),
input_data, output_data);
return true;
};
template bool TransformOnGPU<uint8_t, float, CUDAContext>(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
CUDAContext* context);
template bool TransformOnGPU<uint8_t, at::Half, CUDAContext>(
Tensor& X,
Tensor* Y,
Tensor& mean,
Tensor& std,
CUDAContext* context);
} // namespace caffe2
|
fd41e5abd9bec9d4f215a51a6f4454f5a8fa59f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include <time.h>
#include <sys/time.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
__global__ void array_mult_kernel(double *A, double *B, double *C)
{
C[blockIdx.x * blockDim.x + threadIdx.x] = A[blockIdx.x * blockDim.x + threadIdx.x] *
B[blockIdx.x * blockDim.x + threadIdx.x];
}
void array_mult_dev(double *d_A, double *d_B, double *d_C, int N)
{
int bSize;
int tSize = 32;
bSize = (N >> 5) + 1;
//while (res > 128)
{
//bSize = res >> 5; // 32 = 2 ^ 5
hipLaunchKernelGGL(( array_mult_kernel), dim3(bSize), dim3(tSize), 0, 0, d_A, d_B, d_C);
}
hipDeviceSynchronize();
}
void array_mult_host_naive(double *A, double *B, double *C, const int N)
{
for (int idx = 0; idx < N; idx++)
{
C[idx] = A[idx] * B[idx];
}
}
void array_mult_host_test_2(double *d_A, double *d_B, double *d_C, const int N)
{
double *buffA;
double *buffB;
double *buffC;
buffA = (double*)malloc(sizeof(double) * N);
buffB = (double*)malloc(sizeof(double) * N);
buffC = (double*)malloc(sizeof(double) * N);
hipMemcpy(buffA, d_A, sizeof(double) * N, hipMemcpyDeviceToHost);
hipMemcpy(buffB, d_B, sizeof(double) * N, hipMemcpyDeviceToHost);
for (int j = 0; j < N; ++j)
{
buffC[j] = buffA[j] * buffB[j];
}
hipMemcpy(d_C, buffC, sizeof(double) * N, hipMemcpyHostToDevice);
free(buffA);
free(buffB);
free(buffC);
}
void array_mult_host_test_3(double *h_A, double *h_B, double *h_C,
double *d_A, double *d_B, double *d_C,
const int N)
{
hipMemcpy(h_A, d_A, sizeof(double) * N, hipMemcpyDeviceToHost);
hipMemcpy(h_B, d_B, sizeof(double) * N, hipMemcpyDeviceToHost);
for (int j = 0; j < N; ++j)
{
h_C[j] = h_A[j] * h_B[j];
}
hipMemcpy(d_C, h_C, sizeof(double) * N, hipMemcpyHostToDevice);
}
void array_mult_host_hybr(double *d_A, double *d_B, double *d_C, int N)
{
int off = 0;
if (N >= 1024)
{
int bSize = N / 1024;
off = bSize * 1024;
N -= off;
hipLaunchKernelGGL(( array_mult_kernel), dim3(bSize), dim3(32), 0, 0, d_A, d_B, d_C);
}
array_mult_host_test_2(&d_A[off], &d_B[off], &d_C[off], N);
hipDeviceSynchronize();
}
void initialData(double *ip,int size)
{
// generate different seed for random number
time_t t;
srand((unsigned int) time(&t));
for (int i=0; i<size; i++)
{
ip[i] = (double)( rand() & 0xFF )/10.0;
}
}
bool checkResult(double *h_res, double *d_res, int n)
{
double buff;
for (int i = 0; i < n; ++i)
{
hipMemcpy(&buff, &d_res[i], sizeof(double), hipMemcpyDeviceToHost);
if (abs(buff - h_res[i]) >= 1.E-12)
{
printf("Test failed on i=%d, host: %lf, dev: %lf\n", i, h_res[i], buff);
return false;
}
//printf("%lf, %lf\n", h_res[i], buff);
}
printf("Test pass\n");
return true;
}
void launchTest(int N, int repeat)
{
double tStart, tEnd, sum;
double *h_A, *h_B, *h_C;
double *d_A, *d_B, *d_C;
h_A = (double *)malloc(sizeof(double) * N);
h_B = (double *)malloc(sizeof(double) * N);
h_C = (double *)malloc(sizeof(double) * N);
hipMalloc((void **)&d_A, sizeof(double) * N);
hipMalloc((void **)&d_B, sizeof(double) * N);
hipMalloc((void **)&d_C, sizeof(double) * N);
printf("\nInitializing data (size=%d)\n", N);
initialData(h_A, N);
initialData(h_B, N);
hipMemcpy(d_A, h_A, sizeof(double) * N, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, sizeof(double) * N, hipMemcpyHostToDevice);
// sum = 0;
// for (int i = 0; i < repeat; ++i)
// {
// tStart = cpuSecond();
// array_mult_host_naive(h_A, h_B, h_C, N);
// tEnd = cpuSecond();
// sum += (tEnd - tStart);
// }
// printf("%30s - %12.9lf ms\n", "array_mult_host_naive", (sum / repeat * 1000));
// sum = 0;
// for (int i = 0; i < repeat; ++i)
// {
// tStart = cpuSecond();
// array_mult_host_test_3(h_A, h_B, h_C, d_A, d_B, d_C, N);
// tEnd = cpuSecond();
// sum += (tEnd - tStart);
// }
// printf("%30s - %12.9lf ms\n", "array_mult_host_test_3", (sum / repeat * 1000));
sum = 0;
for (int i = 0; i < repeat; ++i)
{
tStart = cpuSecond();
array_mult_host_test_2(d_A, d_B, d_C, N);
tEnd = cpuSecond();
sum += (tEnd - tStart);
}
printf("%30s - %12.9lf ms\n", "array_mult_host_test_2", (sum / repeat * 1000));
sum = 0;
for (int i = 0; i < repeat; ++i)
{
tStart = cpuSecond();
array_mult_host_hybr(d_A, d_B, d_C, N);
tEnd = cpuSecond();
sum += (tEnd - tStart);
}
printf("%30s - %12.9lf ms\n", "array_mult_host_hybr", (sum / repeat * 1000));
array_mult_host_hybr(d_A, d_B, d_C, N);
array_mult_host_naive(h_A, h_B, h_C, N);
checkResult(h_C, d_C, N);
// sum = 0;
// for (int i = 0; i < repeat; ++i)
// {
// tStart = cpuSecond();
// array_mult_dev(d_A, d_B, d_C, N);
// tEnd = cpuSecond();
// sum += (tEnd - tStart);
// }
// printf("%30s - %12.9lf ms\n", "array_mult_dev", (sum / repeat * 1000));
// array_mult_host_naive(h_A, h_B, h_C, N);
// checkResult(h_C, d_C, N);
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
int main(int argc, char **argv)
{
for (int i = atoi(argv[1]); i < atoi(argv[2]); i *= 2)
{
launchTest(i, 10);
}
return(0);
}
| fd41e5abd9bec9d4f215a51a6f4454f5a8fa59f6.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include <time.h>
#include <sys/time.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp,NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
__global__ void array_mult_kernel(double *A, double *B, double *C)
{
C[blockIdx.x * blockDim.x + threadIdx.x] = A[blockIdx.x * blockDim.x + threadIdx.x] *
B[blockIdx.x * blockDim.x + threadIdx.x];
}
void array_mult_dev(double *d_A, double *d_B, double *d_C, int N)
{
int bSize;
int tSize = 32;
bSize = (N >> 5) + 1;
//while (res > 128)
{
//bSize = res >> 5; // 32 = 2 ^ 5
array_mult_kernel<<<bSize, tSize>>>(d_A, d_B, d_C);
}
cudaDeviceSynchronize();
}
void array_mult_host_naive(double *A, double *B, double *C, const int N)
{
for (int idx = 0; idx < N; idx++)
{
C[idx] = A[idx] * B[idx];
}
}
void array_mult_host_test_2(double *d_A, double *d_B, double *d_C, const int N)
{
double *buffA;
double *buffB;
double *buffC;
buffA = (double*)malloc(sizeof(double) * N);
buffB = (double*)malloc(sizeof(double) * N);
buffC = (double*)malloc(sizeof(double) * N);
cudaMemcpy(buffA, d_A, sizeof(double) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(buffB, d_B, sizeof(double) * N, cudaMemcpyDeviceToHost);
for (int j = 0; j < N; ++j)
{
buffC[j] = buffA[j] * buffB[j];
}
cudaMemcpy(d_C, buffC, sizeof(double) * N, cudaMemcpyHostToDevice);
free(buffA);
free(buffB);
free(buffC);
}
void array_mult_host_test_3(double *h_A, double *h_B, double *h_C,
double *d_A, double *d_B, double *d_C,
const int N)
{
cudaMemcpy(h_A, d_A, sizeof(double) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(h_B, d_B, sizeof(double) * N, cudaMemcpyDeviceToHost);
for (int j = 0; j < N; ++j)
{
h_C[j] = h_A[j] * h_B[j];
}
cudaMemcpy(d_C, h_C, sizeof(double) * N, cudaMemcpyHostToDevice);
}
void array_mult_host_hybr(double *d_A, double *d_B, double *d_C, int N)
{
int off = 0;
if (N >= 1024)
{
int bSize = N / 1024;
off = bSize * 1024;
N -= off;
array_mult_kernel<<<bSize, 32>>>(d_A, d_B, d_C);
}
array_mult_host_test_2(&d_A[off], &d_B[off], &d_C[off], N);
cudaDeviceSynchronize();
}
void initialData(double *ip,int size)
{
// generate different seed for random number
time_t t;
srand((unsigned int) time(&t));
for (int i=0; i<size; i++)
{
ip[i] = (double)( rand() & 0xFF )/10.0;
}
}
bool checkResult(double *h_res, double *d_res, int n)
{
double buff;
for (int i = 0; i < n; ++i)
{
cudaMemcpy(&buff, &d_res[i], sizeof(double), cudaMemcpyDeviceToHost);
if (abs(buff - h_res[i]) >= 1.E-12)
{
printf("Test failed on i=%d, host: %lf, dev: %lf\n", i, h_res[i], buff);
return false;
}
//printf("%lf, %lf\n", h_res[i], buff);
}
printf("Test pass\n");
return true;
}
void launchTest(int N, int repeat)
{
double tStart, tEnd, sum;
double *h_A, *h_B, *h_C;
double *d_A, *d_B, *d_C;
h_A = (double *)malloc(sizeof(double) * N);
h_B = (double *)malloc(sizeof(double) * N);
h_C = (double *)malloc(sizeof(double) * N);
cudaMalloc((void **)&d_A, sizeof(double) * N);
cudaMalloc((void **)&d_B, sizeof(double) * N);
cudaMalloc((void **)&d_C, sizeof(double) * N);
printf("\nInitializing data (size=%d)\n", N);
initialData(h_A, N);
initialData(h_B, N);
cudaMemcpy(d_A, h_A, sizeof(double) * N, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, sizeof(double) * N, cudaMemcpyHostToDevice);
// sum = 0;
// for (int i = 0; i < repeat; ++i)
// {
// tStart = cpuSecond();
// array_mult_host_naive(h_A, h_B, h_C, N);
// tEnd = cpuSecond();
// sum += (tEnd - tStart);
// }
// printf("%30s - %12.9lf ms\n", "array_mult_host_naive", (sum / repeat * 1000));
// sum = 0;
// for (int i = 0; i < repeat; ++i)
// {
// tStart = cpuSecond();
// array_mult_host_test_3(h_A, h_B, h_C, d_A, d_B, d_C, N);
// tEnd = cpuSecond();
// sum += (tEnd - tStart);
// }
// printf("%30s - %12.9lf ms\n", "array_mult_host_test_3", (sum / repeat * 1000));
sum = 0;
for (int i = 0; i < repeat; ++i)
{
tStart = cpuSecond();
array_mult_host_test_2(d_A, d_B, d_C, N);
tEnd = cpuSecond();
sum += (tEnd - tStart);
}
printf("%30s - %12.9lf ms\n", "array_mult_host_test_2", (sum / repeat * 1000));
sum = 0;
for (int i = 0; i < repeat; ++i)
{
tStart = cpuSecond();
array_mult_host_hybr(d_A, d_B, d_C, N);
tEnd = cpuSecond();
sum += (tEnd - tStart);
}
printf("%30s - %12.9lf ms\n", "array_mult_host_hybr", (sum / repeat * 1000));
array_mult_host_hybr(d_A, d_B, d_C, N);
array_mult_host_naive(h_A, h_B, h_C, N);
checkResult(h_C, d_C, N);
// sum = 0;
// for (int i = 0; i < repeat; ++i)
// {
// tStart = cpuSecond();
// array_mult_dev(d_A, d_B, d_C, N);
// tEnd = cpuSecond();
// sum += (tEnd - tStart);
// }
// printf("%30s - %12.9lf ms\n", "array_mult_dev", (sum / repeat * 1000));
// array_mult_host_naive(h_A, h_B, h_C, N);
// checkResult(h_C, d_C, N);
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
int main(int argc, char **argv)
{
for (int i = atoi(argv[1]); i < atoi(argv[2]); i *= 2)
{
launchTest(i, 10);
}
return(0);
}
|
aae39ece574e479fde6af0960441f8c07c7f0920.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Backprojection3D.h"
// 2018/11/16 apply GPU acceleration
//__device__ const double PI = 3.141592653589793;
__device__ const double EPS = 1e-15;
// thiss
__global__ void BackProjection3D(const float *dev_R, float *dev_Display, const double *dev_Size,
const int t_length, const int s_length, const int z_length, const float Beta, const double Distance,
const float *dev_Pdomain, const float *dev_Xigamadomain, const float PInt, const float XigamaInt,
const float BetaScanInt, const float minP, const float maxP, const float minXigama, const float maxXigama,
const int betaIndex, const int LP, const int LXigama, const int T_start, const int S_start)
{
const unsigned int Tindex = T_start + threadIdx.x;
const unsigned int Sindex = S_start + blockIdx.x;
//const unsigned int Zindex = blockIdx.y;
unsigned long thread_id;
// initialize
const double Resolution_t = 1.0 * dev_Size[0] / t_length;
const double Resolution_s = 1.0 * dev_Size[1] / s_length;
const double Resolution_z = 1.0 * dev_Size[2] / z_length;
// rotation center
double Center_t = dev_Size[0] / 2;
double Center_s = dev_Size[1] / 2;
double Center_z = dev_Size[2] / 2;
// this is a little different from code on MATLAB
// image pixel in ground coordinate
double image_t = (Tindex + 0.5) * Resolution_t - Center_t;
double image_s = (Sindex + 0.5) * Resolution_s - Center_s;
double image_z;
// rotate in ground coordinate
double dect_t = image_t * cos(Beta) + image_s * sin(Beta);
double dect_s = -image_t * sin(Beta) + image_s * cos(Beta);
double dect_z;
// define the projection position on the detector
double LengthRatio = Distance / (Distance - dect_s);
double Xigama;
double P = dect_t * LengthRatio;
unsigned short XigamaN1index = 0, XigamaN2index = 0, PN1index = 0, PN2index = 0;
double P_domain1 = 0, P_domain2 = 0, Xigama_domain1 = 0, Xigama_domain2 = 0;
double Xig1 = 0, Xig2 = 0, P1 = 0, P2 = 0;
double Display_pBeta = 0;
double backweight = 0;
//double LengthinROI = 0;
// according to euler equation
// define the source in matlab coordinate
double source_t = Center_t - Distance * sin(Beta), source_s = Center_s + Distance * cos(Beta), source_z;
// in matlab coordinate
// assume the projection line go through the center of the current pixel
double DetectPoint_tend = image_t + Center_t, DetectPoint_send = image_s + Center_s, DetectPoint_zend;
// actual Size
double tlow = 0, thigh = t_length * Resolution_t, slow = 0, shigh = s_length * Resolution_s,
zlow = 0, zhigh = z_length * Resolution_z;
double tlow_s, tlow_z, slow_t, slow_z, zlow_t, zlow_s/*, thigh_s, thigh_z, shigh_t, shigh_z, zhigh_t, zhigh_s*/;
double T1 = 0, S1 = 0, Z1 = 0/*, T2 = 0, S2 = 0, Z2 = 0*/;
double LengthinPixel;
for (short Zindex = 0; Zindex < z_length; Zindex++)
{
image_z = (Zindex + 0.5) * Resolution_z - Center_z;
dect_z = image_z;
Xigama = dect_z * LengthRatio;
if ((P >= minP) && (P < maxP) && (Xigama >= minXigama) && (Xigama < maxXigama))
{
XigamaN1index = floor(fabs(Xigama - dev_Xigamadomain[0]) / XigamaInt);
XigamaN2index = XigamaN1index + 1;
PN1index = floor(fabs(P - dev_Pdomain[0]) / PInt);
PN2index = PN1index + 1;
P_domain1 = dev_Pdomain[PN1index]; P_domain2 = dev_Pdomain[PN2index];
Xigama_domain1 = dev_Xigamadomain[XigamaN1index]; Xigama_domain2 = dev_Xigamadomain[XigamaN2index];
//bilinear interpolation
Xig1 = fabs(Xigama - Xigama_domain1); Xig2 = fabs(Xigama_domain2 - Xigama);
P1 = fabs(P - P_domain1); P2 = fabs(P_domain2 - P);
Display_pBeta = (Xig2 * P2 * dev_R[betaIndex * LP * LXigama + XigamaN1index * LP + PN1index]
+ Xig1 * P2 * dev_R[betaIndex * LP * LXigama + XigamaN2index * LP + PN1index] + Xig2 * P1 * dev_R[betaIndex * LP * LXigama + XigamaN1index * LP + PN2index]
+ Xig1 * P1 * dev_R[betaIndex * LP * LXigama + XigamaN2index * LP + PN2index]) / (PInt * XigamaInt);
// the way to compute backweight is to get the cross length in the specific pixel and the whole ROI
// according to euler equation
// define the source in matlab coordinate
source_z = Center_z;
// in matlab coordinate
// assume the projection line go through the center of the current pixel
DetectPoint_zend = image_z + Center_z;
// first compute length in whole ROI
// compute the first and last point in the ROI
// using DetectPoint_end set up projection equation
//tlow_s = source_s + (tlow - source_t) * (DetectPoint_send - source_s) / (DetectPoint_tend - source_t + EPS);
//tlow_z = source_z + (tlow - source_t) * (DetectPoint_zend - source_z) / (DetectPoint_tend - source_t + EPS);
//thigh_s = source_s + (thigh - source_t) * (DetectPoint_send - source_s) / (DetectPoint_tend - source_t);
//thigh_z = source_z + (thigh - source_t) * (DetectPoint_zend - source_z) / (DetectPoint_tend - source_t);
//slow_t = source_t + (slow - source_s) * (DetectPoint_tend - source_t) / (DetectPoint_send - source_s + EPS);
//slow_z = source_z + (slow - source_s) * (DetectPoint_zend - source_z) / (DetectPoint_send - source_s + EPS);
//shigh_t = source_t + (shigh - source_s) * (DetectPoint_tend - source_t) / (DetectPoint_send - source_s);
//shigh_z = source_z + (shigh - source_s) * (DetectPoint_zend - source_z) / (DetectPoint_send - source_s);
//zlow_t = source_t + (zlow - source_z) * (DetectPoint_tend - source_t) / (DetectPoint_zend - source_z + EPS);
//zlow_s = source_s + (zlow - source_z) * (DetectPoint_send - source_s) / (DetectPoint_zend - source_z + EPS);
//zhigh_t = source_t + (zhigh - source_z) * (DetectPoint_tend - source_t) / (DetectPoint_zend - source_z);
//zhigh_s = source_s + (zhigh - source_z) * (DetectPoint_send - source_s) / (DetectPoint_zend - source_z);
// double *Range = new double [6]; // XYXY small-big(number)
//if (tlow_s >= 0 && tlow_s <= shigh && tlow_z >= 0 && tlow_z <= zhigh)
//{
// T1 = tlow; S1 = tlow_s; Z1 = tlow_z;
// if (thigh_s >= 0 && thigh_s <= shigh && thigh_s != S1
// && thigh_z >= 0 && thigh_z <= zhigh && thigh_z != Z1)
// {
// T2 = thigh; S2 = thigh_s; Z2 = thigh_z;
// }
// else if (slow_t >= 0 && slow_t <= thigh && slow_t != T1
// && slow_z >= 0 && slow_z <= zhigh && slow_z != Z1)
// {
// T2 = slow_t; S2 = slow; Z2 = slow_z;
// }
// else if (shigh_t >= 0 && shigh_t <= thigh && shigh_t != T1
// && shigh_z >= 0 && shigh_z <= zhigh && shigh_z != Z1)
// {
// T2 = shigh_t; S2 = shigh; Z2 = shigh_z;
// }
// else if (zlow_t >= 0 && zlow_t <= thigh && zlow_t != T1
// && zlow_s >= 0 && zlow_s <= shigh && zlow_s != S1)
// {
// T2 = zlow_t; S2 = zlow_s; Z2 = zlow;
// }
// else if (zhigh_t >= 0 && zhigh_t <= thigh && zhigh_t != T1
// && zhigh_s >= 0 && zhigh_s <= shigh && zhigh_s != S1)
// {
// T2 = zhigh_t; S2 = zhigh_s; Z2 = zhigh;
// }
// else
// return;
//}
//else if (thigh_s >= 0 && thigh_s <= shigh && thigh_z >= 0 && thigh_z <= zhigh)
//{
// T1 = thigh; S1 = thigh_s; Z1 = thigh_z;
// if (slow_t >= 0 && slow_t <= thigh && slow_t != T1
// && slow_z >= 0 && slow_z <= zhigh && slow_z != Z1)
// {
// T2 = slow_t; S2 = slow; Z2 = slow_z;
// }
// else if (shigh_t >= 0 && shigh_t <= thigh && shigh_t != T1
// && shigh_z >= 0 && shigh_z <= zhigh && shigh_z != Z1)
// {
// T2 = shigh_t; S2 = shigh; Z2 = shigh_z;
// }
// else if (zlow_t >= 0 && zlow_t <= thigh && zlow_t != T1
// && zlow_s >= 0 && zlow_s <= shigh && zlow_s != S1)
// {
// T2 = zlow_t; S2 = zlow_s; Z2 = zlow;
// }
// else if (zhigh_t >= 0 && zhigh_t <= thigh && zhigh_t != T1
// && zhigh_s >= 0 && zhigh_s <= shigh && zhigh_s != S1)
// {
// T2 = zhigh_t; S2 = zhigh_s; Z2 = zhigh;
// }
// else
// return;
//}
//else if (slow_t >= 0 && slow_t <= thigh && slow_z >= 0 && slow_z <= zhigh)
//{
// T1 = slow_t; S1 = slow; Z1 = slow_z;
// if (shigh_t >= 0 && shigh_t <= thigh && shigh_t != T1
// && shigh_z >= 0 && shigh_z <= zhigh && shigh_z != Z1)
// {
// T2 = shigh_t; S2 = shigh; Z2 = shigh_z;
// }
// else if (zlow_t >= 0 && zlow_t <= thigh && zlow_t != T1
// && zlow_s >= 0 && zlow_s <= shigh && zlow_s != S1)
// {
// T2 = zlow_t; S2 = zlow_s; Z2 = zlow;
// }
// else if (zhigh_t >= 0 && zhigh_t <= thigh && zhigh_t != T1
// && zhigh_s >= 0 && zhigh_s <= shigh && zhigh_s != S1)
// {
// T2 = zhigh_t; S2 = zhigh_s; Z2 = zhigh;
// }
// else
// return;
//}
//else if (shigh_t >= 0 && shigh_t <= thigh && shigh_z >= 0 && shigh_z <= zhigh)
//{
// T1 = shigh_t; S1 = shigh; Z1 = shigh_z;
// if (zlow_t >= 0 && zlow_t <= thigh && zlow_t != T1
// && zlow_s >= 0 && zlow_s <= shigh && zlow_s != S1)
// {
// T2 = zlow_t; S2 = zlow_s; Z2 = zlow;
// }
// else if (zhigh_t >= 0 && zhigh_t <= thigh && zhigh_t != T1
// && zhigh_s >= 0 && zhigh_s <= shigh && zhigh_s != S1)
// {
// T2 = zhigh_t; S2 = zhigh_s; Z2 = zhigh;
// }
// else
// return;
//}
//else if (zlow_t >= 0 && zlow_t <= thigh && zlow_s >= 0 && zlow_s <= shigh)
//{
// T1 = zlow_t; S1 = zlow_s; Z1 = zlow;
// if (zhigh_t >= 0 && zhigh_t <= thigh && zhigh_t != T1
// && zhigh_s >= 0 && zhigh_s <= shigh && zhigh_s != S1)
// {
// T2 = zhigh_t; S2 = zhigh_s; Z2 = zhigh;
// }
// else
// return;
//}
//else
//{
// //dev_Projection[threadid] = threadid;
// return;
//}
//LengthinROI = Distance(T1, S1, Z1, T2, S2, Z2);
//if (T1 == T2 && S1 == S2 && Z1 == Z2) // to solve the special case
//{
// dev_Display[thread_id] += T1 * 100000 + S1 * 1000 + Z1 * 10;
// return;
//}
// secondly compute length in a single pixel, the process is very similar to the previous.
// because this time the line goes through the center point of the pixel. So some kind of symmetry happens.
// since the global function can not call exterior function, so the previous code will be copied here.
// actual Size in matlab coordinate
tlow = Tindex * Resolution_t; thigh = (Tindex + 1) * Resolution_t;
slow = Sindex * Resolution_s; shigh = (Sindex + 1) * Resolution_s;
zlow = Zindex * Resolution_z; zhigh = (Zindex + 1) * Resolution_z;
// compute the first and last point in the ROI
// using DetectPoint_end set up projection equation
tlow_s = source_s + (tlow - source_t) * (DetectPoint_send - source_s) / (DetectPoint_tend - source_t + EPS);
tlow_z = source_z + (tlow - source_t) * (DetectPoint_zend - source_z) / (DetectPoint_tend - source_t + EPS);
slow_t = source_t + (slow - source_s) * (DetectPoint_tend - source_t) / (DetectPoint_send - source_s + EPS);
slow_z = source_z + (slow - source_s) * (DetectPoint_zend - source_z) / (DetectPoint_send - source_s + EPS);
zlow_t = source_t + (zlow - source_z) * (DetectPoint_tend - source_t) / (DetectPoint_zend - source_z + EPS);
zlow_s = source_s + (zlow - source_z) * (DetectPoint_send - source_s) / (DetectPoint_zend - source_z + EPS);
// double *Range = new double [6]; // XYXY small-big(number)
T1 = 0; S1 = 0; Z1 = 0; /*T2 = 0; S2 = 0; Z2 = 0;*/
if (tlow_s >= slow && tlow_s <= shigh && tlow_z >= zlow && tlow_z <= zhigh)
{
T1 = tlow; S1 = tlow_s; Z1 = tlow_z;
// for the symmetry, there is no need to compute T2
}
else if (slow_t >= tlow && slow_t <= thigh && slow_z >= zlow && slow_z <= zhigh)
{
T1 = slow_t; S1 = slow; Z1 = slow_z;
// for the symmetry, there is no need to compute T2
}
else if (zlow_t >= tlow && zlow_t <= thigh && zlow_s >= slow && zlow_s <= shigh)
{
T1 = zlow_t; S1 = zlow_s; Z1 = zlow;
// for the symmetry, there is no need to compute T2
}
else
{
// dev_Projection[threadid] = threadid;
return;
}
LengthinPixel = 2 * Distance(T1, S1, Z1, DetectPoint_tend, DetectPoint_send, DetectPoint_zend);
//if (LengthinROI == 0)
// return;
backweight = LengthinPixel /*/ LengthinROI*/; // no need for normalization
thread_id = Zindex * (t_length * s_length) + Sindex * t_length + Tindex;
dev_Display[thread_id] += Display_pBeta * backweight;
}
}
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t BackPro(float *Display, const float *R, const float *Xigamadomain, const float *Pdomain,
const float *BetaScanRange, const double Distance, const int LBeta, const int LP, const int LXigama,
const double *Size, const int t_length, const int s_length, const int z_length)
{
mexPrintf("BackPro!\n");
float *dev_R = 0;
float *dev_BetaScanRange = 0, *dev_Pdomain = 0, *dev_Xigamadomain = 0;
double *dev_Size = 0;
float *dev_Display = 0;
float PInt = fabs(Pdomain[1] - Pdomain[0]);
float XigamaInt = fabs(Xigamadomain[1] - Xigamadomain[0]);
float BetaScanInt = fabs(BetaScanRange[1] - BetaScanRange[0]);
float maxP = MAX(Pdomain[0], Pdomain[LP - 1]);
float minP = MIN(Pdomain[0], Pdomain[LP - 1]);
float maxXigama = MAX(Xigamadomain[0], Xigamadomain[LXigama - 1]);
float minXigama = MIN(Xigamadomain[0], Xigamadomain[LXigama - 1]);
//mexPrintf("%lf %lf %lf %lf \n", maxGama, minGama, maxXigama, minXigama);
const long LDisplay = t_length * s_length * z_length;
const long LR = LP * LXigama * LBeta;
short thread_cubic_Bp_x = MIN(threadX, t_length);
short block_cubic_Bp_x = MIN(blockX, s_length);
const dim3 thread_cubic_Bp(thread_cubic_Bp_x, 1, 1);
const dim3 block_cubic_Bp(block_cubic_Bp_x, 1, 1);
dim3 thread_cubic_Bp_residual(1, 1, 1); // initial
dim3 block_cubic_Bp_residual(1, 1, 1); // initial
short TlengthResidual = t_length % threadX;
short SlengthResidual = s_length % blockX;
short T_Time = t_length / threadX;
short S_Time = s_length / blockX;
short T_start = 0;
short S_start = 0;
//float Beta = 0;
if (TlengthResidual != 0)
{
thread_cubic_Bp_residual.x = TlengthResidual;
}
if (SlengthResidual != 0)
{
block_cubic_Bp_residual.x = SlengthResidual;
}
hipError_t cudaStatus;
mexPrintf("start cuda\n");
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
mexPrintf("hipSetDevice failed! Do you have a CUDA-capable GPU installed? %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
mexPrintf("call for space in GPU\n");
// Allocate GPU buffers for four vectors (4 inputs).
cudaStatus = hipMalloc((void**)&dev_R, LR * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "dev_R hipMalloc failed!\n");
mexPrintf("dev_R hipMalloc failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_Pdomain, LP * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "dev_Pdomain hipMalloc failed!\n");
mexPrintf("dev_Pdomain hipMalloc failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_BetaScanRange, LBeta * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "dev_BetaScanRange hipMalloc failed!\n");
mexPrintf("dev_BetaScanRange hipMalloc failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_Xigamadomain, LXigama * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "dev_Xigamadomain hipMalloc failed!\n");
mexPrintf("dev_Xigamadomain hipMalloc failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
//mexPrintf("copy data in CPU to GPU\n");
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_R, R, LR * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy R failed!\n");
mexPrintf("hipMemcpy R failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipMemcpy(dev_Pdomain, Pdomain, LP * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy Gamadomain failed!\n");
mexPrintf("hipMemcpy Gamadomain failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipMemcpy(dev_BetaScanRange, BetaScanRange, LBeta * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy BetaScanRange failed!\n");
mexPrintf("hipMemcpy BetaScanRange failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipMemcpy(dev_Xigamadomain, Xigamadomain, LXigama * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy Xigamadomain failed!\n");
mexPrintf("hipMemcpy Xigamadomain failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
mexPrintf("start parallel computation\n");
mexPrintf("backprojection\n");
cudaStatus = hipMalloc((void**)&dev_Size, 3 * sizeof(double));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "dev_Size hipMalloc failed!\n");
mexPrintf("dev_Size hipMalloc failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipMemcpy(dev_Size, Size, 3 * sizeof(double), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy Size failed!\n");
mexPrintf("hipMemcpy Size failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// output
cudaStatus = hipMalloc((void**)&dev_Display, LDisplay * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "dev_Display hipMalloc failed!\n");
mexPrintf("dev_Display hipMalloc failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
hipMemset(dev_Display, 0, sizeof(float));
//Backprojection
for (int betaIndex = 0; betaIndex < LBeta; betaIndex++)
{
for (int numT = 0; numT < T_Time; numT++)
{
for (int numS = 0; numS < S_Time; numS++)
{
T_start = numT * threadX;
S_start = numS * blockX;
BackProjection3D << <block_cubic_Bp, thread_cubic_Bp >> > (dev_R, dev_Display, dev_Size, t_length, s_length, z_length,
BetaScanRange[betaIndex], Distance, dev_Pdomain, dev_Xigamadomain, PInt, XigamaInt, BetaScanInt, minP, maxP,
minXigama, maxXigama, betaIndex, LP, LXigama, T_start, S_start);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "BackProjection launch failed: %s\n", hipGetErrorString(cudaStatus));
mexPrintf("BackProjection launch failed %s\n", hipGetErrorString(cudaStatus));
mexPrintf("Error happens at betaIndex: %d\n", betaIndex);
goto Error;
}
}
}
if (TlengthResidual != 0)
{
T_start = t_length - TlengthResidual;
for (int numS = 0; numS < S_Time; numS++)
{
S_start = numS * blockX;
BackProjection3D << <block_cubic_Bp, thread_cubic_Bp_residual >> > (dev_R, dev_Display, dev_Size, t_length, s_length, z_length,
BetaScanRange[betaIndex], Distance, dev_Pdomain, dev_Xigamadomain, PInt, XigamaInt, BetaScanInt, minP, maxP,
minXigama, maxXigama, betaIndex, LP, LXigama, T_start, S_start);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "BackProjection launch failed: %s\n", hipGetErrorString(cudaStatus));
mexPrintf("BackProjection launch failed %s\n", hipGetErrorString(cudaStatus));
mexPrintf("Error happens at betaIndex: %d\n", betaIndex);
goto Error;
}
}
if (SlengthResidual != 0)
{
S_start = s_length - SlengthResidual;
BackProjection3D << <block_cubic_Bp_residual, thread_cubic_Bp_residual >> > (dev_R, dev_Display, dev_Size, t_length, s_length, z_length,
BetaScanRange[betaIndex], Distance, dev_Pdomain, dev_Xigamadomain, PInt, XigamaInt, BetaScanInt, minP, maxP,
minXigama, maxXigama, betaIndex, LP, LXigama, T_start, S_start);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "BackProjection launch failed: %s\n", hipGetErrorString(cudaStatus));
mexPrintf("BackProjection launch failed %s\n", hipGetErrorString(cudaStatus));
mexPrintf("Error happens at betaIndex: %d\n", betaIndex);
goto Error;
}
}
}
if (SlengthResidual != 0)
{
S_start = s_length - SlengthResidual;
for (int numT = 0; numT < T_Time; numT++)
{
T_start = numT * threadX;
BackProjection3D << <block_cubic_Bp_residual, thread_cubic_Bp >> > (dev_R, dev_Display, dev_Size, t_length, s_length, z_length,
BetaScanRange[betaIndex], Distance, dev_Pdomain, dev_Xigamadomain, PInt, XigamaInt, BetaScanInt, minP, maxP,
minXigama, maxXigama, betaIndex, LP, LXigama, T_start, S_start);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "BackProjection launch failed: %s\n", hipGetErrorString(cudaStatus));
mexPrintf("BackProjection launch failed %s\n", hipGetErrorString(cudaStatus));
mexPrintf("Error happens at betaIndex: %d\n", betaIndex);
goto Error;
}
}
}
}
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "BackProjection launch failed: %s\n", hipGetErrorString(cudaStatus));
mexPrintf("BackProjection launch failed %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
mexPrintf("hipDeviceSynchronize returned error code %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
//Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(Display, dev_Display, LDisplay * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n");
mexPrintf("hipMemcpy dev_Display failed! %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
Error:
hipFree(dev_R);
hipFree(dev_BetaScanRange);
hipFree(dev_Pdomain);
hipFree(dev_Xigamadomain);
hipFree(dev_Display);
hipFree(dev_Size);
mexPrintf("Exit Bakprojection3D\n");
return cudaStatus;
} | aae39ece574e479fde6af0960441f8c07c7f0920.cu | #include "Backprojection3D.h"
// 2018/11/16 apply GPU acceleration
//__device__ const double PI = 3.141592653589793;
__device__ const double EPS = 1e-15;
// thiss
__global__ void BackProjection3D(const float *dev_R, float *dev_Display, const double *dev_Size,
const int t_length, const int s_length, const int z_length, const float Beta, const double Distance,
const float *dev_Pdomain, const float *dev_Xigamadomain, const float PInt, const float XigamaInt,
const float BetaScanInt, const float minP, const float maxP, const float minXigama, const float maxXigama,
const int betaIndex, const int LP, const int LXigama, const int T_start, const int S_start)
{
const unsigned int Tindex = T_start + threadIdx.x;
const unsigned int Sindex = S_start + blockIdx.x;
//const unsigned int Zindex = blockIdx.y;
unsigned long thread_id;
// initialize
const double Resolution_t = 1.0 * dev_Size[0] / t_length;
const double Resolution_s = 1.0 * dev_Size[1] / s_length;
const double Resolution_z = 1.0 * dev_Size[2] / z_length;
// rotation center
double Center_t = dev_Size[0] / 2;
double Center_s = dev_Size[1] / 2;
double Center_z = dev_Size[2] / 2;
// this is a little different from code on MATLAB
// image pixel in ground coordinate
double image_t = (Tindex + 0.5) * Resolution_t - Center_t;
double image_s = (Sindex + 0.5) * Resolution_s - Center_s;
double image_z;
// rotate in ground coordinate
double dect_t = image_t * cos(Beta) + image_s * sin(Beta);
double dect_s = -image_t * sin(Beta) + image_s * cos(Beta);
double dect_z;
// define the projection position on the detector
double LengthRatio = Distance / (Distance - dect_s);
double Xigama;
double P = dect_t * LengthRatio;
unsigned short XigamaN1index = 0, XigamaN2index = 0, PN1index = 0, PN2index = 0;
double P_domain1 = 0, P_domain2 = 0, Xigama_domain1 = 0, Xigama_domain2 = 0;
double Xig1 = 0, Xig2 = 0, P1 = 0, P2 = 0;
double Display_pBeta = 0;
double backweight = 0;
//double LengthinROI = 0;
// according to euler equation
// define the source in matlab coordinate
double source_t = Center_t - Distance * sin(Beta), source_s = Center_s + Distance * cos(Beta), source_z;
// in matlab coordinate
// assume the projection line go through the center of the current pixel
double DetectPoint_tend = image_t + Center_t, DetectPoint_send = image_s + Center_s, DetectPoint_zend;
// actual Size
double tlow = 0, thigh = t_length * Resolution_t, slow = 0, shigh = s_length * Resolution_s,
zlow = 0, zhigh = z_length * Resolution_z;
double tlow_s, tlow_z, slow_t, slow_z, zlow_t, zlow_s/*, thigh_s, thigh_z, shigh_t, shigh_z, zhigh_t, zhigh_s*/;
double T1 = 0, S1 = 0, Z1 = 0/*, T2 = 0, S2 = 0, Z2 = 0*/;
double LengthinPixel;
for (short Zindex = 0; Zindex < z_length; Zindex++)
{
image_z = (Zindex + 0.5) * Resolution_z - Center_z;
dect_z = image_z;
Xigama = dect_z * LengthRatio;
if ((P >= minP) && (P < maxP) && (Xigama >= minXigama) && (Xigama < maxXigama))
{
XigamaN1index = floor(fabs(Xigama - dev_Xigamadomain[0]) / XigamaInt);
XigamaN2index = XigamaN1index + 1;
PN1index = floor(fabs(P - dev_Pdomain[0]) / PInt);
PN2index = PN1index + 1;
P_domain1 = dev_Pdomain[PN1index]; P_domain2 = dev_Pdomain[PN2index];
Xigama_domain1 = dev_Xigamadomain[XigamaN1index]; Xigama_domain2 = dev_Xigamadomain[XigamaN2index];
//bilinear interpolation
Xig1 = fabs(Xigama - Xigama_domain1); Xig2 = fabs(Xigama_domain2 - Xigama);
P1 = fabs(P - P_domain1); P2 = fabs(P_domain2 - P);
Display_pBeta = (Xig2 * P2 * dev_R[betaIndex * LP * LXigama + XigamaN1index * LP + PN1index]
+ Xig1 * P2 * dev_R[betaIndex * LP * LXigama + XigamaN2index * LP + PN1index] + Xig2 * P1 * dev_R[betaIndex * LP * LXigama + XigamaN1index * LP + PN2index]
+ Xig1 * P1 * dev_R[betaIndex * LP * LXigama + XigamaN2index * LP + PN2index]) / (PInt * XigamaInt);
// the way to compute backweight is to get the cross length in the specific pixel and the whole ROI
// according to euler equation
// define the source in matlab coordinate
source_z = Center_z;
// in matlab coordinate
// assume the projection line go through the center of the current pixel
DetectPoint_zend = image_z + Center_z;
// first compute length in whole ROI
// compute the first and last point in the ROI
// using DetectPoint_end set up projection equation
//tlow_s = source_s + (tlow - source_t) * (DetectPoint_send - source_s) / (DetectPoint_tend - source_t + EPS);
//tlow_z = source_z + (tlow - source_t) * (DetectPoint_zend - source_z) / (DetectPoint_tend - source_t + EPS);
//thigh_s = source_s + (thigh - source_t) * (DetectPoint_send - source_s) / (DetectPoint_tend - source_t);
//thigh_z = source_z + (thigh - source_t) * (DetectPoint_zend - source_z) / (DetectPoint_tend - source_t);
//slow_t = source_t + (slow - source_s) * (DetectPoint_tend - source_t) / (DetectPoint_send - source_s + EPS);
//slow_z = source_z + (slow - source_s) * (DetectPoint_zend - source_z) / (DetectPoint_send - source_s + EPS);
//shigh_t = source_t + (shigh - source_s) * (DetectPoint_tend - source_t) / (DetectPoint_send - source_s);
//shigh_z = source_z + (shigh - source_s) * (DetectPoint_zend - source_z) / (DetectPoint_send - source_s);
//zlow_t = source_t + (zlow - source_z) * (DetectPoint_tend - source_t) / (DetectPoint_zend - source_z + EPS);
//zlow_s = source_s + (zlow - source_z) * (DetectPoint_send - source_s) / (DetectPoint_zend - source_z + EPS);
//zhigh_t = source_t + (zhigh - source_z) * (DetectPoint_tend - source_t) / (DetectPoint_zend - source_z);
//zhigh_s = source_s + (zhigh - source_z) * (DetectPoint_send - source_s) / (DetectPoint_zend - source_z);
// double *Range = new double [6]; // XYXY small-big(number)
//if (tlow_s >= 0 && tlow_s <= shigh && tlow_z >= 0 && tlow_z <= zhigh)
//{
// T1 = tlow; S1 = tlow_s; Z1 = tlow_z;
// if (thigh_s >= 0 && thigh_s <= shigh && thigh_s != S1
// && thigh_z >= 0 && thigh_z <= zhigh && thigh_z != Z1)
// {
// T2 = thigh; S2 = thigh_s; Z2 = thigh_z;
// }
// else if (slow_t >= 0 && slow_t <= thigh && slow_t != T1
// && slow_z >= 0 && slow_z <= zhigh && slow_z != Z1)
// {
// T2 = slow_t; S2 = slow; Z2 = slow_z;
// }
// else if (shigh_t >= 0 && shigh_t <= thigh && shigh_t != T1
// && shigh_z >= 0 && shigh_z <= zhigh && shigh_z != Z1)
// {
// T2 = shigh_t; S2 = shigh; Z2 = shigh_z;
// }
// else if (zlow_t >= 0 && zlow_t <= thigh && zlow_t != T1
// && zlow_s >= 0 && zlow_s <= shigh && zlow_s != S1)
// {
// T2 = zlow_t; S2 = zlow_s; Z2 = zlow;
// }
// else if (zhigh_t >= 0 && zhigh_t <= thigh && zhigh_t != T1
// && zhigh_s >= 0 && zhigh_s <= shigh && zhigh_s != S1)
// {
// T2 = zhigh_t; S2 = zhigh_s; Z2 = zhigh;
// }
// else
// return;
//}
//else if (thigh_s >= 0 && thigh_s <= shigh && thigh_z >= 0 && thigh_z <= zhigh)
//{
// T1 = thigh; S1 = thigh_s; Z1 = thigh_z;
// if (slow_t >= 0 && slow_t <= thigh && slow_t != T1
// && slow_z >= 0 && slow_z <= zhigh && slow_z != Z1)
// {
// T2 = slow_t; S2 = slow; Z2 = slow_z;
// }
// else if (shigh_t >= 0 && shigh_t <= thigh && shigh_t != T1
// && shigh_z >= 0 && shigh_z <= zhigh && shigh_z != Z1)
// {
// T2 = shigh_t; S2 = shigh; Z2 = shigh_z;
// }
// else if (zlow_t >= 0 && zlow_t <= thigh && zlow_t != T1
// && zlow_s >= 0 && zlow_s <= shigh && zlow_s != S1)
// {
// T2 = zlow_t; S2 = zlow_s; Z2 = zlow;
// }
// else if (zhigh_t >= 0 && zhigh_t <= thigh && zhigh_t != T1
// && zhigh_s >= 0 && zhigh_s <= shigh && zhigh_s != S1)
// {
// T2 = zhigh_t; S2 = zhigh_s; Z2 = zhigh;
// }
// else
// return;
//}
//else if (slow_t >= 0 && slow_t <= thigh && slow_z >= 0 && slow_z <= zhigh)
//{
// T1 = slow_t; S1 = slow; Z1 = slow_z;
// if (shigh_t >= 0 && shigh_t <= thigh && shigh_t != T1
// && shigh_z >= 0 && shigh_z <= zhigh && shigh_z != Z1)
// {
// T2 = shigh_t; S2 = shigh; Z2 = shigh_z;
// }
// else if (zlow_t >= 0 && zlow_t <= thigh && zlow_t != T1
// && zlow_s >= 0 && zlow_s <= shigh && zlow_s != S1)
// {
// T2 = zlow_t; S2 = zlow_s; Z2 = zlow;
// }
// else if (zhigh_t >= 0 && zhigh_t <= thigh && zhigh_t != T1
// && zhigh_s >= 0 && zhigh_s <= shigh && zhigh_s != S1)
// {
// T2 = zhigh_t; S2 = zhigh_s; Z2 = zhigh;
// }
// else
// return;
//}
//else if (shigh_t >= 0 && shigh_t <= thigh && shigh_z >= 0 && shigh_z <= zhigh)
//{
// T1 = shigh_t; S1 = shigh; Z1 = shigh_z;
// if (zlow_t >= 0 && zlow_t <= thigh && zlow_t != T1
// && zlow_s >= 0 && zlow_s <= shigh && zlow_s != S1)
// {
// T2 = zlow_t; S2 = zlow_s; Z2 = zlow;
// }
// else if (zhigh_t >= 0 && zhigh_t <= thigh && zhigh_t != T1
// && zhigh_s >= 0 && zhigh_s <= shigh && zhigh_s != S1)
// {
// T2 = zhigh_t; S2 = zhigh_s; Z2 = zhigh;
// }
// else
// return;
//}
//else if (zlow_t >= 0 && zlow_t <= thigh && zlow_s >= 0 && zlow_s <= shigh)
//{
// T1 = zlow_t; S1 = zlow_s; Z1 = zlow;
// if (zhigh_t >= 0 && zhigh_t <= thigh && zhigh_t != T1
// && zhigh_s >= 0 && zhigh_s <= shigh && zhigh_s != S1)
// {
// T2 = zhigh_t; S2 = zhigh_s; Z2 = zhigh;
// }
// else
// return;
//}
//else
//{
// //dev_Projection[threadid] = threadid;
// return;
//}
//LengthinROI = Distance(T1, S1, Z1, T2, S2, Z2);
//if (T1 == T2 && S1 == S2 && Z1 == Z2) // to solve the special case
//{
// dev_Display[thread_id] += T1 * 100000 + S1 * 1000 + Z1 * 10;
// return;
//}
// secondly compute length in a single pixel, the process is very similar to the previous.
// because this time the line goes through the center point of the pixel. So some kind of symmetry happens.
// since the global function can not call exterior function, so the previous code will be copied here.
// actual Size in matlab coordinate
tlow = Tindex * Resolution_t; thigh = (Tindex + 1) * Resolution_t;
slow = Sindex * Resolution_s; shigh = (Sindex + 1) * Resolution_s;
zlow = Zindex * Resolution_z; zhigh = (Zindex + 1) * Resolution_z;
// compute the first and last point in the ROI
// using DetectPoint_end set up projection equation
tlow_s = source_s + (tlow - source_t) * (DetectPoint_send - source_s) / (DetectPoint_tend - source_t + EPS);
tlow_z = source_z + (tlow - source_t) * (DetectPoint_zend - source_z) / (DetectPoint_tend - source_t + EPS);
slow_t = source_t + (slow - source_s) * (DetectPoint_tend - source_t) / (DetectPoint_send - source_s + EPS);
slow_z = source_z + (slow - source_s) * (DetectPoint_zend - source_z) / (DetectPoint_send - source_s + EPS);
zlow_t = source_t + (zlow - source_z) * (DetectPoint_tend - source_t) / (DetectPoint_zend - source_z + EPS);
zlow_s = source_s + (zlow - source_z) * (DetectPoint_send - source_s) / (DetectPoint_zend - source_z + EPS);
// double *Range = new double [6]; // XYXY small-big(number)
T1 = 0; S1 = 0; Z1 = 0; /*T2 = 0; S2 = 0; Z2 = 0;*/
if (tlow_s >= slow && tlow_s <= shigh && tlow_z >= zlow && tlow_z <= zhigh)
{
T1 = tlow; S1 = tlow_s; Z1 = tlow_z;
// for the symmetry, there is no need to compute T2
}
else if (slow_t >= tlow && slow_t <= thigh && slow_z >= zlow && slow_z <= zhigh)
{
T1 = slow_t; S1 = slow; Z1 = slow_z;
// for the symmetry, there is no need to compute T2
}
else if (zlow_t >= tlow && zlow_t <= thigh && zlow_s >= slow && zlow_s <= shigh)
{
T1 = zlow_t; S1 = zlow_s; Z1 = zlow;
// for the symmetry, there is no need to compute T2
}
else
{
// dev_Projection[threadid] = threadid;
return;
}
LengthinPixel = 2 * Distance(T1, S1, Z1, DetectPoint_tend, DetectPoint_send, DetectPoint_zend);
//if (LengthinROI == 0)
// return;
backweight = LengthinPixel /*/ LengthinROI*/; // no need for normalization
thread_id = Zindex * (t_length * s_length) + Sindex * t_length + Tindex;
dev_Display[thread_id] += Display_pBeta * backweight;
}
}
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t BackPro(float *Display, const float *R, const float *Xigamadomain, const float *Pdomain,
const float *BetaScanRange, const double Distance, const int LBeta, const int LP, const int LXigama,
const double *Size, const int t_length, const int s_length, const int z_length)
{
mexPrintf("BackPro!\n");
float *dev_R = 0;
float *dev_BetaScanRange = 0, *dev_Pdomain = 0, *dev_Xigamadomain = 0;
double *dev_Size = 0;
float *dev_Display = 0;
float PInt = fabs(Pdomain[1] - Pdomain[0]);
float XigamaInt = fabs(Xigamadomain[1] - Xigamadomain[0]);
float BetaScanInt = fabs(BetaScanRange[1] - BetaScanRange[0]);
float maxP = MAX(Pdomain[0], Pdomain[LP - 1]);
float minP = MIN(Pdomain[0], Pdomain[LP - 1]);
float maxXigama = MAX(Xigamadomain[0], Xigamadomain[LXigama - 1]);
float minXigama = MIN(Xigamadomain[0], Xigamadomain[LXigama - 1]);
//mexPrintf("%lf %lf %lf %lf \n", maxGama, minGama, maxXigama, minXigama);
const long LDisplay = t_length * s_length * z_length;
const long LR = LP * LXigama * LBeta;
short thread_cubic_Bp_x = MIN(threadX, t_length);
short block_cubic_Bp_x = MIN(blockX, s_length);
const dim3 thread_cubic_Bp(thread_cubic_Bp_x, 1, 1);
const dim3 block_cubic_Bp(block_cubic_Bp_x, 1, 1);
dim3 thread_cubic_Bp_residual(1, 1, 1); // initial
dim3 block_cubic_Bp_residual(1, 1, 1); // initial
short TlengthResidual = t_length % threadX;
short SlengthResidual = s_length % blockX;
short T_Time = t_length / threadX;
short S_Time = s_length / blockX;
short T_start = 0;
short S_start = 0;
//float Beta = 0;
if (TlengthResidual != 0)
{
thread_cubic_Bp_residual.x = TlengthResidual;
}
if (SlengthResidual != 0)
{
block_cubic_Bp_residual.x = SlengthResidual;
}
cudaError_t cudaStatus;
mexPrintf("start cuda\n");
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
mexPrintf("cudaSetDevice failed! Do you have a CUDA-capable GPU installed? %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
mexPrintf("call for space in GPU\n");
// Allocate GPU buffers for four vectors (4 inputs).
cudaStatus = cudaMalloc((void**)&dev_R, LR * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "dev_R cudaMalloc failed!\n");
mexPrintf("dev_R cudaMalloc failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_Pdomain, LP * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "dev_Pdomain cudaMalloc failed!\n");
mexPrintf("dev_Pdomain cudaMalloc failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_BetaScanRange, LBeta * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "dev_BetaScanRange cudaMalloc failed!\n");
mexPrintf("dev_BetaScanRange cudaMalloc failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_Xigamadomain, LXigama * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "dev_Xigamadomain cudaMalloc failed!\n");
mexPrintf("dev_Xigamadomain cudaMalloc failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
//mexPrintf("copy data in CPU to GPU\n");
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_R, R, LR * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy R failed!\n");
mexPrintf("cudaMemcpy R failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaMemcpy(dev_Pdomain, Pdomain, LP * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy Gamadomain failed!\n");
mexPrintf("cudaMemcpy Gamadomain failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaMemcpy(dev_BetaScanRange, BetaScanRange, LBeta * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy BetaScanRange failed!\n");
mexPrintf("cudaMemcpy BetaScanRange failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaMemcpy(dev_Xigamadomain, Xigamadomain, LXigama * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy Xigamadomain failed!\n");
mexPrintf("cudaMemcpy Xigamadomain failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
mexPrintf("start parallel computation\n");
mexPrintf("backprojection\n");
cudaStatus = cudaMalloc((void**)&dev_Size, 3 * sizeof(double));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "dev_Size cudaMalloc failed!\n");
mexPrintf("dev_Size cudaMalloc failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaMemcpy(dev_Size, Size, 3 * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy Size failed!\n");
mexPrintf("cudaMemcpy Size failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// output
cudaStatus = cudaMalloc((void**)&dev_Display, LDisplay * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "dev_Display cudaMalloc failed!\n");
mexPrintf("dev_Display cudaMalloc failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaMemset(dev_Display, 0, sizeof(float));
//Backprojection
for (int betaIndex = 0; betaIndex < LBeta; betaIndex++)
{
for (int numT = 0; numT < T_Time; numT++)
{
for (int numS = 0; numS < S_Time; numS++)
{
T_start = numT * threadX;
S_start = numS * blockX;
BackProjection3D << <block_cubic_Bp, thread_cubic_Bp >> > (dev_R, dev_Display, dev_Size, t_length, s_length, z_length,
BetaScanRange[betaIndex], Distance, dev_Pdomain, dev_Xigamadomain, PInt, XigamaInt, BetaScanInt, minP, maxP,
minXigama, maxXigama, betaIndex, LP, LXigama, T_start, S_start);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "BackProjection launch failed: %s\n", cudaGetErrorString(cudaStatus));
mexPrintf("BackProjection launch failed %s\n", cudaGetErrorString(cudaStatus));
mexPrintf("Error happens at betaIndex: %d\n", betaIndex);
goto Error;
}
}
}
if (TlengthResidual != 0)
{
T_start = t_length - TlengthResidual;
for (int numS = 0; numS < S_Time; numS++)
{
S_start = numS * blockX;
BackProjection3D << <block_cubic_Bp, thread_cubic_Bp_residual >> > (dev_R, dev_Display, dev_Size, t_length, s_length, z_length,
BetaScanRange[betaIndex], Distance, dev_Pdomain, dev_Xigamadomain, PInt, XigamaInt, BetaScanInt, minP, maxP,
minXigama, maxXigama, betaIndex, LP, LXigama, T_start, S_start);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "BackProjection launch failed: %s\n", cudaGetErrorString(cudaStatus));
mexPrintf("BackProjection launch failed %s\n", cudaGetErrorString(cudaStatus));
mexPrintf("Error happens at betaIndex: %d\n", betaIndex);
goto Error;
}
}
if (SlengthResidual != 0)
{
S_start = s_length - SlengthResidual;
BackProjection3D << <block_cubic_Bp_residual, thread_cubic_Bp_residual >> > (dev_R, dev_Display, dev_Size, t_length, s_length, z_length,
BetaScanRange[betaIndex], Distance, dev_Pdomain, dev_Xigamadomain, PInt, XigamaInt, BetaScanInt, minP, maxP,
minXigama, maxXigama, betaIndex, LP, LXigama, T_start, S_start);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "BackProjection launch failed: %s\n", cudaGetErrorString(cudaStatus));
mexPrintf("BackProjection launch failed %s\n", cudaGetErrorString(cudaStatus));
mexPrintf("Error happens at betaIndex: %d\n", betaIndex);
goto Error;
}
}
}
if (SlengthResidual != 0)
{
S_start = s_length - SlengthResidual;
for (int numT = 0; numT < T_Time; numT++)
{
T_start = numT * threadX;
BackProjection3D << <block_cubic_Bp_residual, thread_cubic_Bp >> > (dev_R, dev_Display, dev_Size, t_length, s_length, z_length,
BetaScanRange[betaIndex], Distance, dev_Pdomain, dev_Xigamadomain, PInt, XigamaInt, BetaScanInt, minP, maxP,
minXigama, maxXigama, betaIndex, LP, LXigama, T_start, S_start);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "BackProjection launch failed: %s\n", cudaGetErrorString(cudaStatus));
mexPrintf("BackProjection launch failed %s\n", cudaGetErrorString(cudaStatus));
mexPrintf("Error happens at betaIndex: %d\n", betaIndex);
goto Error;
}
}
}
}
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "BackProjection launch failed: %s\n", cudaGetErrorString(cudaStatus));
mexPrintf("BackProjection launch failed %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
mexPrintf("cudaDeviceSynchronize returned error code %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
//Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(Display, dev_Display, LDisplay * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n");
mexPrintf("cudaMemcpy dev_Display failed! %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
Error:
cudaFree(dev_R);
cudaFree(dev_BetaScanRange);
cudaFree(dev_Pdomain);
cudaFree(dev_Xigamadomain);
cudaFree(dev_Display);
cudaFree(dev_Size);
mexPrintf("Exit Bakprojection3D\n");
return cudaStatus;
} |
0aa63f701e8e03beeb1fbf09b2a360dd7ba19208.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@precisions normal d -> d
@author Peng Du
*/
#include "common_magma.h"
#define qmod(a,b) ((a)-(__mul24((b),(a)/(b))))
#define hipblasDgemm magmablas_dgemm
__global__ void
b_copy_kernel (int M, int N, double *b, int ldb, double *d_x, int ldx);
extern "C"
void diag_dtrtri (magma_int_t M, char uplo, char diag, const double *A, double *d_dinvA, magma_int_t lda);
#define b_copy(); dim3 dimBlock((M>=MAX_THREAD_PER_BLOCK)?MAX_THREAD_PER_BLOCK:(WARP_SIZE*((M/WARP_SIZE)+(M%WARP_SIZE!=0))), 1);\
dim3 dimGrid(M/dimBlock.x+(M%dimBlock.x!=0), N);\
hipLaunchKernelGGL(( b_copy_kernel), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , M, N, b, ldb, d_x, M);
//magma_device_sync();
#define MAX_THREAD_PER_BLOCK 512
#define WARP_SIZE 32
#define BLOCK_SIZE 16 // inner blocking size, <=32
#define NB 128// outer blocking size, >BLOCK_SIZE
/*
* magmablas_dtrsm
*/
extern "C"
void magmablas_dtrsm_work( char side, char uplo, char tran, char diag, magma_int_t M, magma_int_t N,
double alpha, const double* A, magma_int_t lda, double* b, magma_int_t ldb,
int flag, double *d_dinvA, double *d_x )
{
/* -- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
Purpose
=======
dtrsm solves one of the matrix equations on gpu
op( a )*x = alpha*b, or x*op( a ) = alpha*b,
where alpha is a scalar, x and b are m by n matrices, a is a unit, or
non-unit, upper or lower triangular matrix and op( a ) is one of
op( A ) = A or op( A ) = A'.
The matrix X is overwritten on B.
When M or N is not a multiple of blocking size, which is 32 for now, hipblasDtrsm will
be called instead. There soon will not be this limitation both for arbitrary problem
size and blocking size.
This is an asynchronous version of magmablas_dtrsm with "workspace" as an argument.
Arguments
==========
side - CHARACTER*1.
On entry, side specifies whether op( A ) appears on the left
or right of X as follows:
side = 'L' or 'l' op( A )*X = alpha*B.
side = 'R' or 'r' X*op( A ) = alpha*B.
Unchanged on exit.
uplo - CHARACTER*1.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
uplo = 'U' or 'u' A is an upper triangular matrix.
uplo = 'L' or 'l' A is a lower triangular matrix.
Unchanged on exit.
tran - CHARACTER*1.
On entry, tran specifies the form of op( A ) to be used in
the matrix multiplication as follows:
tran = 'N' or 'n' op( A ) = A.
tran = 'T' or 't' op( A ) = A'.
tran = 'C' or 'c' op( A ) = A'.
Unchanged on exit.
diag - CHARACTER*1.
On entry, diag specifies whether or not A is unit triangular
as follows:
diag = 'U' or 'u' A is assumed to be unit triangular.
diag = 'N' or 'n' A is not assumed to be unit
triangular.
Unchanged on exit.
m - INTEGER.
On entry, m specifies the number of rows of B. m must be at
least zero.
Unchanged on exit.
n - INTEGER.
On entry, n specifies the number of columns of B. n must be
at least zero.
Unchanged on exit.
alpha - REAL.
On entry, alpha specifies the scalar alpha. When alpha is
zero then A is not referenced and B need not be set before
entry.
Unchanged on exit.
A - REAL array of DIMENSION ( lda, k ), where k is m
when side = 'L' or 'l' and is n when side = 'R' or 'r'.
Before entry with uplo = 'U' or 'u', the leading k by k
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = 'L' or 'l', the leading k by k
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = 'U' or 'u', the diagonal elements of
A are not referenced either, but are assumed to be unity.
Unchanged on exit.
lda - INTEGER.
On entry, lda specifies the first dimension of A as declared
in the calling (sub) program. When side = 'L' or 'l' then
lda must be at least max( 1, m ), when side = 'R' or 'r'
then lda must be at least max( 1, n ).
Unchanged on exit.
b - REAL array of DIMENSION ( ldb, n ).
Before entry, the leading m by n part of the array B must
contain the right-hand side matrix B, and on exit is
overwritten by the solution matrix X.
ldb - INTEGER.
On entry, ldb specifies the first dimension of B as declared
in the calling (sub) program. ldb must be at least
max( 1, m ).
Unchanged on exit.
d_dinvA - workspace of size NB*((M+NB-1)/NB))*NB, on device.
d_x - workspace of size N*M, on device.
Level 3 Blas routine.
===================================================================== */
int i;
/* quick return on wrong size */
if (M<=0 || N<=0)
return;
if (side == 'l' || side == 'L')
{
/* invert the diagonals */
if (flag == 1) {
diag_dtrtri (M, uplo, diag, A, d_dinvA, lda);
}
if (tran == 'N' || tran == 'n')
/* the non-transpose case */
{
if (uplo == 'L' || uplo == 'l')
{
/* the lower case */
/* handle the first block seperately with alpha */
int MM = min (NB, M);
hipblasDgemm ('N', 'N', MM, N, MM, alpha, d_dinvA, NB, b, ldb, 0, d_x, M);
if (NB>=M)
{
b_copy();
return;
}
hipblasDgemm ('N', 'N', M-NB, N, NB, -1.0, A+NB, lda, d_x, M, alpha, b+NB, ldb);
/* the rest blocks */
for (i=NB; i<M; i+=NB)
{
MM = min (M-i, NB);
hipblasDgemm ('N', 'N', MM, N, MM, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M);
if (i+NB>=M)
break;
hipblasDgemm ('N', 'N', M-i-NB, N, NB, -1.0, A+i*lda+i+NB, lda, d_x+i, M, 1.0, b+i+NB, ldb);
}
}
else
{
/* the upper case */
/* handle the first block seperately with alpha */
int MM = (M%NB==0)?NB:(M%NB);
i = M-MM;
hipblasDgemm ('N', 'N', MM, N, MM, alpha, d_dinvA+i*NB, NB, b+i, ldb, 0.0, d_x+i, M);
if (i-NB<0)
{
b_copy();
return;
}
hipblasDgemm ('N', 'N', i, N, MM, -1.0, A+i*lda, lda, d_x+i, M, alpha, b, ldb);
/* the rest blocks */
for (i=M-MM-NB; i>=0; i-=NB)
{
hipblasDgemm ('N', 'N', NB, N, NB, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0.0, d_x+i, M);
if (i-NB<0)
break;
hipblasDgemm ('N', 'N', i, N, NB, -1.0, A+i*lda, lda, d_x+i, M, 1.0, b, ldb);
}
}
}
else
/* the transpose case */
{
if (uplo == 'L' || uplo == 'l')
{
/* the lower case */
/* handle the first block seperately with alpha */
int MM = (M%NB==0)?NB:(M%NB);
i=M-MM;
hipblasDgemm ('T', 'N', MM, N, MM, alpha, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M);
if (i-NB<0)
{
b_copy();
return;
}
hipblasDgemm ('T', 'N', i, N, MM, -1.0, A+i, lda, d_x+i, M, alpha, b, ldb);
/* the rest blocks */
for (i=M-MM-NB; i>=0; i-=NB)
{
hipblasDgemm ('T', 'N', NB, N, NB, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M);
if (i-NB<0)
break;
hipblasDgemm ('T', 'N', i, N, NB, -1.0, A+i, lda, d_x+i, M, 1.0, b, ldb);
}
}
else
{
/* the upper case */
/* handle the first block seperately with alpha */
int MM = min (NB, M);
hipblasDgemm ('T', 'N', MM, N, MM, alpha, d_dinvA, NB, b, ldb, 0, d_x, M);
if (NB>=M)
{
b_copy();
return;
}
hipblasDgemm ('T', 'N', M-NB, N, NB, -1.0, A+(NB)*lda, lda, d_x, M, alpha, b+NB, ldb);
/* the rest blocks */
for (i=NB; i<M; i+=NB)
{
MM = min (M-i, NB);
hipblasDgemm ('T', 'N', MM, N, MM, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M);
if (i+NB>=M)
break;
hipblasDgemm ('T', 'N', M-i-NB, N, NB, -1.0, A+(i+NB)*lda+i, lda, d_x+i, M, 1.0, b+i+NB, ldb);
}
}
}
}
else
{ // side=R
/* invert the diagonals */
if (flag == 1) {
diag_dtrtri (N, uplo, diag, A, d_dinvA, lda);
}
if (tran == 'N' || tran == 'n')
/* the non-transpose case */
{
if (uplo == 'L' || uplo == 'l')
{
/* the lower case */
/* handle the first block seperately with alpha */
int NN = (N%NB==0)?NB:(N%NB);
i=N-NN;
hipblasDgemm ('N', 'N', M, NN, NN, alpha, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M);
if (i-NB<0)
{
b_copy();
return;
}
hipblasDgemm ('N', 'N', M, i, NN, -1.0, d_x+i*M, M, A+i, lda, alpha, b, ldb);
/* the rest blocks */
for (i=N-NN-NB; i>=0; i-=NB)
{
hipblasDgemm ('N', 'N', M, NB, NB, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M);
if (i-NB<0)
break;
hipblasDgemm ('N', 'N', M, i, NB, -1.0, d_x+i*M, M, A+i, lda, 1.0, b, ldb);
}
}
else
{
/* the upper case */
/* handle the first block seperately with alpha */
int NN = min(NB, N);
hipblasDgemm ('N', 'N', M, NN, NN, alpha, b, ldb, d_dinvA, NB, 0, d_x, M);
if (NB>=N)
{
b_copy();
return;
}
hipblasDgemm ('N', 'N', M, N-NB, NB, -1.0, d_x, M, A+NB*lda, lda, alpha, b+NB*ldb, ldb);
/* the rest blocks */
for (i=NB; i<N; i+=NB)
{
NN = min(NB, N-i);
hipblasDgemm ('N', 'N', M, NN, NN, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0, d_x+i*M, M);
if (i+NB>=N)
break;
hipblasDgemm ('N', 'N', M, N-i-NB, NB, -1.0, d_x+i*M, M, A+(i+NB)*lda+i, lda, 1.0, b+(i+NB)*ldb, ldb);
}
}
}
else
/* the transpose case */
{
if (uplo == 'L' || uplo == 'l')
{
/* the lower case */
/* handle the first block seperately with alpha */
int NN = min(NB, N);
hipblasDgemm ('N', 'T', M, NN, NN, alpha, b, ldb, d_dinvA, NB, 0, d_x, M);
if (NB>=N)
{
b_copy();
return;
}
hipblasDgemm ('N', 'T', M, N-NB, NB, -1.0, d_x, M, A+NB, lda, alpha, b+NB*ldb, ldb);
/* the rest blocks */
for (i=NB; i<N; i+=NB)
{
NN = min(NB, N-i);
hipblasDgemm ('N', 'T', M, NN, NN, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0, d_x+i*M, M);
if (i+NB>=N)
break;
hipblasDgemm ('N', 'T', M, N-i-NB, NB, -1.0, d_x+i*M, M, A+i*lda+NB+i, lda, 1.0, b+(i+NB)*ldb, ldb);
}
}
else
{
/* the upper case */
/* handle the first block seperately with alpha */
int NN = (N%NB==0)?NB:(N%NB);
i=N-NN;
hipblasDgemm ('N', 'T', M, NN, NN, alpha, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M);
if (i-NB<0)
{
b_copy();
return;
}
hipblasDgemm ('N', 'T', M, i, NN, -1.0, d_x+i*M, M, A+i*lda, lda, alpha, b, ldb);
/* the rest blocks */
for (i=N-NN-NB; i>=0; i-=NB)
{
hipblasDgemm ('N', 'T', M, NB, NB, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M);
if (i-NB<0)
break;
hipblasDgemm ('N', 'T', M, i, NB, -1.0, d_x+i*M, M, A+i*lda, lda, 1.0, b, ldb);
}
}
}
}
b_copy();
}
| 0aa63f701e8e03beeb1fbf09b2a360dd7ba19208.cu | /*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@precisions normal d -> d
@author Peng Du
*/
#include "common_magma.h"
#define qmod(a,b) ((a)-(__mul24((b),(a)/(b))))
#define cublasDgemm magmablas_dgemm
__global__ void
b_copy_kernel (int M, int N, double *b, int ldb, double *d_x, int ldx);
extern "C"
void diag_dtrtri (magma_int_t M, char uplo, char diag, const double *A, double *d_dinvA, magma_int_t lda);
#define b_copy(); dim3 dimBlock((M>=MAX_THREAD_PER_BLOCK)?MAX_THREAD_PER_BLOCK:(WARP_SIZE*((M/WARP_SIZE)+(M%WARP_SIZE!=0))), 1);\
dim3 dimGrid(M/dimBlock.x+(M%dimBlock.x!=0), N);\
b_copy_kernel<<< dimGrid, dimBlock, 0, magma_stream >>>(M, N, b, ldb, d_x, M);
//magma_device_sync();
#define MAX_THREAD_PER_BLOCK 512
#define WARP_SIZE 32
#define BLOCK_SIZE 16 // inner blocking size, <=32
#define NB 128// outer blocking size, >BLOCK_SIZE
/*
* magmablas_dtrsm
*/
extern "C"
void magmablas_dtrsm_work( char side, char uplo, char tran, char diag, magma_int_t M, magma_int_t N,
double alpha, const double* A, magma_int_t lda, double* b, magma_int_t ldb,
int flag, double *d_dinvA, double *d_x )
{
/* -- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
Purpose
=======
dtrsm solves one of the matrix equations on gpu
op( a )*x = alpha*b, or x*op( a ) = alpha*b,
where alpha is a scalar, x and b are m by n matrices, a is a unit, or
non-unit, upper or lower triangular matrix and op( a ) is one of
op( A ) = A or op( A ) = A'.
The matrix X is overwritten on B.
When M or N is not a multiple of blocking size, which is 32 for now, cublasDtrsm will
be called instead. There soon will not be this limitation both for arbitrary problem
size and blocking size.
This is an asynchronous version of magmablas_dtrsm with "workspace" as an argument.
Arguments
==========
side - CHARACTER*1.
On entry, side specifies whether op( A ) appears on the left
or right of X as follows:
side = 'L' or 'l' op( A )*X = alpha*B.
side = 'R' or 'r' X*op( A ) = alpha*B.
Unchanged on exit.
uplo - CHARACTER*1.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
uplo = 'U' or 'u' A is an upper triangular matrix.
uplo = 'L' or 'l' A is a lower triangular matrix.
Unchanged on exit.
tran - CHARACTER*1.
On entry, tran specifies the form of op( A ) to be used in
the matrix multiplication as follows:
tran = 'N' or 'n' op( A ) = A.
tran = 'T' or 't' op( A ) = A'.
tran = 'C' or 'c' op( A ) = A'.
Unchanged on exit.
diag - CHARACTER*1.
On entry, diag specifies whether or not A is unit triangular
as follows:
diag = 'U' or 'u' A is assumed to be unit triangular.
diag = 'N' or 'n' A is not assumed to be unit
triangular.
Unchanged on exit.
m - INTEGER.
On entry, m specifies the number of rows of B. m must be at
least zero.
Unchanged on exit.
n - INTEGER.
On entry, n specifies the number of columns of B. n must be
at least zero.
Unchanged on exit.
alpha - REAL.
On entry, alpha specifies the scalar alpha. When alpha is
zero then A is not referenced and B need not be set before
entry.
Unchanged on exit.
A - REAL array of DIMENSION ( lda, k ), where k is m
when side = 'L' or 'l' and is n when side = 'R' or 'r'.
Before entry with uplo = 'U' or 'u', the leading k by k
upper triangular part of the array A must contain the upper
triangular matrix and the strictly lower triangular part of
A is not referenced.
Before entry with uplo = 'L' or 'l', the leading k by k
lower triangular part of the array A must contain the lower
triangular matrix and the strictly upper triangular part of
A is not referenced.
Note that when diag = 'U' or 'u', the diagonal elements of
A are not referenced either, but are assumed to be unity.
Unchanged on exit.
lda - INTEGER.
On entry, lda specifies the first dimension of A as declared
in the calling (sub) program. When side = 'L' or 'l' then
lda must be at least max( 1, m ), when side = 'R' or 'r'
then lda must be at least max( 1, n ).
Unchanged on exit.
b - REAL array of DIMENSION ( ldb, n ).
Before entry, the leading m by n part of the array B must
contain the right-hand side matrix B, and on exit is
overwritten by the solution matrix X.
ldb - INTEGER.
On entry, ldb specifies the first dimension of B as declared
in the calling (sub) program. ldb must be at least
max( 1, m ).
Unchanged on exit.
d_dinvA - workspace of size NB*((M+NB-1)/NB))*NB, on device.
d_x - workspace of size N*M, on device.
Level 3 Blas routine.
===================================================================== */
int i;
/* quick return on wrong size */
if (M<=0 || N<=0)
return;
if (side == 'l' || side == 'L')
{
/* invert the diagonals */
if (flag == 1) {
diag_dtrtri (M, uplo, diag, A, d_dinvA, lda);
}
if (tran == 'N' || tran == 'n')
/* the non-transpose case */
{
if (uplo == 'L' || uplo == 'l')
{
/* the lower case */
/* handle the first block seperately with alpha */
int MM = min (NB, M);
cublasDgemm ('N', 'N', MM, N, MM, alpha, d_dinvA, NB, b, ldb, 0, d_x, M);
if (NB>=M)
{
b_copy();
return;
}
cublasDgemm ('N', 'N', M-NB, N, NB, -1.0, A+NB, lda, d_x, M, alpha, b+NB, ldb);
/* the rest blocks */
for (i=NB; i<M; i+=NB)
{
MM = min (M-i, NB);
cublasDgemm ('N', 'N', MM, N, MM, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M);
if (i+NB>=M)
break;
cublasDgemm ('N', 'N', M-i-NB, N, NB, -1.0, A+i*lda+i+NB, lda, d_x+i, M, 1.0, b+i+NB, ldb);
}
}
else
{
/* the upper case */
/* handle the first block seperately with alpha */
int MM = (M%NB==0)?NB:(M%NB);
i = M-MM;
cublasDgemm ('N', 'N', MM, N, MM, alpha, d_dinvA+i*NB, NB, b+i, ldb, 0.0, d_x+i, M);
if (i-NB<0)
{
b_copy();
return;
}
cublasDgemm ('N', 'N', i, N, MM, -1.0, A+i*lda, lda, d_x+i, M, alpha, b, ldb);
/* the rest blocks */
for (i=M-MM-NB; i>=0; i-=NB)
{
cublasDgemm ('N', 'N', NB, N, NB, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0.0, d_x+i, M);
if (i-NB<0)
break;
cublasDgemm ('N', 'N', i, N, NB, -1.0, A+i*lda, lda, d_x+i, M, 1.0, b, ldb);
}
}
}
else
/* the transpose case */
{
if (uplo == 'L' || uplo == 'l')
{
/* the lower case */
/* handle the first block seperately with alpha */
int MM = (M%NB==0)?NB:(M%NB);
i=M-MM;
cublasDgemm ('T', 'N', MM, N, MM, alpha, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M);
if (i-NB<0)
{
b_copy();
return;
}
cublasDgemm ('T', 'N', i, N, MM, -1.0, A+i, lda, d_x+i, M, alpha, b, ldb);
/* the rest blocks */
for (i=M-MM-NB; i>=0; i-=NB)
{
cublasDgemm ('T', 'N', NB, N, NB, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M);
if (i-NB<0)
break;
cublasDgemm ('T', 'N', i, N, NB, -1.0, A+i, lda, d_x+i, M, 1.0, b, ldb);
}
}
else
{
/* the upper case */
/* handle the first block seperately with alpha */
int MM = min (NB, M);
cublasDgemm ('T', 'N', MM, N, MM, alpha, d_dinvA, NB, b, ldb, 0, d_x, M);
if (NB>=M)
{
b_copy();
return;
}
cublasDgemm ('T', 'N', M-NB, N, NB, -1.0, A+(NB)*lda, lda, d_x, M, alpha, b+NB, ldb);
/* the rest blocks */
for (i=NB; i<M; i+=NB)
{
MM = min (M-i, NB);
cublasDgemm ('T', 'N', MM, N, MM, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M);
if (i+NB>=M)
break;
cublasDgemm ('T', 'N', M-i-NB, N, NB, -1.0, A+(i+NB)*lda+i, lda, d_x+i, M, 1.0, b+i+NB, ldb);
}
}
}
}
else
{ // side=R
/* invert the diagonals */
if (flag == 1) {
diag_dtrtri (N, uplo, diag, A, d_dinvA, lda);
}
if (tran == 'N' || tran == 'n')
/* the non-transpose case */
{
if (uplo == 'L' || uplo == 'l')
{
/* the lower case */
/* handle the first block seperately with alpha */
int NN = (N%NB==0)?NB:(N%NB);
i=N-NN;
cublasDgemm ('N', 'N', M, NN, NN, alpha, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M);
if (i-NB<0)
{
b_copy();
return;
}
cublasDgemm ('N', 'N', M, i, NN, -1.0, d_x+i*M, M, A+i, lda, alpha, b, ldb);
/* the rest blocks */
for (i=N-NN-NB; i>=0; i-=NB)
{
cublasDgemm ('N', 'N', M, NB, NB, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M);
if (i-NB<0)
break;
cublasDgemm ('N', 'N', M, i, NB, -1.0, d_x+i*M, M, A+i, lda, 1.0, b, ldb);
}
}
else
{
/* the upper case */
/* handle the first block seperately with alpha */
int NN = min(NB, N);
cublasDgemm ('N', 'N', M, NN, NN, alpha, b, ldb, d_dinvA, NB, 0, d_x, M);
if (NB>=N)
{
b_copy();
return;
}
cublasDgemm ('N', 'N', M, N-NB, NB, -1.0, d_x, M, A+NB*lda, lda, alpha, b+NB*ldb, ldb);
/* the rest blocks */
for (i=NB; i<N; i+=NB)
{
NN = min(NB, N-i);
cublasDgemm ('N', 'N', M, NN, NN, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0, d_x+i*M, M);
if (i+NB>=N)
break;
cublasDgemm ('N', 'N', M, N-i-NB, NB, -1.0, d_x+i*M, M, A+(i+NB)*lda+i, lda, 1.0, b+(i+NB)*ldb, ldb);
}
}
}
else
/* the transpose case */
{
if (uplo == 'L' || uplo == 'l')
{
/* the lower case */
/* handle the first block seperately with alpha */
int NN = min(NB, N);
cublasDgemm ('N', 'T', M, NN, NN, alpha, b, ldb, d_dinvA, NB, 0, d_x, M);
if (NB>=N)
{
b_copy();
return;
}
cublasDgemm ('N', 'T', M, N-NB, NB, -1.0, d_x, M, A+NB, lda, alpha, b+NB*ldb, ldb);
/* the rest blocks */
for (i=NB; i<N; i+=NB)
{
NN = min(NB, N-i);
cublasDgemm ('N', 'T', M, NN, NN, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0, d_x+i*M, M);
if (i+NB>=N)
break;
cublasDgemm ('N', 'T', M, N-i-NB, NB, -1.0, d_x+i*M, M, A+i*lda+NB+i, lda, 1.0, b+(i+NB)*ldb, ldb);
}
}
else
{
/* the upper case */
/* handle the first block seperately with alpha */
int NN = (N%NB==0)?NB:(N%NB);
i=N-NN;
cublasDgemm ('N', 'T', M, NN, NN, alpha, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M);
if (i-NB<0)
{
b_copy();
return;
}
cublasDgemm ('N', 'T', M, i, NN, -1.0, d_x+i*M, M, A+i*lda, lda, alpha, b, ldb);
/* the rest blocks */
for (i=N-NN-NB; i>=0; i-=NB)
{
cublasDgemm ('N', 'T', M, NB, NB, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M);
if (i-NB<0)
break;
cublasDgemm ('N', 'T', M, i, NB, -1.0, d_x+i*M, M, A+i*lda, lda, 1.0, b, ldb);
}
}
}
}
b_copy();
}
|
dc0c277ab38808be31692b539454d3efdf968bfe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "utils.h"
#ifndef assert
#define assert(e) \
if (!(e)) { \
printf("failed assertion `%s'\n", #e); \
THError("aborting..."); \
};
#endif
/*
This file contains 2 kernels :
- copyPixelsInSlices.
- addPixelsInSlices.
The primary kernel is copyPixelsInSlices : it unfolds a 3D matrix into a 2D matrix in a way that the 2D convolution (with many kernels) becomes a matrix multiplication.
We call the resulting matrix "kernelSlices". Each row corresponds to a kW*kH*nInputPlane array.
Steps :
1) choose a pixel (pixi = blockIdx.x, pixj = blockIdx.y)
2) find which slices (coordinates (imin-imax, jmin-jmax)) will contain the pixel information
3) loop : copy the pixel information, jump to next slice (and position) by
moving the kernelSlices pointer ptrkslices by stridej = (kH*kW - dW) * nInputPlane
detailed example : pixel (4,4), kernels of size 5*5, stride dW=1 :
- 1st slice : top-left coordinates : (imin,jmin) . Pixel is in coordinates (4,4, position 25) of the slice.
- 2nd slice : top-left coordinates : (imin,jmin+1). Pixel is in coordinates (4,3, position 24) of the slice.
- 3rd slice : top-left coordinates : (imin,jmin+2). Pixel is in coordinates (4,2, position 23) of the slice.
- 4th slice : top-left coordinates : (imin,jmin+2). Pixel is in coordinates (4,1, position 22) of the slice.
- 5th slice : top-left coordinates : (imin,jmin+2). Pixel is in coordinates (4,0, position 21) of the slice.
- when jmax-jmin slices have been filled, we jump to the next series of slices by
moving ptrkslices by stridei = (((size2-jmax+jmin-1)*kH -dH)*kW + (jmax-jmin+1)*dW)*nInputPlane
- 1st slice : top-left coordinates : (imin+1,jmin) . Pixel is in coordinates (3,4, position 20) of the slice.
- 2nd slice : top-left coordinates : (imin+1,jmin+1). Pixel is in coordinates (3,3, position 19) of the slice.
- 3rd slice : top-left coordinates : (imin+1,jmin+2). Pixel is in coordinates (3,2, position 18) of the slice.
- 4th slice : top-left coordinates : (imin+1,jmin+2). Pixel is in coordinates (3,1, position 17) of the slice.
- 5th slice : top-left coordinates : (imin+1,jmin+2). Pixel is in coordinates (3,0, position 16) of the slice.
- ...
In case the pixel (pixi,pixj) is in the zero-padding, we fill the slice with zeros.
addPixelsInSlices is the same, except we read the contents of the array instead of writing.
*/
__global__ void copyPixelsInSlices(float *ptrinput0, float *ptrkslices0,
int dH, int dW, int kH, int kW, int size1, int size2, int isize1, int isize2, int nInputPlane, int padleft, int padright, int padup, int paddown, int inputstr0, int kslicesstr0, int batchsize)
{
const int pixi=blockIdx.x;
const int pixj=blockIdx.y;
const int blk =blockDim.x*blockDim.y;
const int tidx=threadIdx.x+blockDim.x*threadIdx.y;
__shared__ int _imin, _jmin, _imax, _jmax, _stridej, _stridei, _ksliceoffset, _inputoffset;
int imin, jmin, imax, jmax;
int stridej, stridei, ksliceoffset, inputoffset;
if(tidx==0)
{
imin=(pixi - (kH - 1) + (dH -1))/dH > 0 ? (pixi - (kH - 1) + (dH -1))/dH : 0 ;
jmin=(pixj - (kW - 1) + (dW -1))/dW > 0 ? (pixj - (kW - 1) + (dW -1))/dW : 0 ;
imax= pixi / dH < size1 ? pixi / dH : size1 - 1 ;
jmax= pixj / dW < size2 ? pixj / dW : size2 - 1 ;
stridej = (kH*kW - dW) * nInputPlane;
stridei = (((size2-jmax+jmin-1)*kH -dH)*kW + (jmax-jmin+1)*dW)*nInputPlane;
ksliceoffset = ((imin * size2 + jmin) * kH * kW + (pixi - imin * dH) * kW + (pixj - jmin*dW) ) * nInputPlane + kslicesstr0*blockIdx.z;
inputoffset = ((pixi-padup) * isize2 + (pixj-padleft)) * nInputPlane + inputstr0*blockIdx.z;
_imin=imin;
_jmin=jmin;
_imax=imax;
_jmax=jmax;
_stridej=stridej;
_stridei=stridei;
_ksliceoffset=ksliceoffset;
_inputoffset=inputoffset;
}
__syncthreads();
if(threadIdx.x==0 && threadIdx.y>0)
{
imin=_imin;
jmin=_jmin;
imax=_imax;
jmax=_jmax;
stridej=_stridej;
stridei=_stridei;
ksliceoffset=_ksliceoffset;
inputoffset=_inputoffset;
}
imin=__shfl(imin, 0);
jmin=__shfl(jmin, 0);
imax=__shfl(imax, 0);
jmax=__shfl(jmax, 0);
stridej=__shfl(stridej, 0);
stridei=__shfl(stridei, 0);
ksliceoffset=__shfl(ksliceoffset, 0);
inputoffset=__shfl(inputoffset, 0);
int i;
int j;
int k;
bool zeropad=pixi<padup || pixi>isize1-1+padup || pixj<padleft || pixj>isize2-1+padleft ;
float * ptrinput = ptrinput0 + inputoffset;
float * ptrkslices = ptrkslices0 + ksliceoffset;
for(i=imin; i<imax+1; i++) {
for(j=jmin; j<jmax+1; j++) {
if(zeropad)
{
for(k=tidx; k<nInputPlane; k+=blk) {
ptrkslices[k]=0;
}
}
else {
for(k=tidx; k<nInputPlane; k+=blk) {
ptrkslices[k]=ptrinput[k];
}
}
ptrkslices += stridej;
}
ptrkslices += stridei;
}
}
__global__ void copyPixelsInSlicesRGB(float *ptrinput0, float *ptrkslices0,
int dH, int dW, int kH, int kW, int size1, int size2, int isize1, int isize2, int nInputPlane, int padleft, int padright, int padup, int paddown, int inputstr0, int kslicesstr0, int batchsize)
{
// each block does one pixel of the input image
// each kernel slice is represented by its upper-left coordinates
const int pixi=blockIdx.x;
const int pixj=blockIdx.y*blockDim.y + threadIdx.y;
const int tidx=threadIdx.x;
const int batchindex=blockIdx.z*blockDim.z+threadIdx.z;
int i,j;
int imin, jmin, imax, jmax;
int inputoffset, ksliceoffset;
// step 1 : find which kernel slices contain the values of the pixel
__shared__ int _imin, _jmin[32], _imax, _jmax[32], _inputoffset[32][3], _ksliceoffset[32][3];
if(threadIdx.z==0)
{
imin=(pixi - (kH - 1) + (dH -1))/dH > 0 ? (pixi - (kH - 1) + (dH -1))/dH : 0 ;
jmin=(pixj - (kW - 1) + (dW -1))/dW > 0 ? (pixj - (kW - 1) + (dW -1))/dW : 0 ;
imax= pixi / dH < size1 ? pixi / dH : size1 - 1 ;
jmax= pixj / dW < size2 ? pixj / dW : size2 - 1 ;
if(threadIdx.x==0 && threadIdx.y==0)
{
_imin=imin;
_imax=imax;
}
if(threadIdx.x==0)
{
_jmin[threadIdx.y]=jmin;
_jmax[threadIdx.y]=jmax;
}
inputoffset = inputstr0*blockIdx.z*blockDim.z + ((pixi-padup) * isize2 + (pixj-padleft)) * nInputPlane ;
ksliceoffset= kslicesstr0*blockIdx.z*blockDim.z + ((imin * size2 + jmin) * kH * kW + (pixi - imin * dH) * kW + (pixj - jmin*dW) ) * nInputPlane;
_inputoffset[threadIdx.y][threadIdx.x]=inputoffset;
_ksliceoffset[threadIdx.y][threadIdx.x]=ksliceoffset;
}
__syncthreads();
if(batchindex >= batchsize) return;
if(pixj > isize2 + padleft + padright -1) return;
if(threadIdx.z>0)
{
imin=_imin;
imax=_imax;
jmin=_jmin[threadIdx.y];
jmax=_jmax[threadIdx.y];
inputoffset=_inputoffset[threadIdx.y][threadIdx.x];
ksliceoffset=_ksliceoffset[threadIdx.y][threadIdx.x];
}
// step 2 : move the pointers
// this one goes to where the pixel is at
ptrinput0 += inputoffset+inputstr0*threadIdx.z ;
ptrkslices0 += ksliceoffset+kslicesstr0*threadIdx.z ;
const int stridej = (kH*kW - dW) * nInputPlane;
const int stridei = (size2*kH-dH) * kW *nInputPlane - (jmax-jmin+1) * stridej ;
bool zeropad = pixi<padup || pixi>isize1-1+padup || pixj<padleft || pixj>isize2-1+padleft ;
// read pixel
// load the stuff first...
//for (b=0; b<batchsize; b++)
//{
float * ptrinput = ptrinput0;
float * ptrkslices = ptrkslices0;
float pixvalue;
if (zeropad) {
pixvalue=0;
}
else {
pixvalue=ptrinput[tidx];
}
// write to memory
for(i=imin; i<imax+1; i++) {
for(j=jmin; j<jmax+1; j++) {
if(zeropad)
{
ptrkslices[tidx]=0;
}
else {
ptrkslices[tidx]=pixvalue;
}
ptrkslices += stridej;
}
ptrkslices += stridei;
}
//}
}
__global__ void addPixelsInSlices(float *ptrgradinput0, float *ptrkslices0,
int dH, int dW, int kH, int kW, int size1, int size2, int isize1, int isize2, int nInputPlane, int padleft, int padright, int padup, int paddown, int gradinputstr0, int kslicesstr0, int batchsize)
{
const int pixi=blockIdx.x;
const int pixj=blockIdx.y;
const int blk =blockDim.x*blockDim.y;
const int tidx=threadIdx.x+blockDim.x*threadIdx.y;
bool zeropad=pixi<padup || pixi>isize1-1+padup || pixj<padleft || pixj>isize2-1+padleft ;
if(zeropad) return;
__shared__ int _imin, _jmin, _imax, _jmax, _stridej, _stridei, _ksliceoffset, _gradinputoffset;
int stridej, stridei, ksliceoffset, gradinputoffset;
int imin;
int jmin;
int imax;
int jmax;
if(threadIdx.y==0 && threadIdx.x==0)
{
imin=(pixi - (kH - 1) + (dH -1))/dH > 0 ? (pixi - (kH - 1) + (dH -1))/dH : 0 ;
jmin=(pixj - (kW - 1) + (dW -1))/dW > 0 ? (pixj - (kW - 1) + (dW -1))/dW : 0 ;
imax= pixi / dH < size1 ? pixi / dH : size1 - 1 ;
jmax= pixj / dW < size2 ? pixj / dW : size2 - 1 ;
stridej = (kH*kW - dW) * nInputPlane;
stridei = (((size2-jmax+jmin-1)*kH -dH)*kW + (jmax-jmin+1)*dW)*nInputPlane;
ksliceoffset = ((imin * size2 + jmin) * kH * kW + (pixi - imin * dH) * kW + (pixj - jmin*dW) ) * nInputPlane + kslicesstr0*blockIdx.z;
gradinputoffset = ((pixi-padup) * isize2 + (pixj-padleft)) * nInputPlane + gradinputstr0*blockIdx.z;
_imin=imin;
_jmin=jmin;
_imax=imax;
_jmax=jmax;
_stridej=stridej;
_stridei=stridei;
_ksliceoffset=ksliceoffset;
_gradinputoffset=gradinputoffset;
}
__syncthreads();
if(threadIdx.x==0 && threadIdx.y>0)
{
imin=_imin;
jmin=_jmin;
imax=_imax;
jmax=_jmax;
stridej=_stridej;
stridei=_stridei;
ksliceoffset=_ksliceoffset;
gradinputoffset=_gradinputoffset;
}
imin=__shfl(imin, 0);
jmin=__shfl(jmin, 0);
imax=__shfl(imax, 0);
jmax=__shfl(jmax, 0);
stridej=__shfl(stridej, 0);
stridei=__shfl(stridei, 0);
ksliceoffset=__shfl(ksliceoffset, 0);
gradinputoffset=__shfl(gradinputoffset, 0);
int i;
int j;
int k;
for(k=tidx; k<nInputPlane; k+=blk) {
float * ptrgradinput = ptrgradinput0 + gradinputoffset;
float * ptrkslices = ptrkslices0 + ksliceoffset;
float v=0;
for(i=imin; i<imax+1; i++) {
for(j=jmin; j<jmax+1; j++) {
v += ptrkslices[k];
ptrkslices += stridej;
}
ptrkslices += stridei;
}
ptrgradinput[k] += v;
}
}
__global__ void copyBiasToOutputs(float *ptrbias, float *ptroutput, const int size1, const int size2, const int nOutputPlane, const int linestride, const int imstride)
{
// each thread has a value to manage...
//const int blk =blockDim.x;
const int tidx=blockDim.x*blockIdx.x + threadIdx.x;
const int tidy=blockIdx.y;
const int tidz=blockIdx.z;
float val = ptrbias[tidx];
ptroutput+= tidz*imstride + tidy*linestride;
for(int k=0; k<size2; k++)
{
if(tidx<nOutputPlane) {
ptroutput[k*nOutputPlane+tidx]=val;
}
}
}
void copyBiasVector(THCudaTensor* output, THCudaTensor* bias)
{
float* ptrbias = THCudaTensor_data(NULL, bias);
float* ptroutput = THCudaTensor_data(NULL, output);
int nOutputPlane = bias->size[0];
int batchsize = output->size[0];
int size1 = output->size[1];
int size2 = output->size[2];
// fill output with biases
dim3 blocksbias ((nOutputPlane+31)/32, size1, batchsize);
dim3 threadsbias (32);
hipLaunchKernelGGL(( copyBiasToOutputs), dim3(blocksbias), dim3(threadsbias), 0, 0, ptrbias, ptroutput, size1, size2, nOutputPlane, output->stride[1], output->stride[0]);
}
void sliceInput(THCudaTensor *input, THCudaTensor* kernelSlices, int kH, int kW, int dH, int dW, int padup, int paddown, int padleft, int padright)
{
// find the size of kernelslices
long batchsize = input->size[0];
long isize1 = input->size[1];
long isize2 = input->size[2];
long nInputPlane = input->size[3];
long size1 = (isize1 - kH + padup + paddown) / dH + 1;
long size2 = (isize2 - kW + padleft + padright) / dW + 1;
float* ptrkslices = THCudaTensor_data(NULL, kernelSlices);
float* ptrinput = THCudaTensor_data(NULL, input);
int inputstr0=input->stride[0];
int kslicesstr0=size1*size2*kW*kH*nInputPlane;
//kernel unfold inputs
if (nInputPlane ==3)
{
dim3 blocksRGB (isize1 + padup + paddown, (isize2 + padleft + padright+31)/32, (batchsize+3)/4);
dim3 threadsRGB (3,32,4);
hipLaunchKernelGGL(( copyPixelsInSlicesRGB) , dim3(blocksRGB), dim3(threadsRGB), 0, 0, ptrinput, ptrkslices,
dH, dW, kH, kW, size1, size2, isize1, isize2, nInputPlane, padleft, padright, padup, paddown, inputstr0, kslicesstr0, batchsize);
}
else
{
int b_y;
if (nInputPlane>1024)
{
b_y=32;
}
else
{
b_y=(nInputPlane+31)/32;
}
dim3 blocks (isize1 + padup + paddown, isize2 + padleft + padright, batchsize);
dim3 threads (32,b_y);
hipLaunchKernelGGL(( copyPixelsInSlices), dim3(blocks), dim3(threads), 0, 0, ptrinput, ptrkslices,
dH, dW, kH, kW, size1, size2, isize1, isize2, nInputPlane, padleft, padright, padup, paddown, inputstr0, kslicesstr0, batchsize);
}
}
void unsliceGradient(THCudaTensor *backwardSlices, THCudaTensor *gradInput, THCudaTensor *gradOutput, int kH, int kW, int dH, int dW, int padup, int paddown, int padleft, int padright)
{
long batchsize = gradInput->size[0];
long isize1 = gradInput->size[1];
long isize2 = gradInput->size[2];
long nInputPlane = gradInput->size[3];
long size1 = gradOutput->size[1];
long size2 = gradOutput->size[2];
float* ptrbackslices = THCudaTensor_data(NULL, backwardSlices);
float* ptrgradinput = THCudaTensor_data(NULL, gradInput);
int b_y;
if (nInputPlane>1024)
{
b_y=32;
}
else
{
b_y=(nInputPlane+31)/32;
}
dim3 blocks (isize1 + padup + paddown, isize2 + padleft + padright, batchsize);
dim3 threads (32,b_y);
int gradinputstr0=gradInput->stride[0];
int kslicesstr0=size1*size2*kW*kH*nInputPlane;
hipLaunchKernelGGL(( addPixelsInSlices), dim3(blocks), dim3(threads), 0, 0, ptrgradinput, ptrbackslices,
dH, dW, kH, kW, size1, size2, isize1, isize2, nInputPlane, padleft, padright, padup, paddown, gradinputstr0, kslicesstr0, batchsize);
}
static int cunxn_SpatialConvolutionUnfold_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THCudaTensor *kernels = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor *bias = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "bias", "torch.CudaTensor");
// THCudaTensor *kSlices = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "kernelSlices", "torch.CudaTensor");
long kW = luaT_getfieldcheckint(L, 1, "kW");
long kH = luaT_getfieldcheckint(L, 1, "kH");
long dW = luaT_getfieldcheckint(L, 1, "dW");
long dH = luaT_getfieldcheckint(L, 1, "dH");
long padup = luaT_getfieldcheckint(L, 1, "padtop");
long paddown = luaT_getfieldcheckint(L, 1, "padbottom");
long padleft = luaT_getfieldcheckint(L, 1, "padleft");
long padright = luaT_getfieldcheckint(L, 1, "padright");
long nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
long nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
// input should be contiguous already but... well.
input = THCudaTensor_newContiguous(state, input);
// find the size of kernelslices
long batchsize = input->size[0];
long isize1 = input->size[1];
long isize2 = input->size[2];
long size1 = (isize1 - kH + padup + paddown) / dH + 1;
long size2 = (isize2 - kW + padleft + padright) / dW + 1;
THCudaTensor_resize4d(state, output, batchsize, size1, size2, nOutputPlane);
copyBiasVector(output, bias);
// unfold conv kernels by resizing
THCudaTensor_resize2d(state, kernels, nOutputPlane, kW*kH*nInputPlane);
THCudaTensor_transpose(state, kernels, NULL, 0, 1);
// in case there is not enough memory
size_t freeMem;
THCudaCheck(hipMemGetInfo (&freeMem, NULL));
int nsplits=1;
while(batchsize/nsplits*size1*size2*kW*kH*nInputPlane * 4 > freeMem)
{
nsplits *= 2;
}
int newbatchsize=(batchsize+nsplits-1)/nsplits;
THCudaTensor* kernelSlices = THCudaTensor_newWithSize2d(state, newbatchsize*size1*size2,kW*kH*nInputPlane);
for(int split=0; split<nsplits; split++)
{
int splitsize=newbatchsize;
if(split*newbatchsize+splitsize > batchsize)
{
splitsize=batchsize-split*newbatchsize;
THCudaTensor_resize2d(state, kernelSlices, splitsize*size1*size2, kW*kH*nInputPlane);
}
THCudaTensor* inputSplit = THCudaTensor_newNarrow(state,input, 0, split*newbatchsize, splitsize);
sliceInput(inputSplit, kernelSlices, kH, kW, dH, dW, padup, paddown, padleft, padright);
THCudaTensor_free(state, inputSplit);
THCudaTensor* outputSplit = THCudaTensor_newNarrow(state, output, 0, split*newbatchsize, splitsize);
// put output in matrix mode
THCudaTensor_resize2d(state, outputSplit, splitsize* size1* size2, nOutputPlane);
// printf("sgemm\n");
THCudaTensor_addmm(state, outputSplit, 1, outputSplit, 1, kernelSlices, kernels);
THCudaTensor_free(state, outputSplit);
}
THCudaTensor_free(state, kernelSlices);
THCudaTensor_transpose(state, kernels, NULL, 0, 1);
THCudaTensor_resize4d(state, kernels, nOutputPlane, kH, kW, nInputPlane);
// check for errors
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in copyPixelsInSlices: %s\n", hipGetErrorString(err));
THError("aborting");
}
THCudaTensor_resize4d(state, output, batchsize, size1, size2, nOutputPlane);
THCudaTensor_free(state, input);
return 1;
}
static int cunxn_SpatialConvolutionUnfold_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
long kW = luaT_getfieldcheckint(L, 1, "kW");
long kH = luaT_getfieldcheckint(L, 1, "kH");
long dW = luaT_getfieldcheckint(L, 1, "dW");
long dH = luaT_getfieldcheckint(L, 1, "dH");
long padup = luaT_getfieldcheckint(L, 1, "padtop");
long paddown = luaT_getfieldcheckint(L, 1, "padbottom");
long padleft = luaT_getfieldcheckint(L, 1, "padleft");
long padright = luaT_getfieldcheckint(L, 1, "padright");
long nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
long nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
THCudaTensor *kernels = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
THCudaTensor_resize2d(state, kernels, nOutputPlane, kW*kH*nInputPlane);
long batchsize = input->size[0];
long size1 = gradOutput->size[1];
long size2 = gradOutput->size[2];
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_fill(state, gradInput, 0);
// in case there is not enough memory
size_t freeMem;
THCudaCheck(hipMemGetInfo (&freeMem, NULL));
int nsplits=1;
while(batchsize/nsplits*size1*size2*kW*kH*nInputPlane * 4 > freeMem)
{
nsplits *= 2;
}
int newbatchsize=(batchsize+nsplits-1)/nsplits;
THCudaTensor* backwardSlices = THCudaTensor_newWithSize2d(state, newbatchsize*size1*size2,kW*kH*nInputPlane);
for(int split=0; split<nsplits; split++)
{
int splitsize=newbatchsize;
if(split*newbatchsize+splitsize > batchsize)
{
splitsize=batchsize-split*newbatchsize;
THCudaTensor_resize2d(state, backwardSlices, splitsize*size1*size2, kW*kH*nInputPlane);
}
THCudaTensor* gradOutputSplit = THCudaTensor_newNarrow(state, gradOutput, 0, split*newbatchsize, splitsize);
THCudaTensor* gradInputSplit = THCudaTensor_newNarrow(state, gradInput, 0, split*newbatchsize, splitsize);
THCudaTensor_resize2d(state, gradOutputSplit, splitsize*size1*size2, nOutputPlane);
// backprop gradinput into the slices
THCudaTensor_addmm(state, backwardSlices, 0, backwardSlices, 1, gradOutputSplit, kernels);
THCudaTensor_resize4d(state, gradOutputSplit, splitsize, size1, size2, nOutputPlane);
unsliceGradient(backwardSlices, gradInputSplit, gradOutputSplit, kH, kW, dH, dW, padup, paddown, padleft, padright);
THCudaTensor_free(state, gradInputSplit);
THCudaTensor_free(state, gradOutputSplit);
}
// we resize gradOutput back to what it was...
THCudaTensor_resize4d(state, gradOutput, batchsize, size1, size2, nOutputPlane);
THCudaTensor_resize4d(state, kernels, nOutputPlane, kH, kW, nInputPlane);
THCudaTensor_free(state, backwardSlices);
return 1;
}
static int cunxn_SpatialConvolutionUnfold_accGradParameters(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
long kW = luaT_getfieldcheckint(L, 1, "kW");
long kH = luaT_getfieldcheckint(L, 1, "kH");
long dW = luaT_getfieldcheckint(L, 1, "dW");
long dH = luaT_getfieldcheckint(L, 1, "dH");
long padup = luaT_getfieldcheckint(L, 1, "padtop");
long paddown = luaT_getfieldcheckint(L, 1, "padbottom");
long padleft = luaT_getfieldcheckint(L, 1, "padleft");
long padright = luaT_getfieldcheckint(L, 1, "padright");
long nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
long nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
float scale = luaL_optnumber(L, 4, 1);
// find the size of kernelslices
long batchsize = gradOutput->size[0];
long size1 = gradOutput->size[1];
long size2 = gradOutput->size[2];
THCudaTensor *gradWeight = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradWeight", "torch.CudaTensor");
THCudaTensor *gradBias = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradBias", "torch.CudaTensor");
float* ptrgradbias = THCudaTensor_data(state, gradBias);
float* ptrgradoutput = THCudaTensor_data(state, gradOutput);
dim3 blocksgradbias (nOutputPlane+31/32);
dim3 threadsgradbias (32,32);
THCudaTensor_resize2d(state, gradWeight, nOutputPlane, kW*kH*nInputPlane);
size_t freeMem;
THCudaCheck(hipMemGetInfo (&freeMem, NULL));
int nsplits=1;
while(batchsize/nsplits*size1*size2*kW*kH*nInputPlane * 4 > freeMem)
{
nsplits *= 2;
}
int newbatchsize=(batchsize+nsplits-1)/nsplits;
THCudaTensor* kernelSlices = THCudaTensor_newWithSize2d(state, newbatchsize*size1*size2,kW*kH*nInputPlane);
for(int split=0; split<nsplits; split++)
{
int splitsize=newbatchsize;
if(split*newbatchsize+splitsize > batchsize)
{
splitsize=batchsize-split*newbatchsize;
THCudaTensor_resize2d(state, kernelSlices, splitsize*size1*size2, kW*kH*nInputPlane);
}
THCudaTensor* gradOutputSplit = THCudaTensor_newNarrow(state, gradOutput, 0, split*newbatchsize, splitsize);
THCudaTensor_resize2d(state, gradOutputSplit, splitsize*size1* size2, nOutputPlane);
THCudaTensor_transpose(state, gradOutputSplit, NULL, 0, 1);
THCudaTensor* inputSplit = THCudaTensor_newNarrow(state, input, 0, split*newbatchsize, splitsize);
sliceInput(inputSplit, kernelSlices, kH, kW, dH, dW, padup, paddown, padleft, padright);
THCudaTensor_addmm(state, gradWeight, 1, gradWeight, scale, gradOutputSplit, kernelSlices);
THCudaTensor_free(state, inputSplit);
THCudaTensor_free(state, gradOutputSplit);
}
THCudaTensor* gradOutTmp = THCudaTensor_newNarrow(state, gradOutput, 0, 0, batchsize);
THCudaTensor_resize2d(state, gradOutTmp, batchsize*size1*size2, nOutputPlane);
THCudaTensor* ones = THCudaTensor_newWithSize2d(state, 1,batchsize*size1*size2);
THCudaTensor_fill(state, ones, 1);
THCudaTensor_resize2d(state, gradBias, 1, nOutputPlane);
THCudaTensor_addmm(state, gradBias, 1, gradBias, scale, ones, gradOutTmp);
THCudaTensor_resize1d(state, gradBias, nOutputPlane);
THCudaTensor_free(state, ones);
THCudaTensor_free(state, gradOutTmp);
THCudaTensor_resize4d(state, gradWeight, nOutputPlane, kH, kW, nInputPlane);
// we resize gradOutput back to what it was...
THCudaTensor_free(state, kernelSlices);
return 1;
}
static const struct luaL_Reg cunxn_SpatialConvolutionUnfold__ [] = {
{"SpatialConvolutionUnfold_updateOutput", cunxn_SpatialConvolutionUnfold_updateOutput},
{"SpatialConvolutionUnfold_updateGradInput", cunxn_SpatialConvolutionUnfold_updateGradInput},
{"SpatialConvolutionUnfold_accGradParameters", cunxn_SpatialConvolutionUnfold_accGradParameters},
{NULL, NULL}
};
static void cunxn_SpatialConvolutionUnfold_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunxn_SpatialConvolutionUnfold__, "nn");
lua_pop(L,1);
}
| dc0c277ab38808be31692b539454d3efdf968bfe.cu | #include "utils.h"
#ifndef assert
#define assert(e) \
if (!(e)) { \
printf("failed assertion `%s'\n", #e); \
THError("aborting..."); \
};
#endif
/*
This file contains 2 kernels :
- copyPixelsInSlices.
- addPixelsInSlices.
The primary kernel is copyPixelsInSlices : it unfolds a 3D matrix into a 2D matrix in a way that the 2D convolution (with many kernels) becomes a matrix multiplication.
We call the resulting matrix "kernelSlices". Each row corresponds to a kW*kH*nInputPlane array.
Steps :
1) choose a pixel (pixi = blockIdx.x, pixj = blockIdx.y)
2) find which slices (coordinates (imin-imax, jmin-jmax)) will contain the pixel information
3) loop : copy the pixel information, jump to next slice (and position) by
moving the kernelSlices pointer ptrkslices by stridej = (kH*kW - dW) * nInputPlane
detailed example : pixel (4,4), kernels of size 5*5, stride dW=1 :
- 1st slice : top-left coordinates : (imin,jmin) . Pixel is in coordinates (4,4, position 25) of the slice.
- 2nd slice : top-left coordinates : (imin,jmin+1). Pixel is in coordinates (4,3, position 24) of the slice.
- 3rd slice : top-left coordinates : (imin,jmin+2). Pixel is in coordinates (4,2, position 23) of the slice.
- 4th slice : top-left coordinates : (imin,jmin+2). Pixel is in coordinates (4,1, position 22) of the slice.
- 5th slice : top-left coordinates : (imin,jmin+2). Pixel is in coordinates (4,0, position 21) of the slice.
- when jmax-jmin slices have been filled, we jump to the next series of slices by
moving ptrkslices by stridei = (((size2-jmax+jmin-1)*kH -dH)*kW + (jmax-jmin+1)*dW)*nInputPlane
- 1st slice : top-left coordinates : (imin+1,jmin) . Pixel is in coordinates (3,4, position 20) of the slice.
- 2nd slice : top-left coordinates : (imin+1,jmin+1). Pixel is in coordinates (3,3, position 19) of the slice.
- 3rd slice : top-left coordinates : (imin+1,jmin+2). Pixel is in coordinates (3,2, position 18) of the slice.
- 4th slice : top-left coordinates : (imin+1,jmin+2). Pixel is in coordinates (3,1, position 17) of the slice.
- 5th slice : top-left coordinates : (imin+1,jmin+2). Pixel is in coordinates (3,0, position 16) of the slice.
- ...
In case the pixel (pixi,pixj) is in the zero-padding, we fill the slice with zeros.
addPixelsInSlices is the same, except we read the contents of the array instead of writing.
*/
__global__ void copyPixelsInSlices(float *ptrinput0, float *ptrkslices0,
int dH, int dW, int kH, int kW, int size1, int size2, int isize1, int isize2, int nInputPlane, int padleft, int padright, int padup, int paddown, int inputstr0, int kslicesstr0, int batchsize)
{
const int pixi=blockIdx.x;
const int pixj=blockIdx.y;
const int blk =blockDim.x*blockDim.y;
const int tidx=threadIdx.x+blockDim.x*threadIdx.y;
__shared__ int _imin, _jmin, _imax, _jmax, _stridej, _stridei, _ksliceoffset, _inputoffset;
int imin, jmin, imax, jmax;
int stridej, stridei, ksliceoffset, inputoffset;
if(tidx==0)
{
imin=(pixi - (kH - 1) + (dH -1))/dH > 0 ? (pixi - (kH - 1) + (dH -1))/dH : 0 ;
jmin=(pixj - (kW - 1) + (dW -1))/dW > 0 ? (pixj - (kW - 1) + (dW -1))/dW : 0 ;
imax= pixi / dH < size1 ? pixi / dH : size1 - 1 ;
jmax= pixj / dW < size2 ? pixj / dW : size2 - 1 ;
stridej = (kH*kW - dW) * nInputPlane;
stridei = (((size2-jmax+jmin-1)*kH -dH)*kW + (jmax-jmin+1)*dW)*nInputPlane;
ksliceoffset = ((imin * size2 + jmin) * kH * kW + (pixi - imin * dH) * kW + (pixj - jmin*dW) ) * nInputPlane + kslicesstr0*blockIdx.z;
inputoffset = ((pixi-padup) * isize2 + (pixj-padleft)) * nInputPlane + inputstr0*blockIdx.z;
_imin=imin;
_jmin=jmin;
_imax=imax;
_jmax=jmax;
_stridej=stridej;
_stridei=stridei;
_ksliceoffset=ksliceoffset;
_inputoffset=inputoffset;
}
__syncthreads();
if(threadIdx.x==0 && threadIdx.y>0)
{
imin=_imin;
jmin=_jmin;
imax=_imax;
jmax=_jmax;
stridej=_stridej;
stridei=_stridei;
ksliceoffset=_ksliceoffset;
inputoffset=_inputoffset;
}
imin=__shfl(imin, 0);
jmin=__shfl(jmin, 0);
imax=__shfl(imax, 0);
jmax=__shfl(jmax, 0);
stridej=__shfl(stridej, 0);
stridei=__shfl(stridei, 0);
ksliceoffset=__shfl(ksliceoffset, 0);
inputoffset=__shfl(inputoffset, 0);
int i;
int j;
int k;
bool zeropad=pixi<padup || pixi>isize1-1+padup || pixj<padleft || pixj>isize2-1+padleft ;
float * ptrinput = ptrinput0 + inputoffset;
float * ptrkslices = ptrkslices0 + ksliceoffset;
for(i=imin; i<imax+1; i++) {
for(j=jmin; j<jmax+1; j++) {
if(zeropad)
{
for(k=tidx; k<nInputPlane; k+=blk) {
ptrkslices[k]=0;
}
}
else {
for(k=tidx; k<nInputPlane; k+=blk) {
ptrkslices[k]=ptrinput[k];
}
}
ptrkslices += stridej;
}
ptrkslices += stridei;
}
}
__global__ void copyPixelsInSlicesRGB(float *ptrinput0, float *ptrkslices0,
int dH, int dW, int kH, int kW, int size1, int size2, int isize1, int isize2, int nInputPlane, int padleft, int padright, int padup, int paddown, int inputstr0, int kslicesstr0, int batchsize)
{
// each block does one pixel of the input image
// each kernel slice is represented by its upper-left coordinates
const int pixi=blockIdx.x;
const int pixj=blockIdx.y*blockDim.y + threadIdx.y;
const int tidx=threadIdx.x;
const int batchindex=blockIdx.z*blockDim.z+threadIdx.z;
int i,j;
int imin, jmin, imax, jmax;
int inputoffset, ksliceoffset;
// step 1 : find which kernel slices contain the values of the pixel
__shared__ int _imin, _jmin[32], _imax, _jmax[32], _inputoffset[32][3], _ksliceoffset[32][3];
if(threadIdx.z==0)
{
imin=(pixi - (kH - 1) + (dH -1))/dH > 0 ? (pixi - (kH - 1) + (dH -1))/dH : 0 ;
jmin=(pixj - (kW - 1) + (dW -1))/dW > 0 ? (pixj - (kW - 1) + (dW -1))/dW : 0 ;
imax= pixi / dH < size1 ? pixi / dH : size1 - 1 ;
jmax= pixj / dW < size2 ? pixj / dW : size2 - 1 ;
if(threadIdx.x==0 && threadIdx.y==0)
{
_imin=imin;
_imax=imax;
}
if(threadIdx.x==0)
{
_jmin[threadIdx.y]=jmin;
_jmax[threadIdx.y]=jmax;
}
inputoffset = inputstr0*blockIdx.z*blockDim.z + ((pixi-padup) * isize2 + (pixj-padleft)) * nInputPlane ;
ksliceoffset= kslicesstr0*blockIdx.z*blockDim.z + ((imin * size2 + jmin) * kH * kW + (pixi - imin * dH) * kW + (pixj - jmin*dW) ) * nInputPlane;
_inputoffset[threadIdx.y][threadIdx.x]=inputoffset;
_ksliceoffset[threadIdx.y][threadIdx.x]=ksliceoffset;
}
__syncthreads();
if(batchindex >= batchsize) return;
if(pixj > isize2 + padleft + padright -1) return;
if(threadIdx.z>0)
{
imin=_imin;
imax=_imax;
jmin=_jmin[threadIdx.y];
jmax=_jmax[threadIdx.y];
inputoffset=_inputoffset[threadIdx.y][threadIdx.x];
ksliceoffset=_ksliceoffset[threadIdx.y][threadIdx.x];
}
// step 2 : move the pointers
// this one goes to where the pixel is at
ptrinput0 += inputoffset+inputstr0*threadIdx.z ;
ptrkslices0 += ksliceoffset+kslicesstr0*threadIdx.z ;
const int stridej = (kH*kW - dW) * nInputPlane;
const int stridei = (size2*kH-dH) * kW *nInputPlane - (jmax-jmin+1) * stridej ;
bool zeropad = pixi<padup || pixi>isize1-1+padup || pixj<padleft || pixj>isize2-1+padleft ;
// read pixel
// load the stuff first...
//for (b=0; b<batchsize; b++)
//{
float * ptrinput = ptrinput0;
float * ptrkslices = ptrkslices0;
float pixvalue;
if (zeropad) {
pixvalue=0;
}
else {
pixvalue=ptrinput[tidx];
}
// write to memory
for(i=imin; i<imax+1; i++) {
for(j=jmin; j<jmax+1; j++) {
if(zeropad)
{
ptrkslices[tidx]=0;
}
else {
ptrkslices[tidx]=pixvalue;
}
ptrkslices += stridej;
}
ptrkslices += stridei;
}
//}
}
__global__ void addPixelsInSlices(float *ptrgradinput0, float *ptrkslices0,
int dH, int dW, int kH, int kW, int size1, int size2, int isize1, int isize2, int nInputPlane, int padleft, int padright, int padup, int paddown, int gradinputstr0, int kslicesstr0, int batchsize)
{
const int pixi=blockIdx.x;
const int pixj=blockIdx.y;
const int blk =blockDim.x*blockDim.y;
const int tidx=threadIdx.x+blockDim.x*threadIdx.y;
bool zeropad=pixi<padup || pixi>isize1-1+padup || pixj<padleft || pixj>isize2-1+padleft ;
if(zeropad) return;
__shared__ int _imin, _jmin, _imax, _jmax, _stridej, _stridei, _ksliceoffset, _gradinputoffset;
int stridej, stridei, ksliceoffset, gradinputoffset;
int imin;
int jmin;
int imax;
int jmax;
if(threadIdx.y==0 && threadIdx.x==0)
{
imin=(pixi - (kH - 1) + (dH -1))/dH > 0 ? (pixi - (kH - 1) + (dH -1))/dH : 0 ;
jmin=(pixj - (kW - 1) + (dW -1))/dW > 0 ? (pixj - (kW - 1) + (dW -1))/dW : 0 ;
imax= pixi / dH < size1 ? pixi / dH : size1 - 1 ;
jmax= pixj / dW < size2 ? pixj / dW : size2 - 1 ;
stridej = (kH*kW - dW) * nInputPlane;
stridei = (((size2-jmax+jmin-1)*kH -dH)*kW + (jmax-jmin+1)*dW)*nInputPlane;
ksliceoffset = ((imin * size2 + jmin) * kH * kW + (pixi - imin * dH) * kW + (pixj - jmin*dW) ) * nInputPlane + kslicesstr0*blockIdx.z;
gradinputoffset = ((pixi-padup) * isize2 + (pixj-padleft)) * nInputPlane + gradinputstr0*blockIdx.z;
_imin=imin;
_jmin=jmin;
_imax=imax;
_jmax=jmax;
_stridej=stridej;
_stridei=stridei;
_ksliceoffset=ksliceoffset;
_gradinputoffset=gradinputoffset;
}
__syncthreads();
if(threadIdx.x==0 && threadIdx.y>0)
{
imin=_imin;
jmin=_jmin;
imax=_imax;
jmax=_jmax;
stridej=_stridej;
stridei=_stridei;
ksliceoffset=_ksliceoffset;
gradinputoffset=_gradinputoffset;
}
imin=__shfl(imin, 0);
jmin=__shfl(jmin, 0);
imax=__shfl(imax, 0);
jmax=__shfl(jmax, 0);
stridej=__shfl(stridej, 0);
stridei=__shfl(stridei, 0);
ksliceoffset=__shfl(ksliceoffset, 0);
gradinputoffset=__shfl(gradinputoffset, 0);
int i;
int j;
int k;
for(k=tidx; k<nInputPlane; k+=blk) {
float * ptrgradinput = ptrgradinput0 + gradinputoffset;
float * ptrkslices = ptrkslices0 + ksliceoffset;
float v=0;
for(i=imin; i<imax+1; i++) {
for(j=jmin; j<jmax+1; j++) {
v += ptrkslices[k];
ptrkslices += stridej;
}
ptrkslices += stridei;
}
ptrgradinput[k] += v;
}
}
__global__ void copyBiasToOutputs(float *ptrbias, float *ptroutput, const int size1, const int size2, const int nOutputPlane, const int linestride, const int imstride)
{
// each thread has a value to manage...
//const int blk =blockDim.x;
const int tidx=blockDim.x*blockIdx.x + threadIdx.x;
const int tidy=blockIdx.y;
const int tidz=blockIdx.z;
float val = ptrbias[tidx];
ptroutput+= tidz*imstride + tidy*linestride;
for(int k=0; k<size2; k++)
{
if(tidx<nOutputPlane) {
ptroutput[k*nOutputPlane+tidx]=val;
}
}
}
void copyBiasVector(THCudaTensor* output, THCudaTensor* bias)
{
float* ptrbias = THCudaTensor_data(NULL, bias);
float* ptroutput = THCudaTensor_data(NULL, output);
int nOutputPlane = bias->size[0];
int batchsize = output->size[0];
int size1 = output->size[1];
int size2 = output->size[2];
// fill output with biases
dim3 blocksbias ((nOutputPlane+31)/32, size1, batchsize);
dim3 threadsbias (32);
copyBiasToOutputs<<<blocksbias, threadsbias>>>(ptrbias, ptroutput, size1, size2, nOutputPlane, output->stride[1], output->stride[0]);
}
void sliceInput(THCudaTensor *input, THCudaTensor* kernelSlices, int kH, int kW, int dH, int dW, int padup, int paddown, int padleft, int padright)
{
// find the size of kernelslices
long batchsize = input->size[0];
long isize1 = input->size[1];
long isize2 = input->size[2];
long nInputPlane = input->size[3];
long size1 = (isize1 - kH + padup + paddown) / dH + 1;
long size2 = (isize2 - kW + padleft + padright) / dW + 1;
float* ptrkslices = THCudaTensor_data(NULL, kernelSlices);
float* ptrinput = THCudaTensor_data(NULL, input);
int inputstr0=input->stride[0];
int kslicesstr0=size1*size2*kW*kH*nInputPlane;
//kernel unfold inputs
if (nInputPlane ==3)
{
dim3 blocksRGB (isize1 + padup + paddown, (isize2 + padleft + padright+31)/32, (batchsize+3)/4);
dim3 threadsRGB (3,32,4);
copyPixelsInSlicesRGB <<<blocksRGB, threadsRGB>>>(ptrinput, ptrkslices,
dH, dW, kH, kW, size1, size2, isize1, isize2, nInputPlane, padleft, padright, padup, paddown, inputstr0, kslicesstr0, batchsize);
}
else
{
int b_y;
if (nInputPlane>1024)
{
b_y=32;
}
else
{
b_y=(nInputPlane+31)/32;
}
dim3 blocks (isize1 + padup + paddown, isize2 + padleft + padright, batchsize);
dim3 threads (32,b_y);
copyPixelsInSlices<<<blocks, threads>>>(ptrinput, ptrkslices,
dH, dW, kH, kW, size1, size2, isize1, isize2, nInputPlane, padleft, padright, padup, paddown, inputstr0, kslicesstr0, batchsize);
}
}
void unsliceGradient(THCudaTensor *backwardSlices, THCudaTensor *gradInput, THCudaTensor *gradOutput, int kH, int kW, int dH, int dW, int padup, int paddown, int padleft, int padright)
{
long batchsize = gradInput->size[0];
long isize1 = gradInput->size[1];
long isize2 = gradInput->size[2];
long nInputPlane = gradInput->size[3];
long size1 = gradOutput->size[1];
long size2 = gradOutput->size[2];
float* ptrbackslices = THCudaTensor_data(NULL, backwardSlices);
float* ptrgradinput = THCudaTensor_data(NULL, gradInput);
int b_y;
if (nInputPlane>1024)
{
b_y=32;
}
else
{
b_y=(nInputPlane+31)/32;
}
dim3 blocks (isize1 + padup + paddown, isize2 + padleft + padright, batchsize);
dim3 threads (32,b_y);
int gradinputstr0=gradInput->stride[0];
int kslicesstr0=size1*size2*kW*kH*nInputPlane;
addPixelsInSlices<<<blocks, threads>>>(ptrgradinput, ptrbackslices,
dH, dW, kH, kW, size1, size2, isize1, isize2, nInputPlane, padleft, padright, padup, paddown, gradinputstr0, kslicesstr0, batchsize);
}
static int cunxn_SpatialConvolutionUnfold_updateOutput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");
THCudaTensor *kernels = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor *bias = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "bias", "torch.CudaTensor");
// THCudaTensor *kSlices = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "kernelSlices", "torch.CudaTensor");
long kW = luaT_getfieldcheckint(L, 1, "kW");
long kH = luaT_getfieldcheckint(L, 1, "kH");
long dW = luaT_getfieldcheckint(L, 1, "dW");
long dH = luaT_getfieldcheckint(L, 1, "dH");
long padup = luaT_getfieldcheckint(L, 1, "padtop");
long paddown = luaT_getfieldcheckint(L, 1, "padbottom");
long padleft = luaT_getfieldcheckint(L, 1, "padleft");
long padright = luaT_getfieldcheckint(L, 1, "padright");
long nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
long nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
// input should be contiguous already but... well.
input = THCudaTensor_newContiguous(state, input);
// find the size of kernelslices
long batchsize = input->size[0];
long isize1 = input->size[1];
long isize2 = input->size[2];
long size1 = (isize1 - kH + padup + paddown) / dH + 1;
long size2 = (isize2 - kW + padleft + padright) / dW + 1;
THCudaTensor_resize4d(state, output, batchsize, size1, size2, nOutputPlane);
copyBiasVector(output, bias);
// unfold conv kernels by resizing
THCudaTensor_resize2d(state, kernels, nOutputPlane, kW*kH*nInputPlane);
THCudaTensor_transpose(state, kernels, NULL, 0, 1);
// in case there is not enough memory
size_t freeMem;
THCudaCheck(cudaMemGetInfo (&freeMem, NULL));
int nsplits=1;
while(batchsize/nsplits*size1*size2*kW*kH*nInputPlane * 4 > freeMem)
{
nsplits *= 2;
}
int newbatchsize=(batchsize+nsplits-1)/nsplits;
THCudaTensor* kernelSlices = THCudaTensor_newWithSize2d(state, newbatchsize*size1*size2,kW*kH*nInputPlane);
for(int split=0; split<nsplits; split++)
{
int splitsize=newbatchsize;
if(split*newbatchsize+splitsize > batchsize)
{
splitsize=batchsize-split*newbatchsize;
THCudaTensor_resize2d(state, kernelSlices, splitsize*size1*size2, kW*kH*nInputPlane);
}
THCudaTensor* inputSplit = THCudaTensor_newNarrow(state,input, 0, split*newbatchsize, splitsize);
sliceInput(inputSplit, kernelSlices, kH, kW, dH, dW, padup, paddown, padleft, padright);
THCudaTensor_free(state, inputSplit);
THCudaTensor* outputSplit = THCudaTensor_newNarrow(state, output, 0, split*newbatchsize, splitsize);
// put output in matrix mode
THCudaTensor_resize2d(state, outputSplit, splitsize* size1* size2, nOutputPlane);
// printf("sgemm\n");
THCudaTensor_addmm(state, outputSplit, 1, outputSplit, 1, kernelSlices, kernels);
THCudaTensor_free(state, outputSplit);
}
THCudaTensor_free(state, kernelSlices);
THCudaTensor_transpose(state, kernels, NULL, 0, 1);
THCudaTensor_resize4d(state, kernels, nOutputPlane, kH, kW, nInputPlane);
// check for errors
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in copyPixelsInSlices: %s\n", cudaGetErrorString(err));
THError("aborting");
}
THCudaTensor_resize4d(state, output, batchsize, size1, size2, nOutputPlane);
THCudaTensor_free(state, input);
return 1;
}
static int cunxn_SpatialConvolutionUnfold_updateGradInput(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
long kW = luaT_getfieldcheckint(L, 1, "kW");
long kH = luaT_getfieldcheckint(L, 1, "kH");
long dW = luaT_getfieldcheckint(L, 1, "dW");
long dH = luaT_getfieldcheckint(L, 1, "dH");
long padup = luaT_getfieldcheckint(L, 1, "padtop");
long paddown = luaT_getfieldcheckint(L, 1, "padbottom");
long padleft = luaT_getfieldcheckint(L, 1, "padleft");
long padright = luaT_getfieldcheckint(L, 1, "padright");
long nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
long nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
THCudaTensor *kernels = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "weight", "torch.CudaTensor");
THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");
THCudaTensor_resize2d(state, kernels, nOutputPlane, kW*kH*nInputPlane);
long batchsize = input->size[0];
long size1 = gradOutput->size[1];
long size2 = gradOutput->size[2];
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_fill(state, gradInput, 0);
// in case there is not enough memory
size_t freeMem;
THCudaCheck(cudaMemGetInfo (&freeMem, NULL));
int nsplits=1;
while(batchsize/nsplits*size1*size2*kW*kH*nInputPlane * 4 > freeMem)
{
nsplits *= 2;
}
int newbatchsize=(batchsize+nsplits-1)/nsplits;
THCudaTensor* backwardSlices = THCudaTensor_newWithSize2d(state, newbatchsize*size1*size2,kW*kH*nInputPlane);
for(int split=0; split<nsplits; split++)
{
int splitsize=newbatchsize;
if(split*newbatchsize+splitsize > batchsize)
{
splitsize=batchsize-split*newbatchsize;
THCudaTensor_resize2d(state, backwardSlices, splitsize*size1*size2, kW*kH*nInputPlane);
}
THCudaTensor* gradOutputSplit = THCudaTensor_newNarrow(state, gradOutput, 0, split*newbatchsize, splitsize);
THCudaTensor* gradInputSplit = THCudaTensor_newNarrow(state, gradInput, 0, split*newbatchsize, splitsize);
THCudaTensor_resize2d(state, gradOutputSplit, splitsize*size1*size2, nOutputPlane);
// backprop gradinput into the slices
THCudaTensor_addmm(state, backwardSlices, 0, backwardSlices, 1, gradOutputSplit, kernels);
THCudaTensor_resize4d(state, gradOutputSplit, splitsize, size1, size2, nOutputPlane);
unsliceGradient(backwardSlices, gradInputSplit, gradOutputSplit, kH, kW, dH, dW, padup, paddown, padleft, padright);
THCudaTensor_free(state, gradInputSplit);
THCudaTensor_free(state, gradOutputSplit);
}
// we resize gradOutput back to what it was...
THCudaTensor_resize4d(state, gradOutput, batchsize, size1, size2, nOutputPlane);
THCudaTensor_resize4d(state, kernels, nOutputPlane, kH, kW, nInputPlane);
THCudaTensor_free(state, backwardSlices);
return 1;
}
static int cunxn_SpatialConvolutionUnfold_accGradParameters(lua_State *L)
{
THCState *state = getCutorchState(L);
THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
long kW = luaT_getfieldcheckint(L, 1, "kW");
long kH = luaT_getfieldcheckint(L, 1, "kH");
long dW = luaT_getfieldcheckint(L, 1, "dW");
long dH = luaT_getfieldcheckint(L, 1, "dH");
long padup = luaT_getfieldcheckint(L, 1, "padtop");
long paddown = luaT_getfieldcheckint(L, 1, "padbottom");
long padleft = luaT_getfieldcheckint(L, 1, "padleft");
long padright = luaT_getfieldcheckint(L, 1, "padright");
long nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
long nInputPlane = luaT_getfieldcheckint(L, 1, "nInputPlane");
float scale = luaL_optnumber(L, 4, 1);
// find the size of kernelslices
long batchsize = gradOutput->size[0];
long size1 = gradOutput->size[1];
long size2 = gradOutput->size[2];
THCudaTensor *gradWeight = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradWeight", "torch.CudaTensor");
THCudaTensor *gradBias = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradBias", "torch.CudaTensor");
float* ptrgradbias = THCudaTensor_data(state, gradBias);
float* ptrgradoutput = THCudaTensor_data(state, gradOutput);
dim3 blocksgradbias (nOutputPlane+31/32);
dim3 threadsgradbias (32,32);
THCudaTensor_resize2d(state, gradWeight, nOutputPlane, kW*kH*nInputPlane);
size_t freeMem;
THCudaCheck(cudaMemGetInfo (&freeMem, NULL));
int nsplits=1;
while(batchsize/nsplits*size1*size2*kW*kH*nInputPlane * 4 > freeMem)
{
nsplits *= 2;
}
int newbatchsize=(batchsize+nsplits-1)/nsplits;
THCudaTensor* kernelSlices = THCudaTensor_newWithSize2d(state, newbatchsize*size1*size2,kW*kH*nInputPlane);
for(int split=0; split<nsplits; split++)
{
int splitsize=newbatchsize;
if(split*newbatchsize+splitsize > batchsize)
{
splitsize=batchsize-split*newbatchsize;
THCudaTensor_resize2d(state, kernelSlices, splitsize*size1*size2, kW*kH*nInputPlane);
}
THCudaTensor* gradOutputSplit = THCudaTensor_newNarrow(state, gradOutput, 0, split*newbatchsize, splitsize);
THCudaTensor_resize2d(state, gradOutputSplit, splitsize*size1* size2, nOutputPlane);
THCudaTensor_transpose(state, gradOutputSplit, NULL, 0, 1);
THCudaTensor* inputSplit = THCudaTensor_newNarrow(state, input, 0, split*newbatchsize, splitsize);
sliceInput(inputSplit, kernelSlices, kH, kW, dH, dW, padup, paddown, padleft, padright);
THCudaTensor_addmm(state, gradWeight, 1, gradWeight, scale, gradOutputSplit, kernelSlices);
THCudaTensor_free(state, inputSplit);
THCudaTensor_free(state, gradOutputSplit);
}
THCudaTensor* gradOutTmp = THCudaTensor_newNarrow(state, gradOutput, 0, 0, batchsize);
THCudaTensor_resize2d(state, gradOutTmp, batchsize*size1*size2, nOutputPlane);
THCudaTensor* ones = THCudaTensor_newWithSize2d(state, 1,batchsize*size1*size2);
THCudaTensor_fill(state, ones, 1);
THCudaTensor_resize2d(state, gradBias, 1, nOutputPlane);
THCudaTensor_addmm(state, gradBias, 1, gradBias, scale, ones, gradOutTmp);
THCudaTensor_resize1d(state, gradBias, nOutputPlane);
THCudaTensor_free(state, ones);
THCudaTensor_free(state, gradOutTmp);
THCudaTensor_resize4d(state, gradWeight, nOutputPlane, kH, kW, nInputPlane);
// we resize gradOutput back to what it was...
THCudaTensor_free(state, kernelSlices);
return 1;
}
static const struct luaL_Reg cunxn_SpatialConvolutionUnfold__ [] = {
{"SpatialConvolutionUnfold_updateOutput", cunxn_SpatialConvolutionUnfold_updateOutput},
{"SpatialConvolutionUnfold_updateGradInput", cunxn_SpatialConvolutionUnfold_updateGradInput},
{"SpatialConvolutionUnfold_accGradParameters", cunxn_SpatialConvolutionUnfold_accGradParameters},
{NULL, NULL}
};
static void cunxn_SpatialConvolutionUnfold_init(lua_State *L)
{
luaT_pushmetatable(L, "torch.CudaTensor");
luaT_registeratname(L, cunxn_SpatialConvolutionUnfold__, "nn");
lua_pop(L,1);
}
|
a74716f8b58501ea537113fddb89eb9a1c89559e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#define GPU
//#define CUDNN
extern "C" {
#include "convolutional_layer.h"
#include "deconvolutional_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
extern "C" void forward_deconvolutional_layer_gpu(deconvolutional_layer layer, network_state state)
{
int i;
int out_h = deconvolutional_out_height(layer);
int out_w = deconvolutional_out_width(layer);
int size = out_h*out_w;
int m = layer.size*layer.size*layer.n;
int n = layer.h*layer.w;
int k = layer.c;
fill_ongpu(layer.outputs*layer.batch, 0, layer.output_gpu, 1);
for(i = 0; i < layer.batch; ++i){
float *a = layer.weights_gpu;
float *b = state.input + i*layer.c*layer.h*layer.w;
float *c = layer.col_image_gpu;
gemm_ongpu(1,0,m,n,k,1,a,m,b,n,0,c,n);
col2im_ongpu(c, layer.n, out_h, out_w, layer.size, layer.stride, 0, layer.output_gpu+i*layer.n*size);
}
add_bias_gpu(layer.output_gpu, layer.biases_gpu, layer.batch, layer.n, size);
activate_array(layer.output_gpu, layer.batch*layer.n*size, layer.activation);
}
extern "C" void backward_deconvolutional_layer_gpu(deconvolutional_layer layer, network_state state)
{
float alpha = 1./layer.batch;
int out_h = deconvolutional_out_height(layer);
int out_w = deconvolutional_out_width(layer);
int size = out_h*out_w;
int i;
gradient_array(layer.output_gpu, size*layer.n*layer.batch, layer.activation, layer.delta_gpu);
backward_bias(layer.bias_updates_gpu, layer.delta, layer.batch, layer.n, size);
if(state.delta) memset(state.delta, 0, layer.batch*layer.h*layer.w*layer.c*sizeof(float));
for(i = 0; i < layer.batch; ++i){
int m = layer.c;
int n = layer.size*layer.size*layer.n;
int k = layer.h*layer.w;
float *a = state.input + i*m*n;
float *b = layer.col_image_gpu;
float *c = layer.weight_updates_gpu;
im2col_ongpu(layer.delta_gpu + i*layer.n*size, layer.n, out_h, out_w,
layer.size, layer.stride, 0, b);
gemm_ongpu(0,1,m,n,k,alpha,a,k,b,k,1,c,n);
if(state.delta){
int m = layer.c;
int n = layer.h*layer.w;
int k = layer.size*layer.size*layer.n;
float *a = layer.weights_gpu;
float *b = layer.col_image_gpu;
float *c = state.delta + i*n*m;
gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
}
}
extern "C" void pull_deconvolutional_layer(deconvolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
}
extern "C" void push_deconvolutional_layer(deconvolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
}
extern "C" void update_deconvolutional_layer_gpu(deconvolutional_layer layer, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
axpy_ongpu(size, -decay, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
| a74716f8b58501ea537113fddb89eb9a1c89559e.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#define GPU
//#define CUDNN
extern "C" {
#include "convolutional_layer.h"
#include "deconvolutional_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
extern "C" void forward_deconvolutional_layer_gpu(deconvolutional_layer layer, network_state state)
{
int i;
int out_h = deconvolutional_out_height(layer);
int out_w = deconvolutional_out_width(layer);
int size = out_h*out_w;
int m = layer.size*layer.size*layer.n;
int n = layer.h*layer.w;
int k = layer.c;
fill_ongpu(layer.outputs*layer.batch, 0, layer.output_gpu, 1);
for(i = 0; i < layer.batch; ++i){
float *a = layer.weights_gpu;
float *b = state.input + i*layer.c*layer.h*layer.w;
float *c = layer.col_image_gpu;
gemm_ongpu(1,0,m,n,k,1,a,m,b,n,0,c,n);
col2im_ongpu(c, layer.n, out_h, out_w, layer.size, layer.stride, 0, layer.output_gpu+i*layer.n*size);
}
add_bias_gpu(layer.output_gpu, layer.biases_gpu, layer.batch, layer.n, size);
activate_array(layer.output_gpu, layer.batch*layer.n*size, layer.activation);
}
extern "C" void backward_deconvolutional_layer_gpu(deconvolutional_layer layer, network_state state)
{
float alpha = 1./layer.batch;
int out_h = deconvolutional_out_height(layer);
int out_w = deconvolutional_out_width(layer);
int size = out_h*out_w;
int i;
gradient_array(layer.output_gpu, size*layer.n*layer.batch, layer.activation, layer.delta_gpu);
backward_bias(layer.bias_updates_gpu, layer.delta, layer.batch, layer.n, size);
if(state.delta) memset(state.delta, 0, layer.batch*layer.h*layer.w*layer.c*sizeof(float));
for(i = 0; i < layer.batch; ++i){
int m = layer.c;
int n = layer.size*layer.size*layer.n;
int k = layer.h*layer.w;
float *a = state.input + i*m*n;
float *b = layer.col_image_gpu;
float *c = layer.weight_updates_gpu;
im2col_ongpu(layer.delta_gpu + i*layer.n*size, layer.n, out_h, out_w,
layer.size, layer.stride, 0, b);
gemm_ongpu(0,1,m,n,k,alpha,a,k,b,k,1,c,n);
if(state.delta){
int m = layer.c;
int n = layer.h*layer.w;
int k = layer.size*layer.size*layer.n;
float *a = layer.weights_gpu;
float *b = layer.col_image_gpu;
float *c = state.delta + i*n*m;
gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
}
}
}
extern "C" void pull_deconvolutional_layer(deconvolutional_layer layer)
{
cuda_pull_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.biases_gpu, layer.biases, layer.n);
cuda_pull_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_pull_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
}
extern "C" void push_deconvolutional_layer(deconvolutional_layer layer)
{
cuda_push_array(layer.weights_gpu, layer.weights, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.biases_gpu, layer.biases, layer.n);
cuda_push_array(layer.weight_updates_gpu, layer.weight_updates, layer.c*layer.n*layer.size*layer.size);
cuda_push_array(layer.bias_updates_gpu, layer.bias_updates, layer.n);
}
extern "C" void update_deconvolutional_layer_gpu(deconvolutional_layer layer, float learning_rate, float momentum, float decay)
{
int size = layer.size*layer.size*layer.c*layer.n;
axpy_ongpu(layer.n, learning_rate, layer.bias_updates_gpu, 1, layer.biases_gpu, 1);
scal_ongpu(layer.n, momentum, layer.bias_updates_gpu, 1);
axpy_ongpu(size, -decay, layer.weights_gpu, 1, layer.weight_updates_gpu, 1);
axpy_ongpu(size, learning_rate, layer.weight_updates_gpu, 1, layer.weights_gpu, 1);
scal_ongpu(size, momentum, layer.weight_updates_gpu, 1);
}
|
99b9954d25990942fdcac2d445b535950ca5cf13.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data,
int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_data[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_data[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_data[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* rand_idx, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h;
int hend = min(hstart + kernel_h, height);
int wstart = pw * stride_w;
int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
}
}
float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_data[h * width + w];
return;
}
}
}
}
}
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h;
int hend = min(hstart + kernel_h, height);
int wstart = pw * stride_w;
int wend = min(wstart + kernel_w, width);
// We set cumsum to be 0 to avoid divide-by-zero problems
Dtype cumsum = FLT_MIN;
Dtype cumvalues = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w];
}
}
top_data[index] = cumvalues / cumsum;
}
}
template <typename Dtype>
Dtype PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
int count = (*top)[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top->size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = (*top)[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (Caffe::phase() == Caffe::TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
return Dtype(0.);
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff,
const int* mask, const Dtype* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
int phend = min((h + pad_h) / stride_h + 1, pooled_height);
int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
int offset = (n * channels + c) * pooled_height * pooled_width;
top_diff += offset;
if (mask) {
mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
} else {
top_mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, pooled_height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* rand_idx, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, pooled_height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
rand_idx += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const int count = (*bottom)[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_CLASS(PoolingLayer);
} // namespace caffe
| 99b9954d25990942fdcac2d445b535950ca5cf13.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxPoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data,
int* mask, Dtype* top_mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
Dtype maxval = -FLT_MAX;
int maxidx = -1;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
if (bottom_data[h * width + w] > maxval) {
maxidx = h * width + w;
maxval = bottom_data[maxidx];
}
}
}
top_data[index] = maxval;
if (mask) {
mask[index] = maxidx;
} else {
top_mask[index] = maxidx;
}
}
}
template <typename Dtype>
__global__ void AvePoolForward(const int nthreads, const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, height);
wend = min(wend, width);
Dtype aveval = 0;
bottom_data += (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
aveval += bottom_data[h * width + w];
}
}
top_data[index] = aveval / pool_size;
}
}
template <typename Dtype>
__global__ void StoPoolForwardTrain(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* rand_idx, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h;
int hend = min(hstart + kernel_h, height);
int wstart = pw * stride_w;
int wend = min(wstart + kernel_w, width);
Dtype cumsum = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
}
}
float thres = rand_idx[index] * cumsum;
// Second pass: get value, and set index.
cumsum = 0;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
if (cumsum >= thres) {
rand_idx[index] = ((n * channels + c) * height + h) * width + w;
top_data[index] = bottom_data[h * width + w];
return;
}
}
}
}
}
template <typename Dtype>
__global__ void StoPoolForwardTest(const int nthreads,
const Dtype* bottom_data,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
int hstart = ph * stride_h;
int hend = min(hstart + kernel_h, height);
int wstart = pw * stride_w;
int wend = min(wstart + kernel_w, width);
// We set cumsum to be 0 to avoid divide-by-zero problems
Dtype cumsum = FLT_MIN;
Dtype cumvalues = 0.;
bottom_data += (n * channels + c) * height * width;
// First pass: get sum
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
cumsum += bottom_data[h * width + w];
cumvalues += bottom_data[h * width + w] * bottom_data[h * width + w];
}
}
top_data[index] = cumvalues / cumsum;
}
}
template <typename Dtype>
Dtype PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
int count = (*top)[0]->count();
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top->size() > 1;
int* mask = NULL;
Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = (*top)[1]->mutable_gpu_data();
} else {
mask = max_idx_.mutable_gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data,
mask, top_mask);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
if (Caffe::phase() == Caffe::TRAIN) {
// We need to create the random index as well.
caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1),
rand_idx_.mutable_gpu_data());
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_,
rand_idx_.mutable_gpu_data(), top_data);
} else {
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, bottom[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, top_data);
}
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
return Dtype(0.);
}
template <typename Dtype>
__global__ void MaxPoolBackward(const int nthreads, const Dtype* top_diff,
const int* mask, const Dtype* top_mask, const int num, const int channels,
const int height, const int width, const int pooled_height,
const int pooled_width, const int kernel_h, const int kernel_w,
const int stride_h, const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart =
(h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1;
int phend = min((h + pad_h) / stride_h + 1, pooled_height);
int pwstart =
(w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1;
int pwend = min((w + pad_w) / stride_w + 1, pooled_width);
Dtype gradient = 0;
int offset = (n * channels + c) * pooled_height * pooled_width;
top_diff += offset;
if (mask) {
mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
} else {
top_mask += offset;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (top_mask[ph * pooled_width + pw] == h * width + w) {
gradient += top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void AvePoolBackward(const int nthreads, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, const int pad_h, const int pad_w,
Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width + pad_w;
int h = (index / width) % height + pad_h;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, pooled_height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_h;
int wstart = pw * stride_w - pad_w;
int hend = min(hstart + kernel_h, height + pad_h);
int wend = min(wstart + kernel_w, width + pad_w);
int pool_size = (hend - hstart) * (wend - wstart);
gradient += top_diff[ph * pooled_width + pw] / pool_size;
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
__global__ void StoPoolBackward(const int nthreads,
const Dtype* rand_idx, const Dtype* top_diff,
const int num, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const int kernel_h, const int kernel_w, const int stride_h,
const int stride_w, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
int phend = min(h / stride_h + 1, pooled_height);
int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
int pwend = min(w / stride_w + 1, pooled_width);
Dtype gradient = 0;
rand_idx += (n * channels + c) * pooled_height * pooled_width;
top_diff += (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
gradient += top_diff[ph * pooled_width + pw] *
(index == static_cast<int>(rand_idx[ph * pooled_width + pw]));
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
const int count = (*bottom)[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
// We'll output the mask to top[1] if it's of size >1.
const bool use_top_mask = top.size() > 1;
const int* mask = NULL;
const Dtype* top_mask = NULL;
switch (this->layer_param_.pooling_param().pool()) {
case PoolingParameter_PoolMethod_MAX:
if (use_top_mask) {
top_mask = top[1]->gpu_data();
} else {
mask = max_idx_.gpu_data();
}
// NOLINT_NEXT_LINE(whitespace/operators)
MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, top_mask, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_,
kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_,
bottom_diff);
break;
case PoolingParameter_PoolMethod_AVE:
// NOLINT_NEXT_LINE(whitespace/operators)
AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top[0]->num(), channels_,
height_, width_, pooled_height_, pooled_width_, kernel_h_,
kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
break;
case PoolingParameter_PoolMethod_STOCHASTIC:
// NOLINT_NEXT_LINE(whitespace/operators)
StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, rand_idx_.gpu_data(), top_diff,
top[0]->num(), channels_, height_, width_, pooled_height_,
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_,
bottom_diff);
break;
default:
LOG(FATAL) << "Unknown pooling method.";
}
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_CLASS(PoolingLayer);
} // namespace caffe
|
9e3b5f2357db52636b9f3013d6a4ed03f3319a20.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include <ceed.h>
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#include <magma_v2.h>
#include "magma_common_device.cuh"
#include "weight_device.cuh"
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ CeedScalar shared_data[];
template<typename T, int Q>
static __global__ void
magma_weight_3d_kernel(const T *dqweight1d, T *dV, const int v_stride, const int nelem)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int elem_id = (blockIdx.x * blockDim.y) + ty;
if (elem_id >= nelem) return;
T rV[1][1][Q]; // allocate with DIM=NCOMP=1, but sizes may differ for a fused operator
// global memory pointers
dV += elem_id * v_stride;
// shared memory pointers
T* sTweight = (T*)shared_data;
// read dqweight_1d
if (tx < Q) {
sTweight[tx] = dqweight1d[tx];
}
__syncthreads();
magma_weight_3d_device<T, 1, 1, Q, 0, 0>(sTweight, rV, tx);
// write V
if (tx < (Q*Q)) {
for(int j = 0; j < Q; j++) {
dV[ j*(Q*Q) + tx ] = rV[0][0][j];
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////
template<typename T, int Q>
static magma_int_t
magma_weight_3d_kernel_driver(
const T *dqweight1d, T *dV, magma_int_t v_stride,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_device_t device;
magma_getdevice( &device );
magma_int_t shmem_max, nthreads_max;
magma_int_t nthreads = (Q*Q);
magma_int_t ntcol = (maxthreads < nthreads) ? 1 : (maxthreads / nthreads);
magma_int_t shmem = 0;
shmem += sizeof(T) * Q; // for dqweight1d
hipDeviceGetAttribute (&nthreads_max, hipDeviceAttributeMaxThreadsPerBlock, device);
#if TORCH_HIP_VERSION >= 9000
hipDeviceGetAttribute (&shmem_max, hipDeviceAttributeSharedMemPerBlockOptin, device);
if (shmem <= shmem_max) {
hipFuncSetAttribute(magma_weight_3d_kernel<T, Q>, hipFuncAttributeMaxDynamicSharedMemorySize, shmem);
}
#else
hipDeviceGetAttribute (&shmem_max, hipDeviceAttributeMaxSharedMemoryPerBlock, device);
#endif // TORCH_HIP_VERSION >= 9000
if ( (nthreads*ntcol) > nthreads_max || shmem > shmem_max ) {
return 1; // launch failed
}
else {
magma_int_t nblocks = (nelem + ntcol-1) / ntcol;
dim3 threads(nthreads, ntcol, 1);
dim3 grid(nblocks, 1, 1);
hipLaunchKernelGGL(( magma_weight_3d_kernel<T, Q>), dim3(grid), dim3(threads), shmem, magma_queue_get_cuda_stream(queue),
dqweight1d, dV, v_stride, nelem);
return (hipPeekAtLastError() == hipSuccess) ? 0 : 1;
}
}
//////////////////////////////////////////////////////////////////////////////////////////
static magma_int_t
magma_weight_3d_q(
magma_int_t Q, const CeedScalar *dqweight1d,
CeedScalar *dV, magma_int_t v_stride,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (Q) {
case 1:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 1>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 2>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 3>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 4:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 4>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 5:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 5>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 6:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 6>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 7:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 7>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 8:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 8>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 9:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 9>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 10:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar,10>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
extern "C" magma_int_t
magma_weight_3d(
magma_int_t Q, const CeedScalar *dqweight1d,
CeedScalar *dV, magma_int_t v_stride,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
magma_weight_3d_q(Q, dqweight1d, dV, v_stride, nelem, maxthreads, queue);
return launch_failed;
}
| 9e3b5f2357db52636b9f3013d6a4ed03f3319a20.cu | // Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include <ceed.h>
#include <cuda.h> // for CUDA_VERSION
#include <magma_v2.h>
#include "magma_common_device.cuh"
#include "weight_device.cuh"
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ CeedScalar shared_data[];
template<typename T, int Q>
static __global__ void
magma_weight_3d_kernel(const T *dqweight1d, T *dV, const int v_stride, const int nelem)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int elem_id = (blockIdx.x * blockDim.y) + ty;
if (elem_id >= nelem) return;
T rV[1][1][Q]; // allocate with DIM=NCOMP=1, but sizes may differ for a fused operator
// global memory pointers
dV += elem_id * v_stride;
// shared memory pointers
T* sTweight = (T*)shared_data;
// read dqweight_1d
if (tx < Q) {
sTweight[tx] = dqweight1d[tx];
}
__syncthreads();
magma_weight_3d_device<T, 1, 1, Q, 0, 0>(sTweight, rV, tx);
// write V
if (tx < (Q*Q)) {
for(int j = 0; j < Q; j++) {
dV[ j*(Q*Q) + tx ] = rV[0][0][j];
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////
template<typename T, int Q>
static magma_int_t
magma_weight_3d_kernel_driver(
const T *dqweight1d, T *dV, magma_int_t v_stride,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_device_t device;
magma_getdevice( &device );
magma_int_t shmem_max, nthreads_max;
magma_int_t nthreads = (Q*Q);
magma_int_t ntcol = (maxthreads < nthreads) ? 1 : (maxthreads / nthreads);
magma_int_t shmem = 0;
shmem += sizeof(T) * Q; // for dqweight1d
cudaDeviceGetAttribute (&nthreads_max, cudaDevAttrMaxThreadsPerBlock, device);
#if CUDA_VERSION >= 9000
cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlockOptin, device);
if (shmem <= shmem_max) {
cudaFuncSetAttribute(magma_weight_3d_kernel<T, Q>, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem);
}
#else
cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlock, device);
#endif // CUDA_VERSION >= 9000
if ( (nthreads*ntcol) > nthreads_max || shmem > shmem_max ) {
return 1; // launch failed
}
else {
magma_int_t nblocks = (nelem + ntcol-1) / ntcol;
dim3 threads(nthreads, ntcol, 1);
dim3 grid(nblocks, 1, 1);
magma_weight_3d_kernel<T, Q><<<grid, threads, shmem, magma_queue_get_cuda_stream(queue)>>>
(dqweight1d, dV, v_stride, nelem);
return (cudaPeekAtLastError() == cudaSuccess) ? 0 : 1;
}
}
//////////////////////////////////////////////////////////////////////////////////////////
static magma_int_t
magma_weight_3d_q(
magma_int_t Q, const CeedScalar *dqweight1d,
CeedScalar *dV, magma_int_t v_stride,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (Q) {
case 1:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 1>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 2>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 3>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 4:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 4>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 5:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 5>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 6:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 6>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 7:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 7>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 8:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 8>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 9:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar, 9>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 10:
launch_failed = magma_weight_3d_kernel_driver<CeedScalar,10>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
extern "C" magma_int_t
magma_weight_3d(
magma_int_t Q, const CeedScalar *dqweight1d,
CeedScalar *dV, magma_int_t v_stride,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
magma_weight_3d_q(Q, dqweight1d, dV, v_stride, nelem, maxthreads, queue);
return launch_failed;
}
|
fc60f7f30827d7a453454e0ef0fa0144f92b6f77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "model.h"
Model::Model(float learning_rate){
this->learning_rate = learning_rate;
hipblasCreate(&handle);
};
Model::~Model(){}
// Feed one sample
float Model::feed(float data[28][28], int label, bool isTrain){
// should timing here
forward(data);
conv_2.backward_reset();
Loss::softmax(conv_2.d_output, conv_2.output, label, 10);
float err;
hipblasSnrm2(handle, 10, conv_2.d_output, 1, &err);
if(isTrain)
backward();
return err;
};
// Forward
void Model::forward(float data[28][28]){
// reset
conv_1.forward_reset();
sigm_1.forward_reset();
conv_2.forward_reset();
// sigm_2.forward_reset();
// conv_3.forward_reset();
// forward
input_layer.forward(*data);
conv_1.forward(input_layer.output);
sigm_1.forward(conv_1.output);
conv_2.forward(sigm_1.output);
// sigm_2.forward(conv_2.output);
// conv_3.forward(conv_2.output);
};
// Backward
void Model::backward(){
// reset (Do not reset last layer)
conv_1.backward_reset();
sigm_1.backward_reset();
// conv_2.backward_reset();
// sigm_2.backward_reset();
// backward
// conv_3.backward(conv_2.output, conv_2.d_output);
// sigm_2.backward(conv_2.d_output);
conv_2.backward(sigm_1.output, sigm_1.d_output);
sigm_1.backward(conv_1.d_output);
conv_1.backward(input_layer.output, NULL);
apply_grad(conv_1.weight, conv_1.d_weight, conv_1.weight_dim);
apply_grad(conv_2.weight, conv_2.d_weight, conv_2.weight_dim);
// apply_grad(conv_3.weight, conv_3.d_weight, conv_3.weight_dim);
// apply_grad(conv_4.weight, conv_4.d_weight, conv_4.weight_dim);
};
void Model::apply_grad(float* weight, float* d_weight, int N)
{
hipLaunchKernelGGL(( cuda_apply_grad), dim3(64),dim3(64), 0, 0, weight, d_weight, N, learning_rate);
}
__global__ void cuda_apply_grad(float* weight, float* d_weight, int N, float learning_rate)
{
int pos = blockIdx.x * blockDim.x + threadIdx.x;
int size = blockDim.x * gridDim.x;
for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) {
weight[idx] += learning_rate * d_weight[idx];
}
}
// Predict the class of input sample
int Model::predict(float data[28][28])
{
forward(data);
int predLabel = 0;
float outputVec[10];
hipMemcpy(outputVec, conv_2.output, sizeof(float) * 10, hipMemcpyDeviceToHost);
for (int i = 1; i < 10; ++i) {
if (outputVec[predLabel] < outputVec[i]) {
predLabel = i;
}
}
return predLabel;
}
| fc60f7f30827d7a453454e0ef0fa0144f92b6f77.cu | #include "model.h"
Model::Model(float learning_rate){
this->learning_rate = learning_rate;
cublasCreate(&handle);
};
Model::~Model(){}
// Feed one sample
float Model::feed(float data[28][28], int label, bool isTrain){
// should timing here
forward(data);
conv_2.backward_reset();
Loss::softmax(conv_2.d_output, conv_2.output, label, 10);
float err;
cublasSnrm2(handle, 10, conv_2.d_output, 1, &err);
if(isTrain)
backward();
return err;
};
// Forward
void Model::forward(float data[28][28]){
// reset
conv_1.forward_reset();
sigm_1.forward_reset();
conv_2.forward_reset();
// sigm_2.forward_reset();
// conv_3.forward_reset();
// forward
input_layer.forward(*data);
conv_1.forward(input_layer.output);
sigm_1.forward(conv_1.output);
conv_2.forward(sigm_1.output);
// sigm_2.forward(conv_2.output);
// conv_3.forward(conv_2.output);
};
// Backward
void Model::backward(){
// reset (Do not reset last layer)
conv_1.backward_reset();
sigm_1.backward_reset();
// conv_2.backward_reset();
// sigm_2.backward_reset();
// backward
// conv_3.backward(conv_2.output, conv_2.d_output);
// sigm_2.backward(conv_2.d_output);
conv_2.backward(sigm_1.output, sigm_1.d_output);
sigm_1.backward(conv_1.d_output);
conv_1.backward(input_layer.output, NULL);
apply_grad(conv_1.weight, conv_1.d_weight, conv_1.weight_dim);
apply_grad(conv_2.weight, conv_2.d_weight, conv_2.weight_dim);
// apply_grad(conv_3.weight, conv_3.d_weight, conv_3.weight_dim);
// apply_grad(conv_4.weight, conv_4.d_weight, conv_4.weight_dim);
};
void Model::apply_grad(float* weight, float* d_weight, int N)
{
cuda_apply_grad<<<64,64>>>(weight, d_weight, N, learning_rate);
}
__global__ void cuda_apply_grad(float* weight, float* d_weight, int N, float learning_rate)
{
int pos = blockIdx.x * blockDim.x + threadIdx.x;
int size = blockDim.x * gridDim.x;
for (int idx = N * pos / size; idx < N * (pos+1) / size; ++idx) {
weight[idx] += learning_rate * d_weight[idx];
}
}
// Predict the class of input sample
int Model::predict(float data[28][28])
{
forward(data);
int predLabel = 0;
float outputVec[10];
cudaMemcpy(outputVec, conv_2.output, sizeof(float) * 10, cudaMemcpyDeviceToHost);
for (int i = 1; i < 10; ++i) {
if (outputVec[predLabel] < outputVec[i]) {
predLabel = i;
}
}
return predLabel;
}
|
j2d9pt-512-9-256_host.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "j2d9pt-512-9-256_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 18
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_9), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
( 7.1f * A[t%2][i-2][j] + 5.1f * A[t%2][i-1][j] + 9.2f * A[t%2][i][j-2] +
12.1f * A[t%2][i][j-1] + 15.f * A[t%2][i][j] + 12.2f * A[t%2][i][j+1] +
9.1f * A[t%2][i][j+2] + 5.2f * A[t%2][i+1][j] + 7.2f * A[t%2][i+2][j]) / 118;
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| j2d9pt-512-9-256_host.cu | #include <assert.h>
#include <stdio.h>
#include "j2d9pt-512-9-256_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 18
#define BENCH_RAD 2
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 5 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_9<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
( 7.1f * A[t%2][i-2][j] + 5.1f * A[t%2][i-1][j] + 9.2f * A[t%2][i][j-2] +
12.1f * A[t%2][i][j-1] + 15.f * A[t%2][i][j] + 12.2f * A[t%2][i][j+1] +
9.1f * A[t%2][i][j+2] + 5.2f * A[t%2][i+1][j] + 7.2f * A[t%2][i+2][j]) / 118;
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
b916e2fd33632f060bbe5987c47b225e7efb96be.hip | // !!! This is a file automatically generated by hipify!!!
#include "sgd_trainer_cudaops.cuh"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <functional>
#include "cuda/cuda_util.cuh"
using namespace linalg::cuda;
__global__ void weight_input_kernel(const float* in, const float* weights,
const float* biases, float* out,
unsigned input_size, dim3 size)
{
const dim3 pos = current_pos_cubic();
if (check_pos_cubic(pos, size))
{
const float* in_mat = in + (pos.x * input_size);
out[index_cubic(pos, size)] = mat_mul_case0_helper(in_mat, weights, pos.y, pos.z, 1, input_size, size.z)
+ biases[pos.z];
}
}
void annlib::cuda::cuda_weight_input(const mat_arr& input_rv,
const mat_arr& weights_noarr,
const mat_arr& biases_rv_noarr,
mat_arr* output_rv)
{
prepare_launch_cubic(*output_rv, [&](dim3 size, dim3 threads, dim3 blocks)
{
hipLaunchKernelGGL(( weight_input_kernel) , dim3(blocks), dim3(threads) , 0, 0, input_rv.dev_start(),
weights_noarr.dev_start(),
biases_rv_noarr.dev_start(),
output_rv->dev_start(),
input_rv.cols, size);
});
}
__global__ void backprop_error_kernel(const float* error_next_layer,
const float* weights_next_layer,
const float* act_df,
float* error,
unsigned next_layer_size, dim3 size)
{
const dim3 pos = current_pos_cubic();
if (check_pos_cubic(pos, size))
{
const float* error_next_layer_mat = error_next_layer + (pos.x * next_layer_size);
const unsigned out_index = index_cubic(pos, size);
error[out_index] = mat_mul_case2_helper(error_next_layer_mat, weights_next_layer,
pos.y, pos.z, 1, next_layer_size, size.z)
* act_df[out_index];
}
}
void annlib::cuda::cuda_backprop_error(const mat_arr& error_next_layer_rv,
const mat_arr& weights_next_layer_noarr,
const mat_arr& act_df_rv,
mat_arr* error_rv)
{
prepare_launch_cubic(*error_rv, [&](dim3 size, dim3 threads, dim3 blocks)
{
backprop_error_kernel << < blocks, threads >> >(error_next_layer_rv.dev_start(),
weights_next_layer_noarr.dev_start(),
act_df_rv.dev_start(),
error_rv->dev_start(),
error_next_layer_rv.cols, size);
});
}
__global__ void calculate_gradient_weight_kernel(const float* prev_act, const float* error, float* grad,
unsigned prev_act_mat_size, unsigned error_mat_size,
unsigned mini_batch_size, dim3 size)
{
const dim3 pos = current_pos_cubic();
if (check_pos_cubic(pos, size))
{
float temp = 0.0f;
for (unsigned i = 0; i < mini_batch_size; i++)
{
const float* prev_act_mat = prev_act + i * prev_act_mat_size;
const float* error_mat = error + i * error_mat_size;
temp += mat_mul_case1_helper(prev_act_mat, error_mat, pos.y, pos.z, prev_act_mat_size, 1, error_mat_size);
}
grad[index_cubic(pos, size)] = temp / static_cast<float>(mini_batch_size);
}
}
void annlib::cuda::cuda_calculate_gradient_weight(const mat_arr& previous_activation_rv,
const mat_arr& error_rv,
mat_arr* gradient_weight_noarr)
{
prepare_launch_cubic(*gradient_weight_noarr, [&](dim3 size, dim3 threads, dim3 blocks)
{
calculate_gradient_weight_kernel << < blocks, threads >> >(previous_activation_rv.dev_start(),
error_rv.dev_start(),
gradient_weight_noarr->dev_start(),
previous_activation_rv.cols, error_rv.cols,
previous_activation_rv.count, size);
});
}
__global__ void calculate_gradient_bias_kernel(const float* error, float* grad, unsigned mini_batch_size, unsigned size)
{
const unsigned pos = current_pos_linear();
if (pos < size)
{
float temp = 0.0f;
for (unsigned i = 0; i < mini_batch_size; i++)
{
const float* error_mat = error + i * size;
temp += error_mat[pos];
}
grad[pos] = temp / static_cast<float>(mini_batch_size);
}
}
void annlib::cuda::cuda_calculate_gradient_bias(const mat_arr& error_rv,
mat_arr* gradient_bias_noarr)
{
prepare_launch_linear(*gradient_bias_noarr, [&](unsigned size, unsigned threads, unsigned blocks)
{
calculate_gradient_bias_kernel << < blocks, threads >> >(error_rv.dev_start(),
gradient_bias_noarr->dev_start(),
error_rv.count, size);
});
}
| b916e2fd33632f060bbe5987c47b225e7efb96be.cu | #include "sgd_trainer_cudaops.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <functional>
#include "cuda/cuda_util.cuh"
using namespace linalg::cuda;
__global__ void weight_input_kernel(const float* in, const float* weights,
const float* biases, float* out,
unsigned input_size, dim3 size)
{
const dim3 pos = current_pos_cubic();
if (check_pos_cubic(pos, size))
{
const float* in_mat = in + (pos.x * input_size);
out[index_cubic(pos, size)] = mat_mul_case0_helper(in_mat, weights, pos.y, pos.z, 1, input_size, size.z)
+ biases[pos.z];
}
}
void annlib::cuda::cuda_weight_input(const mat_arr& input_rv,
const mat_arr& weights_noarr,
const mat_arr& biases_rv_noarr,
mat_arr* output_rv)
{
prepare_launch_cubic(*output_rv, [&](dim3 size, dim3 threads, dim3 blocks)
{
weight_input_kernel <<< blocks, threads >>>(input_rv.dev_start(),
weights_noarr.dev_start(),
biases_rv_noarr.dev_start(),
output_rv->dev_start(),
input_rv.cols, size);
});
}
__global__ void backprop_error_kernel(const float* error_next_layer,
const float* weights_next_layer,
const float* act_df,
float* error,
unsigned next_layer_size, dim3 size)
{
const dim3 pos = current_pos_cubic();
if (check_pos_cubic(pos, size))
{
const float* error_next_layer_mat = error_next_layer + (pos.x * next_layer_size);
const unsigned out_index = index_cubic(pos, size);
error[out_index] = mat_mul_case2_helper(error_next_layer_mat, weights_next_layer,
pos.y, pos.z, 1, next_layer_size, size.z)
* act_df[out_index];
}
}
void annlib::cuda::cuda_backprop_error(const mat_arr& error_next_layer_rv,
const mat_arr& weights_next_layer_noarr,
const mat_arr& act_df_rv,
mat_arr* error_rv)
{
prepare_launch_cubic(*error_rv, [&](dim3 size, dim3 threads, dim3 blocks)
{
backprop_error_kernel << < blocks, threads >> >(error_next_layer_rv.dev_start(),
weights_next_layer_noarr.dev_start(),
act_df_rv.dev_start(),
error_rv->dev_start(),
error_next_layer_rv.cols, size);
});
}
__global__ void calculate_gradient_weight_kernel(const float* prev_act, const float* error, float* grad,
unsigned prev_act_mat_size, unsigned error_mat_size,
unsigned mini_batch_size, dim3 size)
{
const dim3 pos = current_pos_cubic();
if (check_pos_cubic(pos, size))
{
float temp = 0.0f;
for (unsigned i = 0; i < mini_batch_size; i++)
{
const float* prev_act_mat = prev_act + i * prev_act_mat_size;
const float* error_mat = error + i * error_mat_size;
temp += mat_mul_case1_helper(prev_act_mat, error_mat, pos.y, pos.z, prev_act_mat_size, 1, error_mat_size);
}
grad[index_cubic(pos, size)] = temp / static_cast<float>(mini_batch_size);
}
}
void annlib::cuda::cuda_calculate_gradient_weight(const mat_arr& previous_activation_rv,
const mat_arr& error_rv,
mat_arr* gradient_weight_noarr)
{
prepare_launch_cubic(*gradient_weight_noarr, [&](dim3 size, dim3 threads, dim3 blocks)
{
calculate_gradient_weight_kernel << < blocks, threads >> >(previous_activation_rv.dev_start(),
error_rv.dev_start(),
gradient_weight_noarr->dev_start(),
previous_activation_rv.cols, error_rv.cols,
previous_activation_rv.count, size);
});
}
__global__ void calculate_gradient_bias_kernel(const float* error, float* grad, unsigned mini_batch_size, unsigned size)
{
const unsigned pos = current_pos_linear();
if (pos < size)
{
float temp = 0.0f;
for (unsigned i = 0; i < mini_batch_size; i++)
{
const float* error_mat = error + i * size;
temp += error_mat[pos];
}
grad[pos] = temp / static_cast<float>(mini_batch_size);
}
}
void annlib::cuda::cuda_calculate_gradient_bias(const mat_arr& error_rv,
mat_arr* gradient_bias_noarr)
{
prepare_launch_linear(*gradient_bias_noarr, [&](unsigned size, unsigned threads, unsigned blocks)
{
calculate_gradient_bias_kernel << < blocks, threads >> >(error_rv.dev_start(),
gradient_bias_noarr->dev_start(),
error_rv.count, size);
});
}
|
0276522d38628ce1e23e8c5607fd95fc6be119b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<stdio.h>
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
__global__ void evalJulia(double *d_pixel,
double *d_temp){
int x_index = threadIdx.x + 2*threadIdx.y + 4*(blockIdx.x + blockIdx.y);
d_temp[x_index] = d_pixel[x_index];
}
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
int main()
{
double *d_pixel;
double *d_temp;
const int size = 16*16;
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
double *temp = new double(size);
double *h_temp = new double(size);
for (int y=0;y<16;y++)
for(int x=0;x<16;x++)
{
temp[x + 16*y] = x + 16*y;
std::cout<<temp[x+16*y]<<std::endl;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
std::cout<<"test begins"<<std::endl;
dim3 threadsPerBlock(2,2);
dim3 numBlocks(8,8);
hipMalloc((void**)&d_pixel, size);
hipMalloc((void**)&d_temp, size);
//hipMemcpy(temp, d_pixel, size, hipMemcpyHostToDevice);
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//evalJulia<<<numBlocks,threadsPerBlock>>>(d_pixel, d_temp);
//hipMemcpy(h_temp, d_temp, size, hipMemcpyDeviceToHost);
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
/* for (int y=0;y<2048;y++)
for(int x=0;x<2048;x++)
{std::cout<<temp[x+2048*y]<<std::endl;}*/
std::cout<<"last kernel thread printed"<<std::endl;
hipFree(d_pixel);
hipFree(d_temp);
delete(h_temp);
delete(temp);
return 0;
}
| 0276522d38628ce1e23e8c5607fd95fc6be119b1.cu | #include<iostream>
#include<stdio.h>
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
__global__ void evalJulia(double *d_pixel,
double *d_temp){
int x_index = threadIdx.x + 2*threadIdx.y + 4*(blockIdx.x + blockIdx.y);
d_temp[x_index] = d_pixel[x_index];
}
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
int main()
{
double *d_pixel;
double *d_temp;
const int size = 16*16;
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
double *temp = new double(size);
double *h_temp = new double(size);
for (int y=0;y<16;y++)
for(int x=0;x<16;x++)
{
temp[x + 16*y] = x + 16*y;
std::cout<<temp[x+16*y]<<std::endl;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
std::cout<<"test begins"<<std::endl;
dim3 threadsPerBlock(2,2);
dim3 numBlocks(8,8);
cudaMalloc((void**)&d_pixel, size);
cudaMalloc((void**)&d_temp, size);
//cudaMemcpy(temp, d_pixel, size, cudaMemcpyHostToDevice);
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//evalJulia<<<numBlocks,threadsPerBlock>>>(d_pixel, d_temp);
//cudaMemcpy(h_temp, d_temp, size, cudaMemcpyDeviceToHost);
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
/* for (int y=0;y<2048;y++)
for(int x=0;x<2048;x++)
{std::cout<<temp[x+2048*y]<<std::endl;}*/
std::cout<<"last kernel thread printed"<<std::endl;
cudaFree(d_pixel);
cudaFree(d_temp);
delete(h_temp);
delete(temp);
return 0;
}
|
148f264293375a0f96db67f161eb0bde9775152b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Functions for matrix operations
*/
#include "../include/Matrix.cuh"
unsigned int countBlocks(unsigned int a, unsigned int b) {
unsigned int num;
num = a / b;
if (a < b || a % b > 0)
num++;
return num;
}
void printMatrix(int m, int n, float*A, int lda, const char* name)
{
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
float Areg = A[row + col*lda];
printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg);
//printf("%s[%d] = %f\n", name, row + col*lda, Areg);
}
}
}
void shift_Input_vec( InputSequences *inputVector, int uIndex)
{
float temp[HORIZON]= { };
for(int i = 0; i < HORIZON - 1; i++){
temp[i] = inputVector[i+1].InputSeq[uIndex];
}
temp[HORIZON - 1] = inputVector[HORIZON - 1].InputSeq[uIndex];
for(int i = 0; i < HORIZON; i++){
inputVector[i].InputSeq[uIndex] = temp[i];
}
}
__global__ void SetUpIdentity_Matrix(float *IdMat)
{
unsigned int id = threadIdx.x + blockDim.x * blockIdx.x;
if(threadIdx.x == blockIdx.x)
{
IdMat[id] = 1.0f;
//values[threadIdx.x] = 1.0f;
}else{
IdMat[id] = 0.0f;
//values[threadIdx.x] = 0.0f;
}
__syncthreads();
}
__global__ void SetUpIdentity_Matrix_overThreadLimit(float *IdMat, int Ydimention)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int id = iy * Ydimention + ix;
if(ix == iy)
{
IdMat[id] = 1.0f;
//values[threadIdx.x] = 1.0f;
}else{
IdMat[id] = 0.0f;
//values[threadIdx.x] = 0.0f;
}
__syncthreads();
}
void GetInvMatrix(float *invMat, float *originMat, int num)
{
hipblasHandle_t cublas_status;
CHECK_CUBLAS(hipblasCreate(&cublas_status),"Failed to initialize cuBLAS");
float **arrayA;
float **arrayC;
float *d_arrayA;
float *d_arrayC;
int *d_LUPivots;
int *d_LUInfo;
size_t szMat = num * num * sizeof(float);
CHECK_CUDA(hipMalloc(&arrayA, sizeof(float*)), "Failed to allocate arrayA");
CHECK_CUDA(hipMalloc(&arrayC, sizeof(float*)), "Failed to allocate arrayC");
CHECK_CUDA(hipMalloc(&d_arrayA, szMat), "Failed to allocate d_arrayA");
CHECK_CUDA(hipMalloc(&d_arrayC, szMat), "Failed to allocate d_arrayC");
CHECK_CUDA(hipMalloc(&d_LUPivots, sizeof(int)), "Failed to allocate arrayC");
CHECK_CUDA(hipMalloc(&d_LUInfo, sizeof(int)), "Failed to allocate arrayC");
CHECK_CUDA(hipMemcpy(d_arrayA, originMat, szMat, hipMemcpyHostToDevice), "Failed to copy Origin Matrix to d_arrayA");
CHECK_CUDA(hipMemcpy(arrayA, &d_arrayA, sizeof(float*), hipMemcpyHostToDevice), "Failed to copy to arrayA");
CHECK_CUDA(hipMemcpy(arrayC, &d_arrayC, sizeof(float*), hipMemcpyHostToDevice), "Failed to copy to arrayC");
CHECK_CUBLAS(hipblasSgetrfBatched(cublas_status, num, arrayA, num, d_LUPivots, d_LUInfo, 1), "Failed to perform LU decomp operation");
CHECK_CUDA(hipDeviceSynchronize(), "Failed to synchronize after kernel call!");
CHECK_CUBLAS(hipblasSgetriBatched(cublas_status, num, (const float **)arrayA, num, d_LUPivots, arrayC, num, d_LUInfo, 1), "Failed to perform Inverse operation!");
CHECK_CUDA(hipDeviceSynchronize(), "Failed to synchronize after kernel call!");
CHECK_CUDA(hipMemcpy(invMat, d_arrayC, szMat, hipMemcpyDeviceToHost), "Failed to copy to invMat");
CHECK_CUDA(hipFree(arrayA),"Failed to free arrayA");
CHECK_CUDA(hipFree(arrayC),"Failed to free arrayC");
CHECK_CUDA(hipFree(d_arrayA),"Failed to free d_arrayA");
CHECK_CUDA(hipFree(d_arrayC),"Failed to free d_arrayC");
CHECK_CUDA(hipFree(d_LUPivots),"Failed to free d_LUPivots");
CHECK_CUDA(hipFree(d_LUInfo),"Failed to free d_LUInfo");
CHECK_CUBLAS(hipblasDestroy(cublas_status), "Failed to destory cuBLAS");
}
void GetInvMatrixBycuSOLVER(float *invMat, float *Mat, int sz_dim)
{
hipsolverDnHandle_t cusolverH = NULL;
CHECK_CUSOLVER(hipsolverDnCreate(&cusolverH), "Failed to initialize cuSOLVER Handle_t");
// hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_VECTOR;
hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_LOWER;
size_t szMat = sz_dim * sz_dim * sizeof(float);
float *d_arrayA;
float *d_arrayInv;
CHECK_CUDA(hipMalloc(&d_arrayA, szMat), "Failed to allocate arrayA at cuSOLVER");
CHECK_CUDA(hipMalloc(&d_arrayInv, szMat), "Failed to allocate arrayInv at cuSOLVER");
CHECK_CUDA(hipMemcpy(d_arrayA, Mat, szMat, hipMemcpyHostToDevice), "Failed to copy Matrix data to d_arrayA");
int work_size;
float *work_space;
int *devInfo;
CHECK_CUDA(hipMalloc((void**)&devInfo, sizeof(int)), "Failed to allocate devInfo at Func # GetInvMatrixBycuSOLVER #");
dim3 cuSolverBlock(2,2);
dim3 cuSolverGrid((sz_dim + cuSolverBlock.x -1)/ cuSolverBlock.x, (sz_dim + cuSolverBlock.y -1) / cuSolverBlock.y);
CHECK_CUSOLVER(hipsolverDnSpotrf_bufferSize(cusolverH, uplo, sz_dim, d_arrayA, sz_dim, &work_size),"Failed to get work_size at cuSOLVER");
CHECK_CUDA(hipMalloc((void**)&work_space, sizeof(float)*work_size), "Failed to allocate work_space at cuSOLVER");
CHECK_CUSOLVER(hipsolverDnSpotrf(cusolverH, uplo, sz_dim, d_arrayA, sz_dim, work_space, work_size, devInfo), "Failed to Function # hipsolverDnSpotrf #");
if(sz_dim > 1024){
hipLaunchKernelGGL(( SetUpIdentity_Matrix_overThreadLimit), dim3(cuSolverGrid), dim3(cuSolverBlock), 0, 0, d_arrayInv, sz_dim);
}else{
hipLaunchKernelGGL(( SetUpIdentity_Matrix), dim3(sz_dim), dim3(sz_dim), 0, 0, d_arrayInv );
}
CHECK_CUDA(hipDeviceSynchronize(), "Failed to synchronize after kernel call at cuSOLVER!");
CHECK_CUSOLVER(hipsolverDnSpotrs(cusolverH, uplo, sz_dim, sz_dim, d_arrayA, sz_dim, d_arrayInv, sz_dim, devInfo),"Failed to perform Inverse operation at cuSOLVER!");
CHECK_CUDA(hipMemcpy(invMat, d_arrayInv, szMat, hipMemcpyDeviceToHost),"Failed to copy Result at cuSOLVER");
CHECK_CUDA(hipFree(d_arrayA),"Failed to free arrayA at cuSOLVER");
CHECK_CUDA(hipFree(d_arrayInv),"Failed to free arrayI at cuSOLVER");
CHECK_CUDA(hipFree(work_space),"Failed to free work_space at cuSOLVER");
CHECK_CUDA(hipFree(devInfo),"Failed to free devInfo at cuSOLVER");
CHECK_CUSOLVER(hipsolverDnDestroy(cusolverH),"Failed to destory cuSOLVER Handle_t");
}
__global__ void GetResultMatrixProduct( float *ans, float *lmat, float *rmat, const int nx )
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int id = iy * nx + ix;
if( ix < HORIZON && iy < HORIZON)
{
float el = 0.0f;
for(int i = 0; i < HORIZON; i++)
{
el += lmat[ iy * nx + i] * rmat[ i * nx + ix ];
}
ans[id] = el;
}
__syncthreads();
}
__global__ void multiply_matrix(float *OutMatrix, float voc, float *InMatrix)
{
unsigned int id = threadIdx.x + blockDim.x * blockIdx.x;
OutMatrix[id] = voc * InMatrix[id];
//printf("OutMatrix[%d] == %f = %f * %f\n",id, OutMatrix[id], voc, InMatrix[id]);
__syncthreads();
}
__global__ void make_Vector_B(float *OutVector, float *Elemets, int indecies)
{
unsigned int id =threadIdx.x + blockIdx.x * blockDim.x;
OutVector[id] = Elemets[indecies + id];
//printf("id = %d IN = %f\n", indecies + id, Elemets[indecies + id]);
__syncthreads();
}
__global__ void make_symmetric_Matrix(float *Out, float *In)
{
unsigned int id =threadIdx.x + blockIdx.x * blockDim.x;
if( blockIdx.x > threadIdx.x)
{
if(!(Out[id]==In[id]))
{
Out[id] = In[id];
}
}
}
__global__ void transpose_opration_Matrix(float *Out, float *In)
{
unsigned int id =threadIdx.x + blockIdx.x * blockDim.x;
int In_index = blockIdx.x + threadIdx.x * blockDim.x;
Out[id] = In[In_index];
__syncthreads();
}
__global__ void get_FullHessian_Elements(float *outElements, float *inElements)
{
unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
float temp_here;
/*if(blockIdx.x == 0){
//outElements[id] = inElements[threadIdx.x];
temp_here = inElements[threadIdx.x];
}
if(threadIdx.x==0){
//outElements[id] = inElements[blockIdx.x];
temp_here = inElements[blockIdx.x];
}
if(threadIdx.x * blockIdx.x != 0){
// int i_id;
// i_id = blockIdx.x + (HORIZON - 1) + (threadIdx.x - 1);
//outElements[id] = inElements[blockIdx.x + (HORIZON - 1) + (threadIdx.x - 1)];
temp_here = inElements[blockIdx.x + (HORIZON - 1) + (threadIdx.x - 1)];
}*/
int vect_id = blockIdx.x;
if(threadIdx.x <= blockIdx.x){
for(int t_id = 0; t_id < threadIdx.x; t_id++){
int sum_a = t_id + 1;
vect_id += (HORIZON - sum_a);
}
//outElements[id] = inElements[vect_id];
temp_here = inElements[vect_id];
}else{
//outElements[id] = 0.0f;
temp_here = 0.0f;
}
if(threadIdx.x != blockIdx.x){
//outElements[id] = outElements[id] / 2;
outElements[id] = temp_here / 2;
}else{
outElements[id] = temp_here;
}
//printf("outElements[%d] == %f\n", id, outElements[id]);
__syncthreads();
}
__global__ void getRegularMatrix(float *outRmatrix, HyperParaboloid *elements, int sumSet)
{
unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
outRmatrix[id] = 0.0f;
//float temp_here = 0.0f;
for(int index = 0; index < sumSet; index++){
outRmatrix[id] += elements[index].tensor_vector[threadIdx.x] * elements[index].tensor_vector[blockIdx.x];
//float temp_here +=
}
//printf("id==%d, ans == %f\n", id, outRmatrix[id]);
__syncthreads();
}
__global__ void getRegularMatrix_overThreadLimit(float *outRmatrix, HyperParaboloid *elements, int sumSet, int Ydimention)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int id = iy * Ydimention + ix;
outRmatrix[id] = 0.0f;
for(int index = 0; index < sumSet; index++){
outRmatrix[id] += elements[index].tensor_vector[ix] * elements[index].tensor_vector[iy];
}
__syncthreads();
}
__global__ void make_tensor_vector(HyperParaboloid *output, MonteCarloMPC *input, int *indices)
{
unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
int next_indices = 0;
for(int i = 0; i < HORIZON; i++){
for(int j = i; j < HORIZON; j++){
// output[id].tensor_vector[next_indices] = input[indices[id]].Input[i] * input[indices[id]].Input[j] * input[indices[id]].WHM;
output[id].tensor_vector[next_indices] = input[indices[id]].InputSeq[0][i] * input[indices[id]].InputSeq[0][j] * input[indices[id]].WHM;
//output[id].tensor_vector[next_indices] = i * j;
// output[id].column_vector[next_indices] = input[indices[id]].L * input[indices[id]].Input[i] * input[indices[id]].Input[j] * input[indices[id]].WHM;
output[id].column_vector[next_indices] = input[indices[id]].L * input[indices[id]].InputSeq[0][i] * input[indices[id]].InputSeq[0][j] * input[indices[id]].WHM;
//output[id].column_vector[next_indices] = 3.0;
next_indices += 1;
}
}
for(int i = 0; i < HORIZON; i++){
// output[id].tensor_vector[next_indices] = input[indices[id]].Input[i] * input[indices[id]].WHM;
output[id].tensor_vector[next_indices] = input[indices[id]].InputSeq[0][i] * input[indices[id]].WHM;
//output[id].tensor_vector[next_indices] = i;
// output[id].column_vector[next_indices] = input[indices[id]].L * input[indices[id]].Input[i] * input[indices[id]].WHM;
output[id].column_vector[next_indices] = input[indices[id]].L * input[indices[id]].InputSeq[0][i] * input[indices[id]].WHM;
//output[id].column_vector[next_indices] = (1/2)*i;
next_indices += 1;
}
output[id].tensor_vector[SIZE_OF_PARABOLOIDVESTOR - 1] = 1.0f * input[indices[id]].WHM;
output[id].column_vector[SIZE_OF_PARABOLOIDVESTOR - 1] = input[indices[id]].L * input[indices[id]].WHM;
__syncthreads();
} | 148f264293375a0f96db67f161eb0bde9775152b.cu | /*
Functions for matrix operations
*/
#include "../include/Matrix.cuh"
unsigned int countBlocks(unsigned int a, unsigned int b) {
unsigned int num;
num = a / b;
if (a < b || a % b > 0)
num++;
return num;
}
void printMatrix(int m, int n, float*A, int lda, const char* name)
{
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
float Areg = A[row + col*lda];
printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg);
//printf("%s[%d] = %f\n", name, row + col*lda, Areg);
}
}
}
void shift_Input_vec( InputSequences *inputVector, int uIndex)
{
float temp[HORIZON]= { };
for(int i = 0; i < HORIZON - 1; i++){
temp[i] = inputVector[i+1].InputSeq[uIndex];
}
temp[HORIZON - 1] = inputVector[HORIZON - 1].InputSeq[uIndex];
for(int i = 0; i < HORIZON; i++){
inputVector[i].InputSeq[uIndex] = temp[i];
}
}
__global__ void SetUpIdentity_Matrix(float *IdMat)
{
unsigned int id = threadIdx.x + blockDim.x * blockIdx.x;
if(threadIdx.x == blockIdx.x)
{
IdMat[id] = 1.0f;
//values[threadIdx.x] = 1.0f;
}else{
IdMat[id] = 0.0f;
//values[threadIdx.x] = 0.0f;
}
__syncthreads();
}
__global__ void SetUpIdentity_Matrix_overThreadLimit(float *IdMat, int Ydimention)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int id = iy * Ydimention + ix;
if(ix == iy)
{
IdMat[id] = 1.0f;
//values[threadIdx.x] = 1.0f;
}else{
IdMat[id] = 0.0f;
//values[threadIdx.x] = 0.0f;
}
__syncthreads();
}
void GetInvMatrix(float *invMat, float *originMat, int num)
{
cublasHandle_t cublas_status;
CHECK_CUBLAS(cublasCreate(&cublas_status),"Failed to initialize cuBLAS");
float **arrayA;
float **arrayC;
float *d_arrayA;
float *d_arrayC;
int *d_LUPivots;
int *d_LUInfo;
size_t szMat = num * num * sizeof(float);
CHECK_CUDA(cudaMalloc(&arrayA, sizeof(float*)), "Failed to allocate arrayA");
CHECK_CUDA(cudaMalloc(&arrayC, sizeof(float*)), "Failed to allocate arrayC");
CHECK_CUDA(cudaMalloc(&d_arrayA, szMat), "Failed to allocate d_arrayA");
CHECK_CUDA(cudaMalloc(&d_arrayC, szMat), "Failed to allocate d_arrayC");
CHECK_CUDA(cudaMalloc(&d_LUPivots, sizeof(int)), "Failed to allocate arrayC");
CHECK_CUDA(cudaMalloc(&d_LUInfo, sizeof(int)), "Failed to allocate arrayC");
CHECK_CUDA(cudaMemcpy(d_arrayA, originMat, szMat, cudaMemcpyHostToDevice), "Failed to copy Origin Matrix to d_arrayA");
CHECK_CUDA(cudaMemcpy(arrayA, &d_arrayA, sizeof(float*), cudaMemcpyHostToDevice), "Failed to copy to arrayA");
CHECK_CUDA(cudaMemcpy(arrayC, &d_arrayC, sizeof(float*), cudaMemcpyHostToDevice), "Failed to copy to arrayC");
CHECK_CUBLAS(cublasSgetrfBatched(cublas_status, num, arrayA, num, d_LUPivots, d_LUInfo, 1), "Failed to perform LU decomp operation");
CHECK_CUDA(cudaDeviceSynchronize(), "Failed to synchronize after kernel call!");
CHECK_CUBLAS(cublasSgetriBatched(cublas_status, num, (const float **)arrayA, num, d_LUPivots, arrayC, num, d_LUInfo, 1), "Failed to perform Inverse operation!");
CHECK_CUDA(cudaDeviceSynchronize(), "Failed to synchronize after kernel call!");
CHECK_CUDA(cudaMemcpy(invMat, d_arrayC, szMat, cudaMemcpyDeviceToHost), "Failed to copy to invMat");
CHECK_CUDA(cudaFree(arrayA),"Failed to free arrayA");
CHECK_CUDA(cudaFree(arrayC),"Failed to free arrayC");
CHECK_CUDA(cudaFree(d_arrayA),"Failed to free d_arrayA");
CHECK_CUDA(cudaFree(d_arrayC),"Failed to free d_arrayC");
CHECK_CUDA(cudaFree(d_LUPivots),"Failed to free d_LUPivots");
CHECK_CUDA(cudaFree(d_LUInfo),"Failed to free d_LUInfo");
CHECK_CUBLAS(cublasDestroy(cublas_status), "Failed to destory cuBLAS");
}
void GetInvMatrixBycuSOLVER(float *invMat, float *Mat, int sz_dim)
{
cusolverDnHandle_t cusolverH = NULL;
CHECK_CUSOLVER(cusolverDnCreate(&cusolverH), "Failed to initialize cuSOLVER Handle_t");
// cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR;
cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER;
size_t szMat = sz_dim * sz_dim * sizeof(float);
float *d_arrayA;
float *d_arrayInv;
CHECK_CUDA(cudaMalloc(&d_arrayA, szMat), "Failed to allocate arrayA at cuSOLVER");
CHECK_CUDA(cudaMalloc(&d_arrayInv, szMat), "Failed to allocate arrayInv at cuSOLVER");
CHECK_CUDA(cudaMemcpy(d_arrayA, Mat, szMat, cudaMemcpyHostToDevice), "Failed to copy Matrix data to d_arrayA");
int work_size;
float *work_space;
int *devInfo;
CHECK_CUDA(cudaMalloc((void**)&devInfo, sizeof(int)), "Failed to allocate devInfo at Func # GetInvMatrixBycuSOLVER #");
dim3 cuSolverBlock(2,2);
dim3 cuSolverGrid((sz_dim + cuSolverBlock.x -1)/ cuSolverBlock.x, (sz_dim + cuSolverBlock.y -1) / cuSolverBlock.y);
CHECK_CUSOLVER(cusolverDnSpotrf_bufferSize(cusolverH, uplo, sz_dim, d_arrayA, sz_dim, &work_size),"Failed to get work_size at cuSOLVER");
CHECK_CUDA(cudaMalloc((void**)&work_space, sizeof(float)*work_size), "Failed to allocate work_space at cuSOLVER");
CHECK_CUSOLVER(cusolverDnSpotrf(cusolverH, uplo, sz_dim, d_arrayA, sz_dim, work_space, work_size, devInfo), "Failed to Function # cusolverDnSpotrf #");
if(sz_dim > 1024){
SetUpIdentity_Matrix_overThreadLimit<<<cuSolverGrid, cuSolverBlock>>>(d_arrayInv, sz_dim);
}else{
SetUpIdentity_Matrix<<<sz_dim, sz_dim>>>( d_arrayInv );
}
CHECK_CUDA(cudaDeviceSynchronize(), "Failed to synchronize after kernel call at cuSOLVER!");
CHECK_CUSOLVER(cusolverDnSpotrs(cusolverH, uplo, sz_dim, sz_dim, d_arrayA, sz_dim, d_arrayInv, sz_dim, devInfo),"Failed to perform Inverse operation at cuSOLVER!");
CHECK_CUDA(cudaMemcpy(invMat, d_arrayInv, szMat, cudaMemcpyDeviceToHost),"Failed to copy Result at cuSOLVER");
CHECK_CUDA(cudaFree(d_arrayA),"Failed to free arrayA at cuSOLVER");
CHECK_CUDA(cudaFree(d_arrayInv),"Failed to free arrayI at cuSOLVER");
CHECK_CUDA(cudaFree(work_space),"Failed to free work_space at cuSOLVER");
CHECK_CUDA(cudaFree(devInfo),"Failed to free devInfo at cuSOLVER");
CHECK_CUSOLVER(cusolverDnDestroy(cusolverH),"Failed to destory cuSOLVER Handle_t");
}
__global__ void GetResultMatrixProduct( float *ans, float *lmat, float *rmat, const int nx )
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int id = iy * nx + ix;
if( ix < HORIZON && iy < HORIZON)
{
float el = 0.0f;
for(int i = 0; i < HORIZON; i++)
{
el += lmat[ iy * nx + i] * rmat[ i * nx + ix ];
}
ans[id] = el;
}
__syncthreads();
}
__global__ void multiply_matrix(float *OutMatrix, float voc, float *InMatrix)
{
unsigned int id = threadIdx.x + blockDim.x * blockIdx.x;
OutMatrix[id] = voc * InMatrix[id];
//printf("OutMatrix[%d] == %f = %f * %f\n",id, OutMatrix[id], voc, InMatrix[id]);
__syncthreads();
}
__global__ void make_Vector_B(float *OutVector, float *Elemets, int indecies)
{
unsigned int id =threadIdx.x + blockIdx.x * blockDim.x;
OutVector[id] = Elemets[indecies + id];
//printf("id = %d IN = %f\n", indecies + id, Elemets[indecies + id]);
__syncthreads();
}
__global__ void make_symmetric_Matrix(float *Out, float *In)
{
unsigned int id =threadIdx.x + blockIdx.x * blockDim.x;
if( blockIdx.x > threadIdx.x)
{
if(!(Out[id]==In[id]))
{
Out[id] = In[id];
}
}
}
__global__ void transpose_opration_Matrix(float *Out, float *In)
{
unsigned int id =threadIdx.x + blockIdx.x * blockDim.x;
int In_index = blockIdx.x + threadIdx.x * blockDim.x;
Out[id] = In[In_index];
__syncthreads();
}
__global__ void get_FullHessian_Elements(float *outElements, float *inElements)
{
unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
float temp_here;
/*if(blockIdx.x == 0){
//outElements[id] = inElements[threadIdx.x];
temp_here = inElements[threadIdx.x];
}
if(threadIdx.x==0){
//outElements[id] = inElements[blockIdx.x];
temp_here = inElements[blockIdx.x];
}
if(threadIdx.x * blockIdx.x != 0){
// int i_id;
// i_id = blockIdx.x + (HORIZON - 1) + (threadIdx.x - 1);
//outElements[id] = inElements[blockIdx.x + (HORIZON - 1) + (threadIdx.x - 1)];
temp_here = inElements[blockIdx.x + (HORIZON - 1) + (threadIdx.x - 1)];
}*/
int vect_id = blockIdx.x;
if(threadIdx.x <= blockIdx.x){
for(int t_id = 0; t_id < threadIdx.x; t_id++){
int sum_a = t_id + 1;
vect_id += (HORIZON - sum_a);
}
//outElements[id] = inElements[vect_id];
temp_here = inElements[vect_id];
}else{
//outElements[id] = 0.0f;
temp_here = 0.0f;
}
if(threadIdx.x != blockIdx.x){
//outElements[id] = outElements[id] / 2;
outElements[id] = temp_here / 2;
}else{
outElements[id] = temp_here;
}
//printf("outElements[%d] == %f\n", id, outElements[id]);
__syncthreads();
}
__global__ void getRegularMatrix(float *outRmatrix, HyperParaboloid *elements, int sumSet)
{
unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
outRmatrix[id] = 0.0f;
//float temp_here = 0.0f;
for(int index = 0; index < sumSet; index++){
outRmatrix[id] += elements[index].tensor_vector[threadIdx.x] * elements[index].tensor_vector[blockIdx.x];
//float temp_here +=
}
//printf("id==%d, ans == %f\n", id, outRmatrix[id]);
__syncthreads();
}
__global__ void getRegularMatrix_overThreadLimit(float *outRmatrix, HyperParaboloid *elements, int sumSet, int Ydimention)
{
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
unsigned int id = iy * Ydimention + ix;
outRmatrix[id] = 0.0f;
for(int index = 0; index < sumSet; index++){
outRmatrix[id] += elements[index].tensor_vector[ix] * elements[index].tensor_vector[iy];
}
__syncthreads();
}
__global__ void make_tensor_vector(HyperParaboloid *output, MonteCarloMPC *input, int *indices)
{
unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
int next_indices = 0;
for(int i = 0; i < HORIZON; i++){
for(int j = i; j < HORIZON; j++){
// output[id].tensor_vector[next_indices] = input[indices[id]].Input[i] * input[indices[id]].Input[j] * input[indices[id]].WHM;
output[id].tensor_vector[next_indices] = input[indices[id]].InputSeq[0][i] * input[indices[id]].InputSeq[0][j] * input[indices[id]].WHM;
//output[id].tensor_vector[next_indices] = i * j;
// output[id].column_vector[next_indices] = input[indices[id]].L * input[indices[id]].Input[i] * input[indices[id]].Input[j] * input[indices[id]].WHM;
output[id].column_vector[next_indices] = input[indices[id]].L * input[indices[id]].InputSeq[0][i] * input[indices[id]].InputSeq[0][j] * input[indices[id]].WHM;
//output[id].column_vector[next_indices] = 3.0;
next_indices += 1;
}
}
for(int i = 0; i < HORIZON; i++){
// output[id].tensor_vector[next_indices] = input[indices[id]].Input[i] * input[indices[id]].WHM;
output[id].tensor_vector[next_indices] = input[indices[id]].InputSeq[0][i] * input[indices[id]].WHM;
//output[id].tensor_vector[next_indices] = i;
// output[id].column_vector[next_indices] = input[indices[id]].L * input[indices[id]].Input[i] * input[indices[id]].WHM;
output[id].column_vector[next_indices] = input[indices[id]].L * input[indices[id]].InputSeq[0][i] * input[indices[id]].WHM;
//output[id].column_vector[next_indices] = (1/2)*i;
next_indices += 1;
}
output[id].tensor_vector[SIZE_OF_PARABOLOIDVESTOR - 1] = 1.0f * input[indices[id]].WHM;
output[id].column_vector[SIZE_OF_PARABOLOIDVESTOR - 1] = input[indices[id]].L * input[indices[id]].WHM;
__syncthreads();
} |
b466fdea44654c05dc64a4d03c3858b24492ae71.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathReduce.cu"
#else
THC_API void
THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
const accreal size = scalar_cast<accreal>(THCTensor_(size)(state, src, dim));
if (!THC_reduceDim(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
ReduceDivide<accreal>{size},
scalar_cast<accreal>(0),
dim,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void
THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, real maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
ptrdiff_t size = THCTensor_(nElement)(state, data)/data->size[0];
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimension)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<real>::gt(value, scalar_cast<real>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimension)(state, src) > 1, 1, "need at least 2 dimensions");
dim3 grid(data->size[0]);
dim3 threads(32);
hipLaunchKernelGGL(( THCTensor_kernel_renorm<real, accreal>)
, dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
hipError_t errcode = hipGetLastError();
if(errcode != hipSuccess)
THError(hipGetErrorString(errcode));
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
THC_API void
THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
TensorUtils<THCTensor>::preserveReduceDimSemantics(
state, self_, THCTensor_(nDimension)(state, src), dimension, keepdim);
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, true>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, true>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API void
THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
TensorUtils<THCTensor>::preserveReduceDimSemantics(
state, self_, THCTensor_(nDimension)(state, src), dimension, keepdim);
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, false>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, false>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API accreal
THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
THC_API accreal
THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(THCTensor_(nElement)(state, self) - (biased ? 0 : 1))
);
THCudaCheck(hipGetLastError());
return val;
}
THC_API void
THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real _value, int dimension, int keepdim)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceDim(state, self, src,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceDim(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceDim(state, self, src,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{scalar_cast<accreal>(.5)},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceDim(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else {
THC_reduceDim(state, self, src,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{THCNumerics<accreal>::cinv(value)},
scalar_cast<accreal>(0),
dimension, keepdim);
}
THCudaCheck(hipGetLastError());
}
THC_API accreal
THCTensor_(normall)(THCState *state, THCTensor *self, real _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceAll(state, self,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceAll(state, self,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceAll(state, self,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceAll(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else {
THC_reduceAll(state, self,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::pow(result,
THCNumerics<accreal>::cinv(value));
}
THCudaCheck(hipGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, real _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<real> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result = thrust::inner_product(
#if TORCH_HIP_VERSION >= 7000
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<real, accreal>(value));
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value));
}
#endif
THC_API accreal
THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API accreal
THCTensor_(prodall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
scalar_cast<accreal>(1),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return val;
}
THC_API accreal
THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
THArgCheck(self->nDimension > 0, 1, "empty Tensor");
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
THC_API real
THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<accreal>{},
ReduceMin<accreal>{},
THCNumerics<accreal>::max(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return scalar_cast<real>(val);
}
THC_API real
THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<accreal>{},
ReduceMax<accreal>{},
THCNumerics<accreal>::min(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
return scalar_cast<real>(val);
}
THC_API real
THCTensor_(medianall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
ptrdiff_t nelem, k;
nelem = THCTensor_(nElement)(state, self);
k = (nelem-1) >> 1;
THLongStorage *size = THLongStorage_newWithSize1(nelem);
THCTensor *view = THCTensor_(newView)(state, self, size);
THLongStorage_free(size);
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, indices, view, 0, 0);
val = THCTensor_(get1d)(state, sorted, k);
THCTensor_(free)(state, view);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, indices);
THCudaCheck(hipGetLastError());
return val;
}
THC_API void
THCTensor_(median)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *self,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
int64_t t_size_dim, k;
t_size_dim = THCTensor_(size)(state, self, dimension);
k = (t_size_dim-1) >> 1;
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *sorted_indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, sorted_indices, self, dimension, 0);
THCTensor *newValues = THCTensor_(newNarrow)(state, sorted, dimension, k, 1);
THCudaLongTensor *newIndices = THCudaLongTensor_newNarrow(state, sorted_indices, dimension, k, 1);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, sorted_indices);
if (!keepdim) {
THCTensor_(squeeze1d)(state, newValues, newValues, dimension);
THCudaLongTensor_squeeze1d(state, newIndices, newIndices, dimension);
}
THCTensor_(resizeAs)(state, values, newValues);
THCudaLongTensor_resizeAs(state, indices, newIndices);
THCTensor_(copy)(state, values, newValues);
THCudaLongTensor_copy(state, indices, newIndices);
THCTensor_(free)(state, newValues);
THCudaLongTensor_free(state, newIndices);
THCudaCheck(hipGetLastError());
}
THC_API void
THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<typename TensorUtils<THCTensor>::DataType, int64_t>
init =
thrust::make_pair<typename TensorUtils<THCTensor>::DataType, int64_t>(
THCNumerics<typename TensorUtils<THCTensor>::DataType>::min(), 0);
return THC_reduceDimIndex(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<typename TensorUtils<THCTensor>::DataType, int64_t>());
}
THC_API void
THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<typename TensorUtils<THCTensor>::DataType, int64_t>
init =
thrust::make_pair<typename TensorUtils<THCTensor>::DataType, int64_t>(
THCNumerics<typename TensorUtils<THCTensor>::DataType>::max(), 0);
return THC_reduceDimIndex(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<typename TensorUtils<THCTensor>::DataType, int64_t>());
}
#endif
| b466fdea44654c05dc64a4d03c3858b24492ae71.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMathReduce.cu"
#else
THC_API void
THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim(state, self, src,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(1),
dimension,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
const accreal size = scalar_cast<accreal>(THCTensor_(size)(state, src, dim));
if (!THC_reduceDim(state, self, src,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
ReduceDivide<accreal>{size},
scalar_cast<accreal>(0),
dim,
keepdim)) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
THC_API void
THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, real value, int dimension, real maxnorm)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
THCTensor *self_;
THCTensor *src_ = THCTensor_(newTranspose)(state, src, dimension, 0);
THCTensor *data = THCTensor_(newClone)(state, src_);
ptrdiff_t size = THCTensor_(nElement)(state, data)/data->size[0];
THArgCheck(dimension >= 0 && dimension < THCTensor_(nDimension)(state, src), 3, "invalid dimension");
THArgCheck(THCNumerics<real>::gt(value, scalar_cast<real>(0)), 2, "non-positive-norm not supported");
THArgCheck(THCTensor_(nDimension)(state, src) > 1, 1, "need at least 2 dimensions");
dim3 grid(data->size[0]);
dim3 threads(32);
THCTensor_kernel_renorm<real, accreal>
<<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, data), scalar_cast<accreal>(value), size, scalar_cast<accreal>(maxnorm));
cudaError errcode = cudaGetLastError();
if(errcode != cudaSuccess)
THError(cudaGetErrorString(errcode));
THCTensor_(free)(state, src_);
self_ = THCTensor_(newTranspose)(state, data, dimension, 0);
THCTensor_(resizeAs)(state, self, self_);
THCTensor_(freeCopyTo)(state, self_, self);
THCTensor_(free)(state, data);
}
THC_API void
THCTensor_(std)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
TensorUtils<THCTensor>::preserveReduceDimSemantics(
state, self_, THCTensor_(nDimension)(state, src), dimension, keepdim);
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, true>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, true>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API void
THCTensor_(var)(THCState *state, THCTensor *self_, THCTensor *src, int dimension, int biased, int keepdim)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src));
TensorUtils<THCTensor>::preserveReduceDimSemantics(
state, self_, THCTensor_(nDimension)(state, src), dimension, keepdim);
THLongStorage *dim = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(dim, dimension, 1);
THCTensor_(resize)(state, self_, dim, NULL);
THLongStorage_free(dim);
THCTensor *self = THCTensor_(newContiguous)(state, self_);
src = THCTensor_(newContiguous)(state, src);
if (dimension == THCTensor_(nDimension)(state, src) - 1) {
THCTensor_varInnermostDim<THCTensor, real, accreal, false>(state, self, src, biased);
} else {
THCTensor_varOuterDim<THCTensor, real, accreal, false>(state, self, src, dimension, biased);
}
THCTensor_(free)(state, src);
THCTensor_(freeCopyTo)(state, self, self_);
if (!keepdim) {
THCTensor_(squeeze1d)(state, self_, self_, dimension);
}
}
THC_API accreal
THCTensor_(stdall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
return THCNumerics<accreal>::sqrt((THCTensor_(varall)(state, self, biased)));
}
THC_API accreal
THCTensor_(varall)(THCState *state, THCTensor *self, int biased)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal mean = THCTensor_(meanall)(state, self);
accreal val;
if (!THC_reduceAll(state, self,
SquareFunctor<accreal>(mean),
ReduceAdd<accreal>(),
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
val = THCNumerics<accreal>::div(
val,
scalar_cast<accreal>(THCTensor_(nElement)(state, self) - (biased ? 0 : 1))
);
THCudaCheck(cudaGetLastError());
return val;
}
THC_API void
THCTensor_(norm)(THCState *state, THCTensor* self, THCTensor* src, real _value, int dimension, int keepdim)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceDim(state, self, src,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceDim(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceDim(state, self, src,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{scalar_cast<accreal>(.5)},
scalar_cast<accreal>(0),
dimension, keepdim);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceDim(state, self, src,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
thrust::identity<accreal>{},
scalar_cast<accreal>(0),
dimension, keepdim);
} else {
THC_reduceDim(state, self, src,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
ReducePow<accreal>{THCNumerics<accreal>::cinv(value)},
scalar_cast<accreal>(0),
dimension, keepdim);
}
THCudaCheck(cudaGetLastError());
}
THC_API accreal
THCTensor_(normall)(THCState *state, THCTensor *self, real _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal result;
if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(0))) {
THC_reduceAll(state, self,
TensorNonZeroOp<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(1))) {
THC_reduceAll(state, self,
TensorNormOp<accreal, 1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(2))) {
THC_reduceAll(state, self,
TensorNormOp<accreal, 2>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::sqrt(result);
} else if (THCNumerics<accreal>::eq(value, scalar_cast<accreal>(INFINITY))) {
THC_reduceAll(state, self,
TensorNormOp<accreal, 1>{value},
ReduceMax<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
} else {
THC_reduceAll(state, self,
TensorNormOp<accreal, -1>{value},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&result, 0);
result = THCNumerics<accreal>::pow(result,
THCNumerics<accreal>::cinv(value));
}
THCudaCheck(cudaGetLastError());
return result;
}
accreal THCTensor_(dist)(THCState *state, THCTensor *self,
THCTensor *src, real _value)
{
const accreal value = scalar_cast<accreal>(_value);
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
self = THCTensor_(newContiguous)(state, self);
ptrdiff_t size = THCTensor_(nElement)(state, self);
src = THCTensor_(newContiguous)(state, src);
thrust::device_ptr<real> self_data(THCTensor_(data)(state, self));
thrust::device_ptr<real> src_data(THCTensor_(data)(state, src));
THCThrustAllocator thrustAlloc(state);
accreal result = thrust::inner_product(
#if CUDA_VERSION >= 7000
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
self_data, self_data+size, src_data, scalar_cast<accreal>(0),
thrust::plus<accreal>(),
ThrustTensorDistOp<real, accreal>(value));
THCTensor_(free)(state, src);
THCTensor_(free)(state, self);
return THCNumerics<accreal>::pow(result, THCNumerics<accreal>::cinv(value));
}
#endif
THC_API accreal
THCTensor_(sumall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<accreal>{},
ReduceAdd<accreal>{},
scalar_cast<accreal>(0),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
THC_API accreal
THCTensor_(prodall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<accreal>{},
ReduceMultiply<accreal>{},
scalar_cast<accreal>(1),
&val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return val;
}
THC_API accreal
THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
THArgCheck(self->nDimension > 0, 1, "empty Tensor");
return THCTensor_(sumall)(state, self)/THCTensor_(nElement)(state, self);
}
THC_API real
THCTensor_(minall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<accreal>{},
ReduceMin<accreal>{},
THCNumerics<accreal>::max(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return scalar_cast<real>(val);
}
THC_API real
THCTensor_(maxall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
accreal val;
if (!THC_reduceAll(state, self,
thrust::identity<accreal>{},
ReduceMax<accreal>{},
THCNumerics<accreal>::min(), &val, 0)) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
return scalar_cast<real>(val);
}
THC_API real
THCTensor_(medianall)(THCState *state, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
real val;
ptrdiff_t nelem, k;
nelem = THCTensor_(nElement)(state, self);
k = (nelem-1) >> 1;
THLongStorage *size = THLongStorage_newWithSize1(nelem);
THCTensor *view = THCTensor_(newView)(state, self, size);
THLongStorage_free(size);
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, indices, view, 0, 0);
val = THCTensor_(get1d)(state, sorted, k);
THCTensor_(free)(state, view);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, indices);
THCudaCheck(cudaGetLastError());
return val;
}
THC_API void
THCTensor_(median)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *self,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
int64_t t_size_dim, k;
t_size_dim = THCTensor_(size)(state, self, dimension);
k = (t_size_dim-1) >> 1;
THCTensor *sorted = THCTensor_(new)(state);
THCudaLongTensor *sorted_indices = THCudaLongTensor_new(state);
THCTensor_(sort)(state, sorted, sorted_indices, self, dimension, 0);
THCTensor *newValues = THCTensor_(newNarrow)(state, sorted, dimension, k, 1);
THCudaLongTensor *newIndices = THCudaLongTensor_newNarrow(state, sorted_indices, dimension, k, 1);
THCTensor_(free)(state, sorted);
THCudaLongTensor_free(state, sorted_indices);
if (!keepdim) {
THCTensor_(squeeze1d)(state, newValues, newValues, dimension);
THCudaLongTensor_squeeze1d(state, newIndices, newIndices, dimension);
}
THCTensor_(resizeAs)(state, values, newValues);
THCudaLongTensor_resizeAs(state, indices, newIndices);
THCTensor_(copy)(state, values, newValues);
THCudaLongTensor_copy(state, indices, newIndices);
THCTensor_(free)(state, newValues);
THCudaLongTensor_free(state, newIndices);
THCudaCheck(cudaGetLastError());
}
THC_API void
THCTensor_(max)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<typename TensorUtils<THCTensor>::DataType, int64_t>
init =
thrust::make_pair<typename TensorUtils<THCTensor>::DataType, int64_t>(
THCNumerics<typename TensorUtils<THCTensor>::DataType>::min(), 0);
return THC_reduceDimIndex(
state, values, indices, src, dimension, keepdim, init,
MaxValuePair<typename TensorUtils<THCTensor>::DataType, int64_t>());
}
THC_API void
THCTensor_(min)(THCState *state,
THCTensor *values,
THCudaLongTensor *indices,
THCTensor *src,
int dimension,
int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, values, indices, src));
thrust::pair<typename TensorUtils<THCTensor>::DataType, int64_t>
init =
thrust::make_pair<typename TensorUtils<THCTensor>::DataType, int64_t>(
THCNumerics<typename TensorUtils<THCTensor>::DataType>::max(), 0);
return THC_reduceDimIndex(
state, values, indices, src, dimension, keepdim, init,
MinValuePair<typename TensorUtils<THCTensor>::DataType, int64_t>());
}
#endif
|
1cdece90cd1ba63e02db9f6d44023534f6ee3c52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <string.h>
#include <sstream>
#include <omp.h>
#include <stdlib.h>
#include <math.h>
using namespace std;
__global__ void preprocess(double * d_XX,double *d_mat,unsigned long long int SIZE,int N,unsigned long long int CHUNKY){
unsigned long long int ind = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long long int start_loc = ind*CHUNKY;
unsigned long long int start_of_XX = ind*N;
for(int i=0;i<N;i++){
// since all arrays are flattened
d_XX[start_of_XX + i] = d_mat[N*(N-1)+ i];
for(int j=0; j<N; j++){
d_XX[start_of_XX + i] -= ((double)d_mat[(j*N)+i]/2);
}
}
unsigned long long int y = (start_loc>>1) ^ start_loc;
for(int i=0;i<N;i++){
for(int k=0;k<N;k++){
if( ( (y >> k ) & 1 ) == 1){
d_XX[start_of_XX+i] += d_mat[N*k+i]; // M[i][k]
}
}
}
}
__global__ void perm_kernel(double * d_XX,unsigned long long int CHUNKY,double *d_p,double *d_mat,unsigned long long int SIZE,int N){
unsigned long long int ind = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long long int start_loc = ind*CHUNKY + 1;
unsigned long long int start_of_XX = ind*N;
// GOTTO start from start_loc+1 then go until CHUNKY+1
unsigned long long int LIMITER = start_loc+CHUNKY;
// carefull last chunk start_loc might go one over...
int ps = (start_loc & 1LL) == 0 ? -1:1;
double* my_XX = d_XX + start_of_XX;
// do the calculations for the whole sha-bang
double local_p = 0.0; // this for local, then reduce it to outer
for(unsigned long long i = start_loc; (i < LIMITER) && (i < SIZE) ;i++){
unsigned long long int y = (i>>1) ^ i; // gray code
unsigned long long int yy = ( (i-1)>>1 ) ^ (i-1); // i-1's gray-code
int z = __ffs( y ^ yy )-1; // get the changing bit
int s = ((y >> z) & 1LL) == 1 ? 1:-1; // find changing bit
double dd = 1.0;
for(int j=0;j<N;j++){
my_XX[j] += s * d_mat[N*z+j]; // M[j][Z]
dd *= my_XX[j];
}
ps *= -1;
local_p += ps * dd;
}
// do a reduction on the d_p !!!!
if(ind==1) printf("Thread %d's local P is: %.2lf\n",ind,local_p);
atomicAdd(d_p,local_p);
}
void usage()
{
cout << "USAGE: ./exec <filename> <machine no>" << endl;
exit(0);
}
int main(int argc, const char** argv)
{
if(argc != 3)
usage();
string line;
const char* filename = argv[1];
int MACHINE_NO = atoi(argv[2]);
ifstream input (filename);
if(input.fail())
return 0;
int N;
int **M;
getline(input,line);
N = atoi(line.c_str());
M = new int*[N];
for(int i = 0; i < N; i ++){
M[i] = new int[N];
}
int linectr = 0;
while(getline(input,line)){
stringstream ss(line);
int temp;
int ctr = 0;
while(ss >> temp)
M[linectr][ctr++] = temp;
linectr++;
}
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
unsigned int threads = prop.maxThreadsPerBlock;
unsigned int threadDIM = prop.maxThreadsDim[3];
unsigned int sharedmem = prop.sharedMemPerBlock;
cout << "Here are the specs\n";
cout << "Shared mem per block: " << sharedmem << "\n";
// NEED TO FLATTEN THE ARRAY
double * data_as_array = new double[N*N]();
for(int i=0; i<N; i++){
for(int j=0; j<N; j++){
// colum-major order
data_as_array[i*N + j] = (double)M[j][i];
}
}
hipSetDevice(MACHINE_NO);
int size_mat = N*N * sizeof(double);
double *d_mat;
double *d_p;
double *d_XX;
double p = 1.0;
double *x = (double*)malloc(sizeof(double)*N);
for(int i=0;i<N;i++){
x[i]= M[i][N-1];
for(int j=0;j<N;j++){
x[i] -= ((double)M[i][j]/2);
}
p *= x[i];
}
unsigned long long int SIZE = (unsigned long long int)1 << (N-1);
unsigned long long int THREADS = 1024;
unsigned long long int BLOCKS = 32;
while(SIZE < (THREADS*BLOCKS)){
if(BLOCKS != 1){
BLOCKS /=2;
}else{
THREADS /=2;
}
}
unsigned long long int CHUNKY = SIZE/(THREADS*BLOCKS);
cout << "Current thread to block;\n\t THREAD: " << THREADS
<< "\n\t BLOCKS: " << BLOCKS << "\n";
double *x_s = (double*) malloc(sizeof(double)*N);
/*
cout<<"Here is the initial X array: \n";
for(int i=0;i<N;i++){
printf("%.2lf ",x[i]);
}
printf("\n");
*/
/*
int tid = 2;
long long int sloc = tid*CHUNKY;
int yyy = (sloc >>1)^sloc;
for(int i=0;i<N;i++){
x_s[i] = x[i];
for(int k=0;k<N;k++){
if(((yyy>>k) & 1) == 1) {x_s[i] += M[i][k];}
}
}
long long int starts = tid*CHUNKY+1;
double local_p = 1.0;
for(int i=starts;i<starts+CHUNKY+1;i++){
int y = (i>>1) ^ i;
int yy = ( (i-1)>> 1) ^ (i-1);
int z = __builtin_ctz(y^yy);
int s = ((y>>z)& 1) == 1 ? 1:-1;
int prodsign = (i & 1) == 0 ? 1:-1;
double dd = 1.0;
for(int j=0;j<N;j++){
x_s[j] += (double)(s*M[j][z]);
dd *= x_s[j];
}
local_p += (double)(prodsign*dd);
}
printf("Here is the p in CPU: %.2lf\n",local_p);
*/
/*
cout << "Here is the X array for " << CHUNKY << " \n";
for(int i=0;i<N;i++){
printf("%.2lf ",x_s[i]);
}
printf("\n");
*/
double *XX = (double* )malloc(sizeof(double)*N*THREADS*BLOCKS);
memset(XX,0.0,sizeof(double)*N*THREADS*BLOCKS);
// memory moving magiac
cout << "Chunky is this: " << CHUNKY << " \n";
cout << "Size is this: " << SIZE << " \n";
cout << "N is this: " << N << " \n";
hipMalloc((void **)&d_XX,THREADS*BLOCKS*N*sizeof(double));
hipMalloc((void **)&d_mat,size_mat);
hipMalloc((void **)&d_p,sizeof(double));
cout << "Memory Allocated...\n";
hipMemcpy(d_XX,XX,THREADS*BLOCKS*N*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(d_mat,data_as_array,size_mat,hipMemcpyHostToDevice);
hipMemcpy(d_p,&p,sizeof(double),hipMemcpyHostToDevice);
hipDeviceSynchronize();
cout << "Memory Copied...\n";
// preprocess the fuck out of ithipLaunchKernelGGL((
preprocess), dim3(BLOCKS),dim3(THREADS), 0, 0, d_XX,d_mat,SIZE,N,CHUNKY);
hipMemcpy(XX,d_XX,THREADS*BLOCKS*N*sizeof(double),hipMemcpyDeviceToHost);
cout << "Preprocess finished running...\n";
/*
cout << "Here is the initial XX array: \n";
for(int i=0;i<N;i++){
printf("%.2lf ",XX[i]);
}
printf("\n");
*/
/*
cout << "Here is the XX array for " << CHUNKY << " \n";
for(int i=0;i<N;i++){
printf("%.2lf ",XX[N+i]);
}
printf("\n");
*/
cout << "Algo starts now.. Hold on to your seats\n";
double start,end;
start = omp_get_wtime();
hipLaunchKernelGGL(( perm_kernel), dim3(BLOCKS),dim3(THREADS), 0, 0, d_XX,CHUNKY,d_p,d_mat,SIZE,N);
cout << "Kernel finished running...\n";
hipMemcpy(&p,d_p,sizeof(double),hipMemcpyDeviceToHost);
end = omp_get_wtime();
cout << "Memory re-copied from the device to host...\n";
p*= (4*(N & 1) - 2);
cout << "Result is: " << p <<" \n";
double result = end-start;
cout <<"The time the kernel took: " << result << " ...\n";
hipFree(d_XX);
hipFree(d_mat);
return 0;
}
| 1cdece90cd1ba63e02db9f6d44023534f6ee3c52.cu | #include <iostream>
#include <fstream>
#include <string.h>
#include <sstream>
#include <omp.h>
#include <stdlib.h>
#include <math.h>
using namespace std;
__global__ void preprocess(double * d_XX,double *d_mat,unsigned long long int SIZE,int N,unsigned long long int CHUNKY){
unsigned long long int ind = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long long int start_loc = ind*CHUNKY;
unsigned long long int start_of_XX = ind*N;
for(int i=0;i<N;i++){
// since all arrays are flattened
d_XX[start_of_XX + i] = d_mat[N*(N-1)+ i];
for(int j=0; j<N; j++){
d_XX[start_of_XX + i] -= ((double)d_mat[(j*N)+i]/2);
}
}
unsigned long long int y = (start_loc>>1) ^ start_loc;
for(int i=0;i<N;i++){
for(int k=0;k<N;k++){
if( ( (y >> k ) & 1 ) == 1){
d_XX[start_of_XX+i] += d_mat[N*k+i]; // M[i][k]
}
}
}
}
__global__ void perm_kernel(double * d_XX,unsigned long long int CHUNKY,double *d_p,double *d_mat,unsigned long long int SIZE,int N){
unsigned long long int ind = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long long int start_loc = ind*CHUNKY + 1;
unsigned long long int start_of_XX = ind*N;
// GOTTO start from start_loc+1 then go until CHUNKY+1
unsigned long long int LIMITER = start_loc+CHUNKY;
// carefull last chunk start_loc might go one over...
int ps = (start_loc & 1LL) == 0 ? -1:1;
double* my_XX = d_XX + start_of_XX;
// do the calculations for the whole sha-bang
double local_p = 0.0; // this for local, then reduce it to outer
for(unsigned long long i = start_loc; (i < LIMITER) && (i < SIZE) ;i++){
unsigned long long int y = (i>>1) ^ i; // gray code
unsigned long long int yy = ( (i-1)>>1 ) ^ (i-1); // i-1's gray-code
int z = __ffs( y ^ yy )-1; // get the changing bit
int s = ((y >> z) & 1LL) == 1 ? 1:-1; // find changing bit
double dd = 1.0;
for(int j=0;j<N;j++){
my_XX[j] += s * d_mat[N*z+j]; // M[j][Z]
dd *= my_XX[j];
}
ps *= -1;
local_p += ps * dd;
}
// do a reduction on the d_p !!!!
if(ind==1) printf("Thread %d's local P is: %.2lf\n",ind,local_p);
atomicAdd(d_p,local_p);
}
void usage()
{
cout << "USAGE: ./exec <filename> <machine no>" << endl;
exit(0);
}
int main(int argc, const char** argv)
{
if(argc != 3)
usage();
string line;
const char* filename = argv[1];
int MACHINE_NO = atoi(argv[2]);
ifstream input (filename);
if(input.fail())
return 0;
int N;
int **M;
getline(input,line);
N = atoi(line.c_str());
M = new int*[N];
for(int i = 0; i < N; i ++){
M[i] = new int[N];
}
int linectr = 0;
while(getline(input,line)){
stringstream ss(line);
int temp;
int ctr = 0;
while(ss >> temp)
M[linectr][ctr++] = temp;
linectr++;
}
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
unsigned int threads = prop.maxThreadsPerBlock;
unsigned int threadDIM = prop.maxThreadsDim[3];
unsigned int sharedmem = prop.sharedMemPerBlock;
cout << "Here are the specs\n";
cout << "Shared mem per block: " << sharedmem << "\n";
// NEED TO FLATTEN THE ARRAY
double * data_as_array = new double[N*N]();
for(int i=0; i<N; i++){
for(int j=0; j<N; j++){
// colum-major order
data_as_array[i*N + j] = (double)M[j][i];
}
}
cudaSetDevice(MACHINE_NO);
int size_mat = N*N * sizeof(double);
double *d_mat;
double *d_p;
double *d_XX;
double p = 1.0;
double *x = (double*)malloc(sizeof(double)*N);
for(int i=0;i<N;i++){
x[i]= M[i][N-1];
for(int j=0;j<N;j++){
x[i] -= ((double)M[i][j]/2);
}
p *= x[i];
}
unsigned long long int SIZE = (unsigned long long int)1 << (N-1);
unsigned long long int THREADS = 1024;
unsigned long long int BLOCKS = 32;
while(SIZE < (THREADS*BLOCKS)){
if(BLOCKS != 1){
BLOCKS /=2;
}else{
THREADS /=2;
}
}
unsigned long long int CHUNKY = SIZE/(THREADS*BLOCKS);
cout << "Current thread to block;\n\t THREAD: " << THREADS
<< "\n\t BLOCKS: " << BLOCKS << "\n";
double *x_s = (double*) malloc(sizeof(double)*N);
/*
cout<<"Here is the initial X array: \n";
for(int i=0;i<N;i++){
printf("%.2lf ",x[i]);
}
printf("\n");
*/
/*
int tid = 2;
long long int sloc = tid*CHUNKY;
int yyy = (sloc >>1)^sloc;
for(int i=0;i<N;i++){
x_s[i] = x[i];
for(int k=0;k<N;k++){
if(((yyy>>k) & 1) == 1) {x_s[i] += M[i][k];}
}
}
long long int starts = tid*CHUNKY+1;
double local_p = 1.0;
for(int i=starts;i<starts+CHUNKY+1;i++){
int y = (i>>1) ^ i;
int yy = ( (i-1)>> 1) ^ (i-1);
int z = __builtin_ctz(y^yy);
int s = ((y>>z)& 1) == 1 ? 1:-1;
int prodsign = (i & 1) == 0 ? 1:-1;
double dd = 1.0;
for(int j=0;j<N;j++){
x_s[j] += (double)(s*M[j][z]);
dd *= x_s[j];
}
local_p += (double)(prodsign*dd);
}
printf("Here is the p in CPU: %.2lf\n",local_p);
*/
/*
cout << "Here is the X array for " << CHUNKY << " \n";
for(int i=0;i<N;i++){
printf("%.2lf ",x_s[i]);
}
printf("\n");
*/
double *XX = (double* )malloc(sizeof(double)*N*THREADS*BLOCKS);
memset(XX,0.0,sizeof(double)*N*THREADS*BLOCKS);
// memory moving magiac
cout << "Chunky is this: " << CHUNKY << " \n";
cout << "Size is this: " << SIZE << " \n";
cout << "N is this: " << N << " \n";
cudaMalloc((void **)&d_XX,THREADS*BLOCKS*N*sizeof(double));
cudaMalloc((void **)&d_mat,size_mat);
cudaMalloc((void **)&d_p,sizeof(double));
cout << "Memory Allocated...\n";
cudaMemcpy(d_XX,XX,THREADS*BLOCKS*N*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(d_mat,data_as_array,size_mat,cudaMemcpyHostToDevice);
cudaMemcpy(d_p,&p,sizeof(double),cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
cout << "Memory Copied...\n";
// preprocess the fuck out of it
preprocess<<<BLOCKS,THREADS>>>(d_XX,d_mat,SIZE,N,CHUNKY);
cudaMemcpy(XX,d_XX,THREADS*BLOCKS*N*sizeof(double),cudaMemcpyDeviceToHost);
cout << "Preprocess finished running...\n";
/*
cout << "Here is the initial XX array: \n";
for(int i=0;i<N;i++){
printf("%.2lf ",XX[i]);
}
printf("\n");
*/
/*
cout << "Here is the XX array for " << CHUNKY << " \n";
for(int i=0;i<N;i++){
printf("%.2lf ",XX[N+i]);
}
printf("\n");
*/
cout << "Algo starts now.. Hold on to your seats\n";
double start,end;
start = omp_get_wtime();
perm_kernel<<<BLOCKS,THREADS>>>(d_XX,CHUNKY,d_p,d_mat,SIZE,N);
cout << "Kernel finished running...\n";
cudaMemcpy(&p,d_p,sizeof(double),cudaMemcpyDeviceToHost);
end = omp_get_wtime();
cout << "Memory re-copied from the device to host...\n";
p*= (4*(N & 1) - 2);
cout << "Result is: " << p <<" \n";
double result = end-start;
cout <<"The time the kernel took: " << result << " ...\n";
cudaFree(d_XX);
cudaFree(d_mat);
return 0;
}
|
687aa7c3aa55f58b13869dc561d3ae86e34b8c8b.hip | // !!! This is a file automatically generated by hipify!!!
//#include <iostream>
#include <cmath>
#include <hip/hip_runtime.h>
//using std::cout;
//using std::endl;
//function is run on the device; specify device functions before global functions
__device__ float myAbs(float value) {
return (value < 0) ? (value * -1) : value;
} //end abs function
__global__ void findVals(float *x, float *y, int n, float h, float *sum, int *count){
//kernel: compute the sum and count - used later to compute mean
int me = blockIdx.x*blockDim.x+threadIdx.x;
float xi = x[me];
sum[me] = 0;
count[me] = 0;
for(int j=0; j<n; j++){
//now iterate through the j values
float xj = x[j]; //not needed - used for better visibility
if(myAbs(xj-xi) < h){
sum[me] += y[j];
count[me]++;
} //end abs if condition
} //end j for loop
} //end findVals function
//this is out "main" function - handles all the computation
void smoothc(float *x, float *y, float *m, int n, float h){
float *devx, //device x
*devy, //device y
*hsum, //host sums
*dsum; //device sum
int *hcount, //host count
*dcount; //device count
//size of arrays in bytes
int floatSize = n*sizeof(float);
int intSize = n*sizeof(int);
//allocate space on host
hsum = (float *) malloc(floatSize);
hcount = (int *) malloc(intSize);
//allocate space on device
hipMalloc((void **)&dsum, floatSize);
hipMalloc((void **)&dcount, intSize);
hipMalloc((void **)&devx, floatSize);
hipMalloc((void **)&devy, floatSize);
//Copy host parameters for device
hipMemcpy(devx,x,floatSize,hipMemcpyHostToDevice);
hipMemcpy(devy,y,floatSize,hipMemcpyHostToDevice);
int nThreads = min(n,500),
nBlocks = ceil(n/nThreads);
dim3 dimGrid(nBlocks,1);
dim3 dimBlock(nThreads,1,1);
//invoke the kernel
hipLaunchKernelGGL(( findVals), dim3(dimGrid),dim3(dimBlock), 0, 0, devx, devy, n, h, dsum, dcount);
//wait for kernel to finish;
hipDeviceSynchronize();
//copy results: device to host
hipMemcpy(hsum,dsum,floatSize,hipMemcpyDeviceToHost);
hipMemcpy(hcount,dcount,intSize,hipMemcpyDeviceToHost);
//compute the means
for(int i=0; i<n; i++){
m[i] = hsum[i]/hcount[i];
//NOTE: assuming perfect precision, hcount[i] must always be >=1 since |xi-xi|<h for all h>0
} //end i for loop
//clean up
free(hsum);
hipFree(dsum);
free(hcount);
hipFree(dcount);
hipFree(devx);
hipFree(devy);
} //end smoothc function
/*
int main() {
int n = 10;
float x[n];
float y[n];
float xcount = 10;
float h = 0.1;
for(int i = 0; i < n; i++) {
x[i] = y[i] = xcount;
}
//return array
float m[n];
smoothc(x, y, m, n, h);
for(int i = 0; i < n; i++) {
cout<<m[i]<<" ";
} cout<<endl;
}
*/
| 687aa7c3aa55f58b13869dc561d3ae86e34b8c8b.cu | //#include <iostream>
#include <cmath>
#include <cuda.h>
//using std::cout;
//using std::endl;
//function is run on the device; specify device functions before global functions
__device__ float myAbs(float value) {
return (value < 0) ? (value * -1) : value;
} //end abs function
__global__ void findVals(float *x, float *y, int n, float h, float *sum, int *count){
//kernel: compute the sum and count - used later to compute mean
int me = blockIdx.x*blockDim.x+threadIdx.x;
float xi = x[me];
sum[me] = 0;
count[me] = 0;
for(int j=0; j<n; j++){
//now iterate through the j values
float xj = x[j]; //not needed - used for better visibility
if(myAbs(xj-xi) < h){
sum[me] += y[j];
count[me]++;
} //end abs if condition
} //end j for loop
} //end findVals function
//this is out "main" function - handles all the computation
void smoothc(float *x, float *y, float *m, int n, float h){
float *devx, //device x
*devy, //device y
*hsum, //host sums
*dsum; //device sum
int *hcount, //host count
*dcount; //device count
//size of arrays in bytes
int floatSize = n*sizeof(float);
int intSize = n*sizeof(int);
//allocate space on host
hsum = (float *) malloc(floatSize);
hcount = (int *) malloc(intSize);
//allocate space on device
cudaMalloc((void **)&dsum, floatSize);
cudaMalloc((void **)&dcount, intSize);
cudaMalloc((void **)&devx, floatSize);
cudaMalloc((void **)&devy, floatSize);
//Copy host parameters for device
cudaMemcpy(devx,x,floatSize,cudaMemcpyHostToDevice);
cudaMemcpy(devy,y,floatSize,cudaMemcpyHostToDevice);
int nThreads = min(n,500),
nBlocks = ceil(n/nThreads);
dim3 dimGrid(nBlocks,1);
dim3 dimBlock(nThreads,1,1);
//invoke the kernel
findVals<<<dimGrid,dimBlock>>>(devx, devy, n, h, dsum, dcount);
//wait for kernel to finish;
cudaThreadSynchronize();
//copy results: device to host
cudaMemcpy(hsum,dsum,floatSize,cudaMemcpyDeviceToHost);
cudaMemcpy(hcount,dcount,intSize,cudaMemcpyDeviceToHost);
//compute the means
for(int i=0; i<n; i++){
m[i] = hsum[i]/hcount[i];
//NOTE: assuming perfect precision, hcount[i] must always be >=1 since |xi-xi|<h for all h>0
} //end i for loop
//clean up
free(hsum);
cudaFree(dsum);
free(hcount);
cudaFree(dcount);
cudaFree(devx);
cudaFree(devy);
} //end smoothc function
/*
int main() {
int n = 10;
float x[n];
float y[n];
float xcount = 10;
float h = 0.1;
for(int i = 0; i < n; i++) {
x[i] = y[i] = xcount;
}
//return array
float m[n];
smoothc(x, y, m, n, h);
for(int i = 0; i < n; i++) {
cout<<m[i]<<" ";
} cout<<endl;
}
*/
|
986aaf95aaeaa6e4bf4adcb08bcefd874810d0db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// SPDX-FileCopyrightText: 2021 CERN
// SPDX-License-Identifier: Apache-2.0
#include "example11.cuh"
#include <AdePT/BVHNavigator.h>
#include <CopCore/PhysicalConstants.h>
#include <G4HepEmGammaManager.hh>
#include <G4HepEmTrack.hh>
#include <G4HepEmGammaInteractionCompton.hh>
#include <G4HepEmGammaInteractionConversion.hh>
// Pull in implementation.
#include <G4HepEmGammaManager.icc>
#include <G4HepEmGammaInteractionCompton.icc>
#include <G4HepEmGammaInteractionConversion.icc>
__global__ void TransportGammas(Track *gammas, const adept::MParray *active, Secondaries secondaries,
adept::MParray *activeQueue, GlobalScoring *scoring)
{
int activeSize = active->size();
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < activeSize; i += blockDim.x * gridDim.x) {
const int slot = (*active)[i];
Track ¤tTrack = gammas[slot];
// Init a track with the needed data to call into G4HepEm.
G4HepEmTrack emTrack;
emTrack.SetEKin(currentTrack.energy);
// For now, just assume a single material.
int theMCIndex = 1;
emTrack.SetMCIndex(theMCIndex);
// Sample the `number-of-interaction-left` and put it into the track.
for (int ip = 0; ip < 3; ++ip) {
double numIALeft = currentTrack.numIALeft[ip];
if (numIALeft <= 0) {
numIALeft = -::log(currentTrack.Uniform());
currentTrack.numIALeft[ip] = numIALeft;
}
emTrack.SetNumIALeft(numIALeft, ip);
}
// Call G4HepEm to compute the physics step limit.
G4HepEmGammaManager::HowFar(&g4HepEmData, &g4HepEmPars, &emTrack);
// Get result into variables.
double geometricalStepLengthFromPhysics = emTrack.GetGStepLength();
int winnerProcessIndex = emTrack.GetWinnerProcessIndex();
// Leave the range and MFP inside the G4HepEmTrack. If we split kernels, we
// also need to carry them over!
// Check if there's a volume boundary in between.
vecgeom::NavStateIndex nextState;
double geometryStepLength = BVHNavigator::ComputeStepAndNextVolume(
currentTrack.pos, currentTrack.dir, geometricalStepLengthFromPhysics, currentTrack.navState, nextState);
currentTrack.pos += geometryStepLength * currentTrack.dir;
if (nextState.IsOnBoundary()) {
emTrack.SetGStepLength(geometryStepLength);
emTrack.SetOnBoundary(true);
}
G4HepEmGammaManager::UpdateNumIALeft(&emTrack);
// Save the `number-of-interaction-left` in our track.
for (int ip = 0; ip < 3; ++ip) {
double numIALeft = emTrack.GetNumIALeft(ip);
currentTrack.numIALeft[ip] = numIALeft;
}
if (nextState.IsOnBoundary()) {
// For now, just count that we hit something.
atomicAdd(&scoring->hits, 1);
// Kill the particle if it left the world.
if (nextState.Top() != nullptr) {
activeQueue->push_back(slot);
// Move to the next boundary.
BVHNavigator::RelocateToNextVolume(currentTrack.pos, currentTrack.dir, nextState);
currentTrack.navState = nextState;
}
continue;
} else if (winnerProcessIndex < 0) {
// No discrete process, move on.
activeQueue->push_back(slot);
continue;
}
// Reset number of interaction left for the winner discrete process.
// (Will be resampled in the next iteration.)
currentTrack.numIALeft[winnerProcessIndex] = -1.0;
// Perform the discrete interaction.
RanluxppDoubleEngine rnge(¤tTrack.rngState);
// We might need one branched RNG state, prepare while threads are synchronized.
RanluxppDouble newRNG(currentTrack.rngState.Branch());
const double energy = currentTrack.energy;
switch (winnerProcessIndex) {
case 0: {
// Invoke gamma conversion to e-/e+ pairs, if the energy is above the threshold.
if (energy < 2 * copcore::units::kElectronMassC2) {
activeQueue->push_back(slot);
continue;
}
double logEnergy = ::log(energy);
double elKinEnergy, posKinEnergy;
G4HepEmGammaInteractionConversion::SampleKinEnergies(&g4HepEmData, energy, logEnergy, theMCIndex, elKinEnergy,
posKinEnergy, &rnge);
double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
double dirSecondaryEl[3], dirSecondaryPos[3];
G4HepEmGammaInteractionConversion::SampleDirections(dirPrimary, dirSecondaryEl, dirSecondaryPos, elKinEnergy,
posKinEnergy, &rnge);
Track &electron = secondaries.electrons.NextTrack();
Track &positron = secondaries.positrons.NextTrack();
atomicAdd(&scoring->secondaries, 2);
electron.InitAsSecondary(/*parent=*/currentTrack);
electron.rngState = newRNG;
electron.energy = elKinEnergy;
electron.dir.Set(dirSecondaryEl[0], dirSecondaryEl[1], dirSecondaryEl[2]);
positron.InitAsSecondary(/*parent=*/currentTrack);
// Reuse the RNG state of the dying track.
positron.rngState = currentTrack.rngState;
positron.energy = posKinEnergy;
positron.dir.Set(dirSecondaryPos[0], dirSecondaryPos[1], dirSecondaryPos[2]);
// The current track is killed by not enqueuing into the next activeQueue.
break;
}
case 1: {
// Invoke Compton scattering of gamma.
constexpr double LowEnergyThreshold = 100 * copcore::units::eV;
if (energy < LowEnergyThreshold) {
activeQueue->push_back(slot);
continue;
}
const double origDirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
double dirPrimary[3];
const double newEnergyGamma =
G4HepEmGammaInteractionCompton::SamplePhotonEnergyAndDirection(energy, dirPrimary, origDirPrimary, &rnge);
vecgeom::Vector3D<double> newDirGamma(dirPrimary[0], dirPrimary[1], dirPrimary[2]);
const double energyEl = energy - newEnergyGamma;
if (energyEl > LowEnergyThreshold) {
// Create a secondary electron and sample/compute directions.
Track &electron = secondaries.electrons.NextTrack();
atomicAdd(&scoring->secondaries, 1);
electron.InitAsSecondary(/*parent=*/currentTrack);
electron.rngState = newRNG;
electron.energy = energyEl;
electron.dir = energy * currentTrack.dir - newEnergyGamma * newDirGamma;
electron.dir.Normalize();
} else {
atomicAdd(&scoring->energyDeposit, energyEl);
}
// Check the new gamma energy and deposit if below threshold.
if (newEnergyGamma > LowEnergyThreshold) {
currentTrack.energy = newEnergyGamma;
currentTrack.dir = newDirGamma;
// The current track continues to live.
activeQueue->push_back(slot);
} else {
atomicAdd(&scoring->energyDeposit, newEnergyGamma);
// The current track is killed by not enqueuing into the next activeQueue.
}
break;
}
case 2: {
// Invoke photoelectric process: right now only absorb the gamma.
atomicAdd(&scoring->energyDeposit, energy);
// The current track is killed by not enqueuing into the next activeQueue.
break;
}
}
}
}
| 986aaf95aaeaa6e4bf4adcb08bcefd874810d0db.cu | // SPDX-FileCopyrightText: 2021 CERN
// SPDX-License-Identifier: Apache-2.0
#include "example11.cuh"
#include <AdePT/BVHNavigator.h>
#include <CopCore/PhysicalConstants.h>
#include <G4HepEmGammaManager.hh>
#include <G4HepEmTrack.hh>
#include <G4HepEmGammaInteractionCompton.hh>
#include <G4HepEmGammaInteractionConversion.hh>
// Pull in implementation.
#include <G4HepEmGammaManager.icc>
#include <G4HepEmGammaInteractionCompton.icc>
#include <G4HepEmGammaInteractionConversion.icc>
__global__ void TransportGammas(Track *gammas, const adept::MParray *active, Secondaries secondaries,
adept::MParray *activeQueue, GlobalScoring *scoring)
{
int activeSize = active->size();
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < activeSize; i += blockDim.x * gridDim.x) {
const int slot = (*active)[i];
Track ¤tTrack = gammas[slot];
// Init a track with the needed data to call into G4HepEm.
G4HepEmTrack emTrack;
emTrack.SetEKin(currentTrack.energy);
// For now, just assume a single material.
int theMCIndex = 1;
emTrack.SetMCIndex(theMCIndex);
// Sample the `number-of-interaction-left` and put it into the track.
for (int ip = 0; ip < 3; ++ip) {
double numIALeft = currentTrack.numIALeft[ip];
if (numIALeft <= 0) {
numIALeft = -std::log(currentTrack.Uniform());
currentTrack.numIALeft[ip] = numIALeft;
}
emTrack.SetNumIALeft(numIALeft, ip);
}
// Call G4HepEm to compute the physics step limit.
G4HepEmGammaManager::HowFar(&g4HepEmData, &g4HepEmPars, &emTrack);
// Get result into variables.
double geometricalStepLengthFromPhysics = emTrack.GetGStepLength();
int winnerProcessIndex = emTrack.GetWinnerProcessIndex();
// Leave the range and MFP inside the G4HepEmTrack. If we split kernels, we
// also need to carry them over!
// Check if there's a volume boundary in between.
vecgeom::NavStateIndex nextState;
double geometryStepLength = BVHNavigator::ComputeStepAndNextVolume(
currentTrack.pos, currentTrack.dir, geometricalStepLengthFromPhysics, currentTrack.navState, nextState);
currentTrack.pos += geometryStepLength * currentTrack.dir;
if (nextState.IsOnBoundary()) {
emTrack.SetGStepLength(geometryStepLength);
emTrack.SetOnBoundary(true);
}
G4HepEmGammaManager::UpdateNumIALeft(&emTrack);
// Save the `number-of-interaction-left` in our track.
for (int ip = 0; ip < 3; ++ip) {
double numIALeft = emTrack.GetNumIALeft(ip);
currentTrack.numIALeft[ip] = numIALeft;
}
if (nextState.IsOnBoundary()) {
// For now, just count that we hit something.
atomicAdd(&scoring->hits, 1);
// Kill the particle if it left the world.
if (nextState.Top() != nullptr) {
activeQueue->push_back(slot);
// Move to the next boundary.
BVHNavigator::RelocateToNextVolume(currentTrack.pos, currentTrack.dir, nextState);
currentTrack.navState = nextState;
}
continue;
} else if (winnerProcessIndex < 0) {
// No discrete process, move on.
activeQueue->push_back(slot);
continue;
}
// Reset number of interaction left for the winner discrete process.
// (Will be resampled in the next iteration.)
currentTrack.numIALeft[winnerProcessIndex] = -1.0;
// Perform the discrete interaction.
RanluxppDoubleEngine rnge(¤tTrack.rngState);
// We might need one branched RNG state, prepare while threads are synchronized.
RanluxppDouble newRNG(currentTrack.rngState.Branch());
const double energy = currentTrack.energy;
switch (winnerProcessIndex) {
case 0: {
// Invoke gamma conversion to e-/e+ pairs, if the energy is above the threshold.
if (energy < 2 * copcore::units::kElectronMassC2) {
activeQueue->push_back(slot);
continue;
}
double logEnergy = std::log(energy);
double elKinEnergy, posKinEnergy;
G4HepEmGammaInteractionConversion::SampleKinEnergies(&g4HepEmData, energy, logEnergy, theMCIndex, elKinEnergy,
posKinEnergy, &rnge);
double dirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
double dirSecondaryEl[3], dirSecondaryPos[3];
G4HepEmGammaInteractionConversion::SampleDirections(dirPrimary, dirSecondaryEl, dirSecondaryPos, elKinEnergy,
posKinEnergy, &rnge);
Track &electron = secondaries.electrons.NextTrack();
Track &positron = secondaries.positrons.NextTrack();
atomicAdd(&scoring->secondaries, 2);
electron.InitAsSecondary(/*parent=*/currentTrack);
electron.rngState = newRNG;
electron.energy = elKinEnergy;
electron.dir.Set(dirSecondaryEl[0], dirSecondaryEl[1], dirSecondaryEl[2]);
positron.InitAsSecondary(/*parent=*/currentTrack);
// Reuse the RNG state of the dying track.
positron.rngState = currentTrack.rngState;
positron.energy = posKinEnergy;
positron.dir.Set(dirSecondaryPos[0], dirSecondaryPos[1], dirSecondaryPos[2]);
// The current track is killed by not enqueuing into the next activeQueue.
break;
}
case 1: {
// Invoke Compton scattering of gamma.
constexpr double LowEnergyThreshold = 100 * copcore::units::eV;
if (energy < LowEnergyThreshold) {
activeQueue->push_back(slot);
continue;
}
const double origDirPrimary[] = {currentTrack.dir.x(), currentTrack.dir.y(), currentTrack.dir.z()};
double dirPrimary[3];
const double newEnergyGamma =
G4HepEmGammaInteractionCompton::SamplePhotonEnergyAndDirection(energy, dirPrimary, origDirPrimary, &rnge);
vecgeom::Vector3D<double> newDirGamma(dirPrimary[0], dirPrimary[1], dirPrimary[2]);
const double energyEl = energy - newEnergyGamma;
if (energyEl > LowEnergyThreshold) {
// Create a secondary electron and sample/compute directions.
Track &electron = secondaries.electrons.NextTrack();
atomicAdd(&scoring->secondaries, 1);
electron.InitAsSecondary(/*parent=*/currentTrack);
electron.rngState = newRNG;
electron.energy = energyEl;
electron.dir = energy * currentTrack.dir - newEnergyGamma * newDirGamma;
electron.dir.Normalize();
} else {
atomicAdd(&scoring->energyDeposit, energyEl);
}
// Check the new gamma energy and deposit if below threshold.
if (newEnergyGamma > LowEnergyThreshold) {
currentTrack.energy = newEnergyGamma;
currentTrack.dir = newDirGamma;
// The current track continues to live.
activeQueue->push_back(slot);
} else {
atomicAdd(&scoring->energyDeposit, newEnergyGamma);
// The current track is killed by not enqueuing into the next activeQueue.
}
break;
}
case 2: {
// Invoke photoelectric process: right now only absorb the gamma.
atomicAdd(&scoring->energyDeposit, energy);
// The current track is killed by not enqueuing into the next activeQueue.
break;
}
}
}
}
|
b7d2bbb559ce368420fa41c3d486b2c5dbdec489.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 1
#define TW 2
#define TC 16
#define C 96
#define N 64
#define H 28
#define W 28
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(hipError_t code)
{
if (code != hipSuccess)
{
std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
exit(-1);
}
}
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[16];
__shared__ float pad_temp_shared[1920];
__shared__ float kernel_shared[768];
float pad_temp_shared_local[8];
float kernel_shared_local[48];
compute_local[(0)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(4)] = 0.000000e+00f;
compute_local[(6)] = 0.000000e+00f;
compute_local[(8)] = 0.000000e+00f;
compute_local[(10)] = 0.000000e+00f;
compute_local[(12)] = 0.000000e+00f;
compute_local[(14)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
compute_local[(5)] = 0.000000e+00f;
compute_local[(7)] = 0.000000e+00f;
compute_local[(9)] = 0.000000e+00f;
compute_local[(11)] = 0.000000e+00f;
compute_local[(13)] = 0.000000e+00f;
compute_local[(15)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 6; ++rc_outer) {
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
__syncthreads();
pad_temp_shared[((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)))] = (((((1 <= (((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 18) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 18) % 120) / 30)) + ry_outer) < 29)) && (1 <= ((((int)threadIdx.x) * 18) % 30))) && (((((int)threadIdx.x) * 18) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + (((((int)threadIdx.x) * 18) / 120) * 784)) + (((int)blockIdx.y) * 112)) + ((((((int)threadIdx.x) * 18) % 120) / 30) * 28)) + (ry_outer * 28)) + ((((int)threadIdx.x) * 18) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 1))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 1) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 1) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 1) % 30))) && ((((((int)threadIdx.x) * 18) + 1) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 1) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 1) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 1) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 2))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 2) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 2) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 2) % 30))) && ((((((int)threadIdx.x) * 18) + 2) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 2) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 2) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 2) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 3))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 3) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 3) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 3) % 30))) && ((((((int)threadIdx.x) * 18) + 3) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 3) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 3) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 3) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 4))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 4) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 4) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 4) % 30))) && ((((((int)threadIdx.x) * 18) + 4) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 4) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 4) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 4) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 5))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 5) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 5) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 5) % 30))) && ((((((int)threadIdx.x) * 18) + 5) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 5) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 5) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 5) % 30)) - 29))] : 0.000000e+00f);
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 6) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 6) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1914) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 954) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 6))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 6) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 6) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 6) % 30))) && ((((((int)threadIdx.x) * 18) + 6) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 6) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 6) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 6) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 7) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 7) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1913) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 953) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 7))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 7) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 7) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 7) % 30))) && ((((((int)threadIdx.x) * 18) + 7) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 7) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 7) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 7) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 8) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 8) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1912) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 952) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 8))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 8) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 8) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 8) % 30))) && ((((((int)threadIdx.x) * 18) + 8) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 8) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 8) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 8) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 9) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 9) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1911) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 951) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 9))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 9) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 9) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 9) % 30))) && ((((((int)threadIdx.x) * 18) + 9) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 9) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 9) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 9) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 10) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 10) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1910) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 950) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 10))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 10) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 10) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 10) % 30))) && ((((((int)threadIdx.x) * 18) + 10) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 10) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 10) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 10) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 11) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 11) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1909) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 949) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 11))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 11) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 11) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 11) % 30))) && ((((((int)threadIdx.x) * 18) + 11) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 11) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 11) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 11) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 12) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 12) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1908) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 948) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 12))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 12) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 12) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 12) % 30))) && ((((((int)threadIdx.x) * 18) + 12) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 12) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 12) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 12) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 13) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 13) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1907) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 947) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 13))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 13) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 13) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 13) % 30))) && ((((((int)threadIdx.x) * 18) + 13) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 13) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 13) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 13) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 14) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 14) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1906) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 946) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 14))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 14) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 14) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 14) % 30))) && ((((((int)threadIdx.x) * 18) + 14) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 14) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 14) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 14) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 15) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 15) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1905) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 945) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 15))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 15) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 15) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 15) % 30))) && ((((((int)threadIdx.x) * 18) + 15) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 15) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 15) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 15) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 16) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 16) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1904) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 944) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 16))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 16) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 16) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 16) % 30))) && ((((((int)threadIdx.x) * 18) + 16) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 16) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 16) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 16) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 17) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 17) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1903) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 943) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 17))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 17) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 17) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 17) % 30))) && ((((((int)threadIdx.x) * 18) + 17) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 17) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 17) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 17) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
kernel_shared[((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + (((((int)threadIdx.x) * 7) / 48) * 864)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) % 48) / 3) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.x) * 7) % 3)))];
kernel_shared[(((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) + 1))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 864)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 7) + 1) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 7) + 1) % 3)))];
kernel_shared[(((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) + 2))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 864)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 7) + 2) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 7) + 2) % 3)))];
kernel_shared[(((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) + 3))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 864)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 7) + 3) % 48) / 3) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.x) * 7) % 3)))];
kernel_shared[(((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) + 4))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 864)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 7) + 4) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 7) + 1) % 3)))];
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 16) {
if ((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 256) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) < 763) {
if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 7)) < 379) {
if (((int)threadIdx.x) < 13) {
kernel_shared[(((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) + 5))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 864)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 7) + 5) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 7) + 2) % 3)))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 16) {
if ((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + ((((int)threadIdx.x) * 7) / 3)) < 254) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) < 762) {
if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 7)) < 378) {
if (((int)threadIdx.x) < 13) {
kernel_shared[(((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) + 6))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 864)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 7) + 6) % 48) / 3) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.x) * 7) % 3)))];
}
}
}
}
}
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 8; ++rc_inner_outer) {
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)) + 1))];
pad_temp_shared_local[(2)] = pad_temp_shared[(((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)) + 2))];
pad_temp_shared_local[(3)] = pad_temp_shared[(((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)) + 3))];
pad_temp_shared_local[(4)] = pad_temp_shared[(((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)) + 120))];
pad_temp_shared_local[(5)] = pad_temp_shared[(((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)) + 121))];
pad_temp_shared_local[(6)] = pad_temp_shared[(((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)) + 122))];
pad_temp_shared_local[(7)] = pad_temp_shared[(((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)) + 123))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 96))];
kernel_shared_local[(12)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 192))];
kernel_shared_local[(18)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 288))];
kernel_shared_local[(24)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 384))];
kernel_shared_local[(30)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 480))];
kernel_shared_local[(36)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 576))];
kernel_shared_local[(42)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 672))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 1))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 97))];
kernel_shared_local[(13)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 193))];
kernel_shared_local[(19)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 289))];
kernel_shared_local[(25)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 385))];
kernel_shared_local[(31)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 481))];
kernel_shared_local[(37)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 577))];
kernel_shared_local[(43)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 673))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 2))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 98))];
kernel_shared_local[(14)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 194))];
kernel_shared_local[(20)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 290))];
kernel_shared_local[(26)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 386))];
kernel_shared_local[(32)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 482))];
kernel_shared_local[(38)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 578))];
kernel_shared_local[(44)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 674))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 3))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 99))];
kernel_shared_local[(15)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 195))];
kernel_shared_local[(21)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 291))];
kernel_shared_local[(27)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 387))];
kernel_shared_local[(33)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 483))];
kernel_shared_local[(39)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 579))];
kernel_shared_local[(45)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 675))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 4))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 100))];
kernel_shared_local[(16)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 196))];
kernel_shared_local[(22)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 292))];
kernel_shared_local[(28)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 388))];
kernel_shared_local[(34)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 484))];
kernel_shared_local[(40)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 580))];
kernel_shared_local[(46)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 676))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 5))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 101))];
kernel_shared_local[(17)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 197))];
kernel_shared_local[(23)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 293))];
kernel_shared_local[(29)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 389))];
kernel_shared_local[(35)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 485))];
kernel_shared_local[(41)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 581))];
kernel_shared_local[(47)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 677))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(12)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(18)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(24)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(30)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(36)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(42)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(12)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(18)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(24)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(30)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(36)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(42)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(13)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(19)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(25)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(31)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(37)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(43)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(13)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(19)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(25)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(31)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(37)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(43)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(14)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(20)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(26)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(32)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(38)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(44)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(14)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(20)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(26)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(32)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(38)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(44)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(15)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(21)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(27)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(33)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(39)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(45)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(9)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(15)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(21)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(27)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(33)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(39)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(45)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(10)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(16)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(22)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(28)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(34)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(40)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(46)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(10)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(16)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(22)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(28)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(34)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(40)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(46)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(5)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(11)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(17)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(23)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(29)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(35)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(41)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(47)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(5)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(11)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(17)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(23)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(29)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(35)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(41)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(47)]));
}
}
}
compute[((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)))] = compute_local[(0)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 1568))] = compute_local[(2)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 3136))] = compute_local[(4)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 4704))] = compute_local[(6)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 6272))] = compute_local[(8)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 7840))] = compute_local[(10)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 9408))] = compute_local[(12)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 10976))] = compute_local[(14)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 1))] = compute_local[(1)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 1569))] = compute_local[(3)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 3137))] = compute_local[(5)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 4705))] = compute_local[(7)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 6273))] = compute_local[(9)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 7841))] = compute_local[(11)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 9409))] = compute_local[(13)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 10977))] = compute_local[(15)];
}
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(1,7,4);
dim3 block(14,4,2);
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tvm;
hipEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
hipMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(hipFuncSetAttribute(conv2d,hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
hipEventRecord(event_start);
hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/A100-layers-eval-modeling.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_tvm, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<
cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
| b7d2bbb559ce368420fa41c3d486b2c5dbdec489.cu | #include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 1
#define TW 2
#define TC 16
#define C 96
#define N 64
#define H 28
#define W 28
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(cudaError_t code)
{
if (code != cudaSuccess)
{
std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
exit(-1);
}
}
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[16];
__shared__ float pad_temp_shared[1920];
__shared__ float kernel_shared[768];
float pad_temp_shared_local[8];
float kernel_shared_local[48];
compute_local[(0)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(4)] = 0.000000e+00f;
compute_local[(6)] = 0.000000e+00f;
compute_local[(8)] = 0.000000e+00f;
compute_local[(10)] = 0.000000e+00f;
compute_local[(12)] = 0.000000e+00f;
compute_local[(14)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
compute_local[(5)] = 0.000000e+00f;
compute_local[(7)] = 0.000000e+00f;
compute_local[(9)] = 0.000000e+00f;
compute_local[(11)] = 0.000000e+00f;
compute_local[(13)] = 0.000000e+00f;
compute_local[(15)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 6; ++rc_outer) {
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
__syncthreads();
pad_temp_shared[((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)))] = (((((1 <= (((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 18) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + (((((int)threadIdx.x) * 18) % 120) / 30)) + ry_outer) < 29)) && (1 <= ((((int)threadIdx.x) * 18) % 30))) && (((((int)threadIdx.x) * 18) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + (((((int)threadIdx.x) * 18) / 120) * 784)) + (((int)blockIdx.y) * 112)) + ((((((int)threadIdx.x) * 18) % 120) / 30) * 28)) + (ry_outer * 28)) + ((((int)threadIdx.x) * 18) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 1))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 1) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 1) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 1) % 30))) && ((((((int)threadIdx.x) * 18) + 1) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 1) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 1) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 1) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 2))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 2) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 2) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 2) % 30))) && ((((((int)threadIdx.x) * 18) + 2) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 2) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 2) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 2) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 3))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 3) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 3) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 3) % 30))) && ((((((int)threadIdx.x) * 18) + 3) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 3) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 3) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 3) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 4))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 4) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 4) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 4) % 30))) && ((((((int)threadIdx.x) * 18) + 4) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 4) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 4) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 4) % 30)) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 5))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 5) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 5) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 5) % 30))) && ((((((int)threadIdx.x) * 18) + 5) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 5) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 5) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 5) % 30)) - 29))] : 0.000000e+00f);
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 6) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 6) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1914) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 954) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 6))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 6) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 6) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 6) % 30))) && ((((((int)threadIdx.x) * 18) + 6) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 6) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 6) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 6) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 7) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 7) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1913) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 953) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 7))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 7) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 7) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 7) % 30))) && ((((((int)threadIdx.x) * 18) + 7) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 7) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 7) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 7) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 8) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 8) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1912) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 952) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 8))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 8) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 8) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 8) % 30))) && ((((((int)threadIdx.x) * 18) + 8) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 8) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 8) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 8) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 9) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 9) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1911) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 951) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 9))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 9) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 9) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 9) % 30))) && ((((((int)threadIdx.x) * 18) + 9) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 9) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 9) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 9) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 10) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 10) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1910) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 950) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 10))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 10) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 10) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 10) % 30))) && ((((((int)threadIdx.x) * 18) + 10) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 10) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 10) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 10) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 11) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 11) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1909) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 949) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 11))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 11) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 11) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 11) % 30))) && ((((((int)threadIdx.x) * 18) + 11) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 11) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 11) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 11) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 12) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 12) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1908) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 948) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 12))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 12) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 12) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 12) % 30))) && ((((((int)threadIdx.x) * 18) + 12) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 12) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 12) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 12) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 13) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 13) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1907) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 947) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 13))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 13) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 13) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 13) % 30))) && ((((((int)threadIdx.x) * 18) + 13) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 13) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 13) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 13) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 14) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 14) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1906) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 946) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 14))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 14) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 14) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 14) % 30))) && ((((((int)threadIdx.x) * 18) + 14) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 14) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 14) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 14) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 15) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 15) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1905) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 945) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 15))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 15) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 15) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 15) % 30))) && ((((((int)threadIdx.x) * 18) + 15) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 15) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 15) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 15) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 16) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 16) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1904) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 944) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 16))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 16) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 16) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 16) % 30))) && ((((((int)threadIdx.x) * 18) + 16) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 16) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 16) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 16) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 18) + 17) / 120)) < 16) {
if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.y) * 8)) + (((((int)threadIdx.x) * 18) + 17) / 30)) < 64) {
if ((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) < 1903) {
if (((((int)threadIdx.y) * 240) + (((int)threadIdx.x) * 18)) < 943) {
if (((int)threadIdx.x) < 13) {
pad_temp_shared[(((((((int)threadIdx.z) * 960) + (((int)threadIdx.y) * 240)) + (((int)threadIdx.x) * 18)) + 17))] = (((((1 <= (((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 17) % 120) / 30)) + ry_outer)) && ((((((int)blockIdx.y) * 4) + ((((((int)threadIdx.x) * 18) + 17) % 120) / 30)) + ry_outer) < 29)) && (1 <= (((((int)threadIdx.x) * 18) + 17) % 30))) && ((((((int)threadIdx.x) * 18) + 17) % 30) < 29)) ? data[((((((((((rc_outer * 12544) + (((int)threadIdx.z) * 6272)) + (((int)threadIdx.y) * 1568)) + ((((((int)threadIdx.x) * 18) + 17) / 120) * 784)) + (((int)blockIdx.y) * 112)) + (((((((int)threadIdx.x) * 18) + 17) % 120) / 30) * 28)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 18) + 17) % 30)) - 29))] : 0.000000e+00f);
}
}
}
}
}
kernel_shared[((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + (((((int)threadIdx.x) * 7) / 48) * 864)) + (rc_outer * 144)) + ((((((int)threadIdx.x) * 7) % 48) / 3) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.x) * 7) % 3)))];
kernel_shared[(((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) + 1))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 7) + 1) / 48) * 864)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 7) + 1) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 7) + 1) % 3)))];
kernel_shared[(((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) + 2))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 7) + 2) / 48) * 864)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 7) + 2) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 7) + 2) % 3)))];
kernel_shared[(((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) + 3))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 7) + 3) / 48) * 864)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 7) + 3) % 48) / 3) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.x) * 7) % 3)))];
kernel_shared[(((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) + 4))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 7) + 4) / 48) * 864)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 7) + 4) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 7) + 1) % 3)))];
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 7) + 5) / 48)) < 16) {
if ((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + (((((int)threadIdx.x) * 7) + 5) / 3)) < 256) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) < 763) {
if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 7)) < 379) {
if (((int)threadIdx.x) < 13) {
kernel_shared[(((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) + 5))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 7) + 5) / 48) * 864)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 7) + 5) % 48) / 3) * 9)) + (ry_outer * 3)) + (((((int)threadIdx.x) * 7) + 2) % 3)))];
}
}
}
}
}
if ((((((int)threadIdx.z) * 8) + (((int)threadIdx.y) * 2)) + (((((int)threadIdx.x) * 7) + 6) / 48)) < 16) {
if ((((((int)threadIdx.z) * 128) + (((int)threadIdx.y) * 32)) + ((((int)threadIdx.x) * 7) / 3)) < 254) {
if ((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) < 762) {
if (((((int)threadIdx.y) * 96) + (((int)threadIdx.x) * 7)) < 378) {
if (((int)threadIdx.x) < 13) {
kernel_shared[(((((((int)threadIdx.z) * 384) + (((int)threadIdx.y) * 96)) + (((int)threadIdx.x) * 7)) + 6))] = kernel[(((((((((((int)blockIdx.z) * 13824) + (((int)threadIdx.z) * 6912)) + (((int)threadIdx.y) * 1728)) + ((((((int)threadIdx.x) * 7) + 6) / 48) * 864)) + (rc_outer * 144)) + (((((((int)threadIdx.x) * 7) + 6) % 48) / 3) * 9)) + (ry_outer * 3)) + ((((int)threadIdx.x) * 7) % 3)))];
}
}
}
}
}
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 8; ++rc_inner_outer) {
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)))];
pad_temp_shared_local[(1)] = pad_temp_shared[(((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)) + 1))];
pad_temp_shared_local[(2)] = pad_temp_shared[(((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)) + 2))];
pad_temp_shared_local[(3)] = pad_temp_shared[(((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)) + 3))];
pad_temp_shared_local[(4)] = pad_temp_shared[(((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)) + 120))];
pad_temp_shared_local[(5)] = pad_temp_shared[(((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)) + 121))];
pad_temp_shared_local[(6)] = pad_temp_shared[(((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)) + 122))];
pad_temp_shared_local[(7)] = pad_temp_shared[(((((rc_inner_outer * 240) + (((int)threadIdx.y) * 30)) + (((int)threadIdx.x) * 2)) + 123))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)))];
kernel_shared_local[(6)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 96))];
kernel_shared_local[(12)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 192))];
kernel_shared_local[(18)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 288))];
kernel_shared_local[(24)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 384))];
kernel_shared_local[(30)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 480))];
kernel_shared_local[(36)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 576))];
kernel_shared_local[(42)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 672))];
kernel_shared_local[(1)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 1))];
kernel_shared_local[(7)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 97))];
kernel_shared_local[(13)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 193))];
kernel_shared_local[(19)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 289))];
kernel_shared_local[(25)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 385))];
kernel_shared_local[(31)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 481))];
kernel_shared_local[(37)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 577))];
kernel_shared_local[(43)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 673))];
kernel_shared_local[(2)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 2))];
kernel_shared_local[(8)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 98))];
kernel_shared_local[(14)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 194))];
kernel_shared_local[(20)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 290))];
kernel_shared_local[(26)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 386))];
kernel_shared_local[(32)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 482))];
kernel_shared_local[(38)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 578))];
kernel_shared_local[(44)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 674))];
kernel_shared_local[(3)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 3))];
kernel_shared_local[(9)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 99))];
kernel_shared_local[(15)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 195))];
kernel_shared_local[(21)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 291))];
kernel_shared_local[(27)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 387))];
kernel_shared_local[(33)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 483))];
kernel_shared_local[(39)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 579))];
kernel_shared_local[(45)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 675))];
kernel_shared_local[(4)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 4))];
kernel_shared_local[(10)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 100))];
kernel_shared_local[(16)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 196))];
kernel_shared_local[(22)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 292))];
kernel_shared_local[(28)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 388))];
kernel_shared_local[(34)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 484))];
kernel_shared_local[(40)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 580))];
kernel_shared_local[(46)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 676))];
kernel_shared_local[(5)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 5))];
kernel_shared_local[(11)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 101))];
kernel_shared_local[(17)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 197))];
kernel_shared_local[(23)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 293))];
kernel_shared_local[(29)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 389))];
kernel_shared_local[(35)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 485))];
kernel_shared_local[(41)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 581))];
kernel_shared_local[(47)] = kernel_shared[((((((int)threadIdx.z) * 48) + (rc_inner_outer * 6)) + 677))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(6)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(12)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(18)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(24)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(30)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(36)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(42)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(12)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(18)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(24)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(30)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(36)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(42)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(7)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(13)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(19)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(25)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(31)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(37)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(43)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(13)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(19)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(25)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(31)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(37)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(43)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(8)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(14)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(20)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(26)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(32)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(38)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(44)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(14)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(20)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(26)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(32)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(38)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(44)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(15)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(21)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(27)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(33)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(39)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(45)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(9)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(15)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(21)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(27)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(33)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(39)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(45)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(10)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(16)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(22)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(28)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(34)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(40)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(46)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(10)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(16)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(22)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(28)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(34)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(40)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(46)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(5)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(11)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(17)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(23)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(29)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(35)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(41)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(47)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(5)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(11)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(17)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(23)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(29)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(35)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(41)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(47)]));
}
}
}
compute[((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)))] = compute_local[(0)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 1568))] = compute_local[(2)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 3136))] = compute_local[(4)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 4704))] = compute_local[(6)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 6272))] = compute_local[(8)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 7840))] = compute_local[(10)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 9408))] = compute_local[(12)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 10976))] = compute_local[(14)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 1))] = compute_local[(1)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 1569))] = compute_local[(3)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 3137))] = compute_local[(5)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 4705))] = compute_local[(7)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 6273))] = compute_local[(9)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 7841))] = compute_local[(11)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 9409))] = compute_local[(13)];
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)threadIdx.x) * 2)) + 10977))] = compute_local[(15)];
}
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(1,7,4);
dim3 block(14,4,2);
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tvm;
cudaEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
cudaEventRecord(event_start);
conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/A100-layers-eval-modeling.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_tvm, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<
cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
|
4bd4f39082167a34ac5f75a14f9cd4adf3e096c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string>
#include <iostream>
#include "oddsEnds.h"
extern __global__ void sequence_gpu(int *d_ptr, int length);
int test_zerocopy()
{
using namespace std;
cout << "Running zero-copy test..." << endl;
const int N = 100;
int *d_ptr;
int *h_ptr;
ASSERT(hipSuccess == hipHostMalloc(&h_ptr, N * sizeof(int), hipHostMallocMapped || hipHostMallocWriteCombined), "Host allocation of " << N << " ints failed", -1);
cout << "Memory allocated successfully" << endl;
ASSERT(hipSuccess == hipHostGetDevicePointer(&d_ptr, h_ptr, 0), "Get device pointer failed", -1);
dim3 cudaBlockSize(32,1,1);
dim3 cudaGridSize((N + cudaBlockSize.x - 1) / cudaBlockSize.x, 1, 1);
hipLaunchKernelGGL(( sequence_gpu), dim3(cudaGridSize), dim3(cudaBlockSize), 0, 0, d_ptr, N);
ASSERT(hipSuccess == hipGetLastError(), "Kernel launch failed", -1);
ASSERT(hipSuccess == hipDeviceSynchronize(), "Kernel synchronization failed", -1);
sequence_cpu(h_ptr, N);
cout << "CUDA and CPU algorithm implementations finished" << endl;
int *h_d_ptr;
ASSERT(hipSuccess == hipHostMalloc(&h_d_ptr, N *sizeof(int)), "Host allocation of " << N << " ints failed", -1);
ASSERT(hipSuccess == hipMemcpy(h_d_ptr, d_ptr, N *sizeof(int), hipMemcpyDeviceToHost), "Copy of " << N << " ints from device to host failed", -1);
bool bValid = true;
for (int i=0; i<N && bValid; i++)
if (h_ptr[i] != h_d_ptr[i])
{
bValid = false;
break;
}
ASSERT(hipSuccess == hipHostFree(h_ptr), "Host deallocation failed", -1);
ASSERT(hipSuccess == hipHostFree(h_d_ptr), "Host deallocation failed", -1);
cout << "Memory deallocated successfully" << endl;
cout << "TEST Results: " << bValid << endl;
return bValid;
}
| 4bd4f39082167a34ac5f75a14f9cd4adf3e096c8.cu | #include <string>
#include <iostream>
#include "oddsEnds.h"
extern __global__ void sequence_gpu(int *d_ptr, int length);
int test_zerocopy()
{
using namespace std;
cout << "Running zero-copy test..." << endl;
const int N = 100;
int *d_ptr;
int *h_ptr;
ASSERT(cudaSuccess == cudaHostAlloc(&h_ptr, N * sizeof(int), cudaHostAllocMapped || cudaHostAllocWriteCombined), "Host allocation of " << N << " ints failed", -1);
cout << "Memory allocated successfully" << endl;
ASSERT(cudaSuccess == cudaHostGetDevicePointer(&d_ptr, h_ptr, 0), "Get device pointer failed", -1);
dim3 cudaBlockSize(32,1,1);
dim3 cudaGridSize((N + cudaBlockSize.x - 1) / cudaBlockSize.x, 1, 1);
sequence_gpu<<<cudaGridSize, cudaBlockSize>>>(d_ptr, N);
ASSERT(cudaSuccess == cudaGetLastError(), "Kernel launch failed", -1);
ASSERT(cudaSuccess == cudaDeviceSynchronize(), "Kernel synchronization failed", -1);
sequence_cpu(h_ptr, N);
cout << "CUDA and CPU algorithm implementations finished" << endl;
int *h_d_ptr;
ASSERT(cudaSuccess == cudaMallocHost(&h_d_ptr, N *sizeof(int)), "Host allocation of " << N << " ints failed", -1);
ASSERT(cudaSuccess == cudaMemcpy(h_d_ptr, d_ptr, N *sizeof(int), cudaMemcpyDeviceToHost), "Copy of " << N << " ints from device to host failed", -1);
bool bValid = true;
for (int i=0; i<N && bValid; i++)
if (h_ptr[i] != h_d_ptr[i])
{
bValid = false;
break;
}
ASSERT(cudaSuccess == cudaFreeHost(h_ptr), "Host deallocation failed", -1);
ASSERT(cudaSuccess == cudaFreeHost(h_d_ptr), "Host deallocation failed", -1);
cout << "Memory deallocated successfully" << endl;
cout << "TEST Results: " << bValid << endl;
return bValid;
}
|
8a0cda95890ffc1a0bf1ddbe31c4b951b7f9c9be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
const int threshold=10;
const int m=1024;
__global__ void selection_sort(int *data, int left, int right)
{
int temp,i,j;
//printf("in selection sort\n");
for(i=left;i<right; i++)
for(j=i+1;j<=right;j++)
if(data[i] > data[j])
{
temp=data[i];
data[i]=data[j];
data[j]=temp;
}
}
__global__ void partition(int *a,int left,int right,int pivot,int *al,int *ah)
{
int l,h,i;
int diff=(right-left+1)/m;
int k1=threadIdx.x*diff+left;
int k2=k1+diff-1;
if(threadIdx.x==m-1)
k2=right;
// printf("in partition %d %d and k1= %d k2= %d \n",left,right,k1,k2);
l=h=k1;
for(i=k1;i<=k2;i++)
{
al[i]=ah[i]=-999;
}
for(i=k1;i<=k2;i++)
{
if(a[i] < pivot)
{
al[l++]=a[i];
}
else
{
if(a[i] > pivot)
{
ah[h++]=a[i];
}
}
}
}
//__global__
void quicksort(int *a, const int left, const int right)
{
//printf("in quick sort %d %d \n",left,right);
if (right-left <= threshold)
{
int *ad;
hipMalloc((void **)&ad,(right-left+1)*sizeof(int));
hipMemcpy(ad,a,(right-left+1)*sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( selection_sort), dim3(1),dim3(1), 0, 0, ad, left, right);
hipMemcpy(a,ad,(right-left+1)*sizeof(int),hipMemcpyDeviceToHost);
return;
}
int pivot=a[left];
int *al,*ah;
int *ad;
hipMalloc((void **)&ad,(right-left+1)*sizeof(int));
hipMalloc((void **)&al,(right-left+1)*sizeof(int));
hipMalloc((void **)&ah,(right-left+1)*sizeof(int));
hipMemcpy(ad,a,(right-left+1)*sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( partition), dim3(1),dim3(m), 0, 0, ad,left,right,pivot,al,ah);
int al_h[right-left+1],ah_h[right-left+1];
hipMemcpy(al_h,al,(right-left+1)*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(ah_h,ah,(right-left+1)*sizeof(int),hipMemcpyDeviceToHost);
int i=0,k=0;
while(i < right-left+1)
{
while(al_h[i]==-999 && i < right-left+1)
i++;
while(al_h[i]!=-999 && i < right-left+1)
{
al_h[k++]=al_h[i++];
}
}
//quicksort<<<1,1>>>(al,0,k-1);
quicksort(al_h,0,k-1);
int p=left;
int x=0;
// printf("array al");
while(x < k)
{
a[p++]=al_h[x++];
//printf("%d ",a[p]);
}
a[p]=pivot;
i=0;
k=0;
while(i < right-left+1)
{
while(ah_h[i]==-999 && i < right-left+1)
i++;
while(ah_h[i]!=-999 && i < right-left+1)
{
ah_h[k++]=ah_h[i++];
}
}
//quicksort<<<1,1>>>(ah,0,k-1);
quicksort(ah_h,0,k-1);
i=0;
p++;
while(i < k)
{
a[p++]=ah_h[i++];
}
}
int main()
{
int n ;
printf("\n Enter number of elements :"); //you can give any large number
scanf("%d",&n);
int a[n];
time_t t;
srand((unsigned)time(&t));
for (unsigned i = 0 ; i < n ; i++)
{
a[i]=rand()%n;
}
printf("\n\n original array\n");
for(int i=0; i < n;i++)
printf("%d\t ",a[i]);
quicksort(a,0,n-1);
printf("\n\n after sorting\n");
for(int i=0; i < n;i++)
printf("%d\t ",a[i]);
}/*
**************************************output**********************************************************
Enter number of elements :1001
original array
372 714 453 297 548 632 339 131 544 573 559 322 300 274
5 358 276 593 572 362 360 950 637 955 823 472 605 975
366 695 247 738 408 700 725 956 22 64 778 566 327 336 889
627 300 584 675 577 177 247 629 227 887 265 872 709 427
476 374 793 171 621 531 269 10 946 916 32 9 693 599 336 719
177 654 19 761 328 286 628 265 915 855 842 870 417 241 987
893 615 780 754 926 0 23 936 946 939 659 956 321 257 982
730 434 635 439 885 654 415 513 609 19 57 451 579 474 692
566 57 997 345 811 922 35 834 547 981 462 205 626 473 462
608 894 586 933 332 471 586 748 674 886 457 421 336 36 586
718 602 643 714 637 453 325 672 978 872 342 129 768 969
603 920 576 186 506 508 518 667 785 956 30 670 103 451 696
139 36 413 431 369 817 758 823 141 119 490 703 461 619 470
429 912 390 695 97 586 894 306 943 678 952 663 37 54 113 423
193 841 836 314 900 652 71 722 483 190 211 185 342 521 656
771 432 736 466 220 321 49 526 954 417 477 616 454 532 419
877 415 950 402 730 850 53 491 261 536 372 473 411 714 684
66 174 806 802 330 25 813 379 241 456 796 409 71 940 941
491 816 45 130 217 465 980 961 647 241 186 18 404 598 422
87 354 286 893 847 617 609 349 996 850 806 482 949 877
421 579 57 237 625 188 144 780 858 796 426 98 672 134 502
960 246 279 4 533 863 851 149 161 199 835 701 695 316 650
262 738 228 9 665 853 888 499 323 746 294 749 534 657 574
727 306 820 5 310 352 558 160 191 719 50 26 110 435 342 760
697 770 988 707 434 531 594 624 854 29 608 292 564 264 866
290 571 686 986 881 728 234 732 920 643 472 636 753 907
978 512 294 438 190 0 562 411 284 185 955 4 794 937 568
748 803 548 8 178 223 580 596 457 311 515 100 473 150 853
69 819 55 363 947 936 53 508 36 338 384 991 342 868 927 600
615 419 147 314 287 60 894 884 518 895 88 308 367 930 851
126 748 596 490 384 531 543 582 567 571 966 247 913 523
174 202 829 283 39 142 571 100 35 144 308 930 232 616 987
161 156 803 599 753 292 983 974 836 565 231 406 220 478
9 744 342 211 572 626 942 714 887 41 439 30 349 58 262 655
735 114 811 537 713 253 830 386 227 355 951 458 451 861
626 460 604 969 362 866 284 303 269 861 34 708 891 383 766
843 728 500 957 228 37 360 482 557 746 399 912 386 547 52
246 863 513 541 521 565 406 805 558 676 665 592 73 245 665
530 88 82 29 735 310 757 94 482 3 840 571 605 225 117 658 162
981 861 703 191 425 799 997 673 164 351 264 238 597 619
768 685 701 487 419 10 935 204 183 938 43 754 543 960 872
200 121 542 60 824 733 175 312 419 848 477 771 802 405 57
420 172 432 811 349 541 821 283 745 3 912 789 758 454 438
319 344 559 551 94 72 974 960 384 83 497 861 544 298 265 601
408 127 723 218 167 263 729 140 8 423 51 487 871 195 925
880 229 173 430 323 245 93 973 629 176 469 490 410 457 445
10 555 263 733 463 430 687 192 260 385 305 312 872 175
507 486 54 427 659 174 440 904 958 413 222 825 572 402 234
720 538 936 965 801 358 428 921 44 310 180 429 615 182 991
480 380 166 534 807 515 708 246 108 355 659 331 179 922
733 414 641 270 349 605 761 707 723 371 442 32 242 871 337
424 552 817 804 718 350 610 233 748 857 341 103 205 362
973 817 95 386 457 55 425 753 817 132 475 878 574 198 119
134 535 544 686 352 347 94 392 648 327 831 194 358 624 89
721 596 907 506 983 53 561 407 806 67 229 972 946 493 169
64 318 394 298 694 436 336 788 829 674 805 349 868 163 973
647 574 258 553 79 931 607 330 338 102 88 257 764 724 441 933
788 759 17 777 142 453 112 931 972 786 735 320 343 588 983 990
852 241 233 621 171 840 642 199 632 730 147 396 453 588 18 931
36 35 397 178 179 509 108 150 985 534 161 327 812 834 7 354
765 240 665 937 770 306 826 401 726 663 487 178 250 506 800
977 231 196 155 100 396 954 942 380 177 102 398 680 626 405
33 391 335 388 17 104 695 533 195 420 196 683 289 446 879 779
423 800 975 268 901 60 221 842 441 89 634 529 769 259 624 492
650 959 880 357 62 574 891 948 685 86 630 664 222 508 442 335
308 416 603 899 477 515 740 608 604 63 827 372 322 450 554 663
408 433 19 160 698 910 107 72
after sorting
0 0 3 3 4 4 5 7 8 8 9 9 9 10 17 17 18 18 19 19 19 17 19 22 23 25 26 29 29
30 30 32 33 34 35 36 37 39 41 43 44 45 49 50 51 52 53 53 53 54 55 55 53 53 55 53 36 35
33 57 58 60 60 60 62 63 60 62 63 63 64 66 67 69 71 72 72 73 79 82 83 86 87 88 88 88 89
89 93 94 94 94 95 72 97 98 100 100 100 102 102 103 104 107 108 108
110 112 113 114 117 119 121 126 127 107 72 129 130 72 72 131 132
134 134 139 140 141 142 142 144 147 147 149 150 150 155 156
160 160 161 162 163 164 166 167 169 155 160 160 171 172 173
174 174 174 175 175 176 160 177 178 178 178 179 179 180 182
183 185 185 186 188 190 190 191 191 192 193 194 195 195 196
196 198 199 199 200 202 204 196 205 211 211 217 218 220 220
221 222 222 223 225 222 227 228 228 229 229 231 231 232 233
233 234 234 237 238 240 241 242 245 245 246 246 246 233 240
231 222 222 160 247 250 253 257 258 259 260 261 262 262 263
263 264 264 259 265 268 269 269 270 268 259 222 160 274 276
279 283 283 284 284 286 287 289 290 292 292 294 294 289 297
298 298 300 303 305 306 308 308 308 310 310 310 311 312 312
314 316 318 319 320 308 306 308 321 308 308 322 323 323 325
327 328 330 330 331 332 335 335 336 337 338 338 338 335 335
335 335 322 339 341 342 342 342 342 342 343 344 345 347 349
349 349 349 349 350 351 352 352 354 355 355 357 357 358 360
360 362 362 362 363 366 367 369 371 354 357 372 374 379 380
380 383 384 384 384 385 386 386 386 388 390 391 392 394 396
396 397 398 399 401 402 404 405 405 406 406 407 391 408 409
410 411 411 413 413 414 415 415 416 417 419 419 419 419 420
420 421 422 423 423 423 424 425 425 426 423 416 427 428 429
429 430 430 431 432 432 433 434 435 436 438 438 439 440 441
441 442 442 445 446 450 451 451 451 450 433 433 408 433 453
454 454 456 457 457 457 457 458 460 461 462 463 465 466 469
470 471 457 472 473 473 473 474 475 476 477 477 477 478 480
482 482 482 483 486 487 487 487 490 491 491 492 493 497 499
500 502 492 477 506 507 508 508 508 509 512 508 477 513 515
515 515 518 518 521 521 523 526 529 530 515 531 532 533 533
534 534 534 535 536 537 538 541 541 542 543 543 477 515 515
544 547 547 477 515 548 551 552 553 554 555 557 558 558 559
561 562 564 565 565 566 567 568 571 571 571 571 561 572 572
561 554 573 574 574 574 574 576 577 579 579 580 582 584 586
586 586 586 588 588 592 593 594 596 596 596 597 598 599 600
601 602 603 603 604 604 604 605 607 608 608 608 609 609 610
615 616 616 617 619 619 607 608 621 624 624 624 625 626 626
626 626 608 627 628 629 629 630 608 604 632 634 635 636 637
641 642 643 643 647 647 648 650 650 652 654 655 656 657 658
659 663 663 663 664 665 665 665 665 667 670 672 673 663 674
663 664 663 663 675 676 678 680 683 684 685 685 686 686 687
692 693 694 695 696 697 698 700 701 701 703 703 707 707 708
708 709 713 664 663 698 698 698 698 408 433 698 714 718 718
719 719 720 721 722 723 723 724 725 726 727 728 728 729 730
732 733 733 733 735 735 735 736 730 726 738 740 744 745 746
746 748 749 753 753 753 748 753 740 754 757 758 758 759 760
740 761 764 765 766 768 768 769 770 770 771 771 777 740 778
779 780 780 785 786 788 788 789 793 794 796 796 799 800 800
801 802 803 803 804 805 805 800 806 807 800 800 811 812 813
816 817 817 817 817 819 820 821 800 800 740 823 824 825 826
827 829 829 830 831 834 835 836 836 840 840 841 827 842 843
847 848 850 850 851 851 852 853 853 854 827 855 857 858 861
861 861 861 863 863 866 866 868 868 870 871 871 872 877 877
878 879 880 880 881 884 885 886 842 880 827 887 888 827 889
891 891 893 894 895 899 900 901 904 907 907 910 912 913 899
910 899 910 910 915 916 920 920 921 922 922 925 926 927 930
930 931 931 931 933 933 935 936 937 937 938 939 940 941 942
942 943 937 942 946 947 948 949 899 910 910 950 951 952 954
954 955 956 957 958 959 960 960 960 961 965 966 969 972 972
973 973 973 974 974 959 972 959 975 977 978 978 980 981 981
982 983 983 983 985 986 987 988 990 991 991 996 997 997 977
959 959 910 433 698 910 107 72 */
| 8a0cda95890ffc1a0bf1ddbe31c4b951b7f9c9be.cu | #include<stdio.h>
const int threshold=10;
const int m=1024;
__global__ void selection_sort(int *data, int left, int right)
{
int temp,i,j;
//printf("in selection sort\n");
for(i=left;i<right; i++)
for(j=i+1;j<=right;j++)
if(data[i] > data[j])
{
temp=data[i];
data[i]=data[j];
data[j]=temp;
}
}
__global__ void partition(int *a,int left,int right,int pivot,int *al,int *ah)
{
int l,h,i;
int diff=(right-left+1)/m;
int k1=threadIdx.x*diff+left;
int k2=k1+diff-1;
if(threadIdx.x==m-1)
k2=right;
// printf("in partition %d %d and k1= %d k2= %d \n",left,right,k1,k2);
l=h=k1;
for(i=k1;i<=k2;i++)
{
al[i]=ah[i]=-999;
}
for(i=k1;i<=k2;i++)
{
if(a[i] < pivot)
{
al[l++]=a[i];
}
else
{
if(a[i] > pivot)
{
ah[h++]=a[i];
}
}
}
}
//__global__
void quicksort(int *a, const int left, const int right)
{
//printf("in quick sort %d %d \n",left,right);
if (right-left <= threshold)
{
int *ad;
cudaMalloc((void **)&ad,(right-left+1)*sizeof(int));
cudaMemcpy(ad,a,(right-left+1)*sizeof(int),cudaMemcpyHostToDevice);
selection_sort<<<1,1>>>(ad, left, right);
cudaMemcpy(a,ad,(right-left+1)*sizeof(int),cudaMemcpyDeviceToHost);
return;
}
int pivot=a[left];
int *al,*ah;
int *ad;
cudaMalloc((void **)&ad,(right-left+1)*sizeof(int));
cudaMalloc((void **)&al,(right-left+1)*sizeof(int));
cudaMalloc((void **)&ah,(right-left+1)*sizeof(int));
cudaMemcpy(ad,a,(right-left+1)*sizeof(int),cudaMemcpyHostToDevice);
partition<<<1,m>>>(ad,left,right,pivot,al,ah);
int al_h[right-left+1],ah_h[right-left+1];
cudaMemcpy(al_h,al,(right-left+1)*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(ah_h,ah,(right-left+1)*sizeof(int),cudaMemcpyDeviceToHost);
int i=0,k=0;
while(i < right-left+1)
{
while(al_h[i]==-999 && i < right-left+1)
i++;
while(al_h[i]!=-999 && i < right-left+1)
{
al_h[k++]=al_h[i++];
}
}
//quicksort<<<1,1>>>(al,0,k-1);
quicksort(al_h,0,k-1);
int p=left;
int x=0;
// printf("array al");
while(x < k)
{
a[p++]=al_h[x++];
//printf("%d ",a[p]);
}
a[p]=pivot;
i=0;
k=0;
while(i < right-left+1)
{
while(ah_h[i]==-999 && i < right-left+1)
i++;
while(ah_h[i]!=-999 && i < right-left+1)
{
ah_h[k++]=ah_h[i++];
}
}
//quicksort<<<1,1>>>(ah,0,k-1);
quicksort(ah_h,0,k-1);
i=0;
p++;
while(i < k)
{
a[p++]=ah_h[i++];
}
}
int main()
{
int n ;
printf("\n Enter number of elements :"); //you can give any large number
scanf("%d",&n);
int a[n];
time_t t;
srand((unsigned)time(&t));
for (unsigned i = 0 ; i < n ; i++)
{
a[i]=rand()%n;
}
printf("\n\n original array\n");
for(int i=0; i < n;i++)
printf("%d\t ",a[i]);
quicksort(a,0,n-1);
printf("\n\n after sorting\n");
for(int i=0; i < n;i++)
printf("%d\t ",a[i]);
}/*
**************************************output**********************************************************
Enter number of elements :1001
original array
372 714 453 297 548 632 339 131 544 573 559 322 300 274
5 358 276 593 572 362 360 950 637 955 823 472 605 975
366 695 247 738 408 700 725 956 22 64 778 566 327 336 889
627 300 584 675 577 177 247 629 227 887 265 872 709 427
476 374 793 171 621 531 269 10 946 916 32 9 693 599 336 719
177 654 19 761 328 286 628 265 915 855 842 870 417 241 987
893 615 780 754 926 0 23 936 946 939 659 956 321 257 982
730 434 635 439 885 654 415 513 609 19 57 451 579 474 692
566 57 997 345 811 922 35 834 547 981 462 205 626 473 462
608 894 586 933 332 471 586 748 674 886 457 421 336 36 586
718 602 643 714 637 453 325 672 978 872 342 129 768 969
603 920 576 186 506 508 518 667 785 956 30 670 103 451 696
139 36 413 431 369 817 758 823 141 119 490 703 461 619 470
429 912 390 695 97 586 894 306 943 678 952 663 37 54 113 423
193 841 836 314 900 652 71 722 483 190 211 185 342 521 656
771 432 736 466 220 321 49 526 954 417 477 616 454 532 419
877 415 950 402 730 850 53 491 261 536 372 473 411 714 684
66 174 806 802 330 25 813 379 241 456 796 409 71 940 941
491 816 45 130 217 465 980 961 647 241 186 18 404 598 422
87 354 286 893 847 617 609 349 996 850 806 482 949 877
421 579 57 237 625 188 144 780 858 796 426 98 672 134 502
960 246 279 4 533 863 851 149 161 199 835 701 695 316 650
262 738 228 9 665 853 888 499 323 746 294 749 534 657 574
727 306 820 5 310 352 558 160 191 719 50 26 110 435 342 760
697 770 988 707 434 531 594 624 854 29 608 292 564 264 866
290 571 686 986 881 728 234 732 920 643 472 636 753 907
978 512 294 438 190 0 562 411 284 185 955 4 794 937 568
748 803 548 8 178 223 580 596 457 311 515 100 473 150 853
69 819 55 363 947 936 53 508 36 338 384 991 342 868 927 600
615 419 147 314 287 60 894 884 518 895 88 308 367 930 851
126 748 596 490 384 531 543 582 567 571 966 247 913 523
174 202 829 283 39 142 571 100 35 144 308 930 232 616 987
161 156 803 599 753 292 983 974 836 565 231 406 220 478
9 744 342 211 572 626 942 714 887 41 439 30 349 58 262 655
735 114 811 537 713 253 830 386 227 355 951 458 451 861
626 460 604 969 362 866 284 303 269 861 34 708 891 383 766
843 728 500 957 228 37 360 482 557 746 399 912 386 547 52
246 863 513 541 521 565 406 805 558 676 665 592 73 245 665
530 88 82 29 735 310 757 94 482 3 840 571 605 225 117 658 162
981 861 703 191 425 799 997 673 164 351 264 238 597 619
768 685 701 487 419 10 935 204 183 938 43 754 543 960 872
200 121 542 60 824 733 175 312 419 848 477 771 802 405 57
420 172 432 811 349 541 821 283 745 3 912 789 758 454 438
319 344 559 551 94 72 974 960 384 83 497 861 544 298 265 601
408 127 723 218 167 263 729 140 8 423 51 487 871 195 925
880 229 173 430 323 245 93 973 629 176 469 490 410 457 445
10 555 263 733 463 430 687 192 260 385 305 312 872 175
507 486 54 427 659 174 440 904 958 413 222 825 572 402 234
720 538 936 965 801 358 428 921 44 310 180 429 615 182 991
480 380 166 534 807 515 708 246 108 355 659 331 179 922
733 414 641 270 349 605 761 707 723 371 442 32 242 871 337
424 552 817 804 718 350 610 233 748 857 341 103 205 362
973 817 95 386 457 55 425 753 817 132 475 878 574 198 119
134 535 544 686 352 347 94 392 648 327 831 194 358 624 89
721 596 907 506 983 53 561 407 806 67 229 972 946 493 169
64 318 394 298 694 436 336 788 829 674 805 349 868 163 973
647 574 258 553 79 931 607 330 338 102 88 257 764 724 441 933
788 759 17 777 142 453 112 931 972 786 735 320 343 588 983 990
852 241 233 621 171 840 642 199 632 730 147 396 453 588 18 931
36 35 397 178 179 509 108 150 985 534 161 327 812 834 7 354
765 240 665 937 770 306 826 401 726 663 487 178 250 506 800
977 231 196 155 100 396 954 942 380 177 102 398 680 626 405
33 391 335 388 17 104 695 533 195 420 196 683 289 446 879 779
423 800 975 268 901 60 221 842 441 89 634 529 769 259 624 492
650 959 880 357 62 574 891 948 685 86 630 664 222 508 442 335
308 416 603 899 477 515 740 608 604 63 827 372 322 450 554 663
408 433 19 160 698 910 107 72
after sorting
0 0 3 3 4 4 5 7 8 8 9 9 9 10 17 17 18 18 19 19 19 17 19 22 23 25 26 29 29
30 30 32 33 34 35 36 37 39 41 43 44 45 49 50 51 52 53 53 53 54 55 55 53 53 55 53 36 35
33 57 58 60 60 60 62 63 60 62 63 63 64 66 67 69 71 72 72 73 79 82 83 86 87 88 88 88 89
89 93 94 94 94 95 72 97 98 100 100 100 102 102 103 104 107 108 108
110 112 113 114 117 119 121 126 127 107 72 129 130 72 72 131 132
134 134 139 140 141 142 142 144 147 147 149 150 150 155 156
160 160 161 162 163 164 166 167 169 155 160 160 171 172 173
174 174 174 175 175 176 160 177 178 178 178 179 179 180 182
183 185 185 186 188 190 190 191 191 192 193 194 195 195 196
196 198 199 199 200 202 204 196 205 211 211 217 218 220 220
221 222 222 223 225 222 227 228 228 229 229 231 231 232 233
233 234 234 237 238 240 241 242 245 245 246 246 246 233 240
231 222 222 160 247 250 253 257 258 259 260 261 262 262 263
263 264 264 259 265 268 269 269 270 268 259 222 160 274 276
279 283 283 284 284 286 287 289 290 292 292 294 294 289 297
298 298 300 303 305 306 308 308 308 310 310 310 311 312 312
314 316 318 319 320 308 306 308 321 308 308 322 323 323 325
327 328 330 330 331 332 335 335 336 337 338 338 338 335 335
335 335 322 339 341 342 342 342 342 342 343 344 345 347 349
349 349 349 349 350 351 352 352 354 355 355 357 357 358 360
360 362 362 362 363 366 367 369 371 354 357 372 374 379 380
380 383 384 384 384 385 386 386 386 388 390 391 392 394 396
396 397 398 399 401 402 404 405 405 406 406 407 391 408 409
410 411 411 413 413 414 415 415 416 417 419 419 419 419 420
420 421 422 423 423 423 424 425 425 426 423 416 427 428 429
429 430 430 431 432 432 433 434 435 436 438 438 439 440 441
441 442 442 445 446 450 451 451 451 450 433 433 408 433 453
454 454 456 457 457 457 457 458 460 461 462 463 465 466 469
470 471 457 472 473 473 473 474 475 476 477 477 477 478 480
482 482 482 483 486 487 487 487 490 491 491 492 493 497 499
500 502 492 477 506 507 508 508 508 509 512 508 477 513 515
515 515 518 518 521 521 523 526 529 530 515 531 532 533 533
534 534 534 535 536 537 538 541 541 542 543 543 477 515 515
544 547 547 477 515 548 551 552 553 554 555 557 558 558 559
561 562 564 565 565 566 567 568 571 571 571 571 561 572 572
561 554 573 574 574 574 574 576 577 579 579 580 582 584 586
586 586 586 588 588 592 593 594 596 596 596 597 598 599 600
601 602 603 603 604 604 604 605 607 608 608 608 609 609 610
615 616 616 617 619 619 607 608 621 624 624 624 625 626 626
626 626 608 627 628 629 629 630 608 604 632 634 635 636 637
641 642 643 643 647 647 648 650 650 652 654 655 656 657 658
659 663 663 663 664 665 665 665 665 667 670 672 673 663 674
663 664 663 663 675 676 678 680 683 684 685 685 686 686 687
692 693 694 695 696 697 698 700 701 701 703 703 707 707 708
708 709 713 664 663 698 698 698 698 408 433 698 714 718 718
719 719 720 721 722 723 723 724 725 726 727 728 728 729 730
732 733 733 733 735 735 735 736 730 726 738 740 744 745 746
746 748 749 753 753 753 748 753 740 754 757 758 758 759 760
740 761 764 765 766 768 768 769 770 770 771 771 777 740 778
779 780 780 785 786 788 788 789 793 794 796 796 799 800 800
801 802 803 803 804 805 805 800 806 807 800 800 811 812 813
816 817 817 817 817 819 820 821 800 800 740 823 824 825 826
827 829 829 830 831 834 835 836 836 840 840 841 827 842 843
847 848 850 850 851 851 852 853 853 854 827 855 857 858 861
861 861 861 863 863 866 866 868 868 870 871 871 872 877 877
878 879 880 880 881 884 885 886 842 880 827 887 888 827 889
891 891 893 894 895 899 900 901 904 907 907 910 912 913 899
910 899 910 910 915 916 920 920 921 922 922 925 926 927 930
930 931 931 931 933 933 935 936 937 937 938 939 940 941 942
942 943 937 942 946 947 948 949 899 910 910 950 951 952 954
954 955 956 957 958 959 960 960 960 961 965 966 969 972 972
973 973 973 974 974 959 972 959 975 977 978 978 980 981 981
982 983 983 983 985 986 987 988 990 991 991 996 997 997 977
959 959 910 433 698 910 107 72 */
|
5020a7e00387819daca452dc208854488807aef7.hip | // !!! This is a file automatically generated by hipify!!!
#include <Support/Device/SafeCudaAPI.cuh>
#include <Support/Device/SimpleKernels.cuh>
#include <Support/Device/Timer.cuh>
#include <cmath>
#include <limits>
#include <iomanip>
#include <iostream>
#include <vector>
using namespace timer;
using xlib::byte_t;
int main() {
auto seed = std::chrono::high_resolution_clock::now().time_since_epoch()
.count();
std::mt19937_64 gen(seed);
//std::generate(v.begin(), v.end(), std::rand);
std::uniform_int_distribution<unsigned char>
distribution(0, std::numeric_limits<unsigned char>::max());
size_t size = 1024;
Timer<DEVICE> TM;
std::vector<float> H2D_time;
std::vector<float> H2D_pinned_time;
std::vector<float> D2D_time;
std::vector<float> memcpy_kernel_time;
std::vector<float> memset_time;
std::vector<float> memset_kernel_time;
std::cout << "Computing";
while (true) {
std::cout << "." << std::flush;
//======================================================================
byte_t* d_array;
if (hipMalloc(&d_array, size) != hipSuccess)
break;
auto h_array = new byte_t[size];
TM.start();
cuMemcpyToDevice(h_array, size, d_array);
TM.stop();
delete[] h_array;
H2D_time.push_back(TM.duration());
//----------------------------------------------------------------------
byte_t* h_array_pinned;
hipHostMalloc(&h_array_pinned, size);
TM.start();
cuMemcpyToDevice(h_array_pinned, size, d_array);
TM.stop();
hipHostFree(h_array_pinned);
H2D_pinned_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
hipMemset(d_array, 0x00, size);
TM.stop();
memset_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
cu::memset(reinterpret_cast<unsigned char*>(d_array), size,
(unsigned char) 0);
TM.stop();
CHECK_CUDA_ERROR
memset_kernel_time.push_back(TM.duration());
//----------------------------------------------------------------------
byte_t* d_array2;
if (hipMalloc(&d_array2, size) == hipSuccess) {
TM.start();
hipMemcpy(d_array2, d_array, size, hipMemcpyDeviceToDevice);
TM.stop();
D2D_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
cu::memcpy(d_array, size, d_array2);
TM.stop();
memcpy_kernel_time.push_back(TM.duration());
cuFree(d_array2);
}
else {
D2D_time.push_back(std::nan(""));
memcpy_kernel_time.push_back(std::nan(""));
}
cuFree(d_array);
//----------------------------------------------------------------------
size *= 2;
}
size = 1024;
std::cout << "\n\n" << std::setprecision(2) << std::right << std::fixed
<< std::setw(8) << "SIZE"
<< std::setw(11) << "MemcpyHtD"
<< std::setw(14) << "MemcpyHtDPin"
<< std::setw(11) << "MemcpyDtD"
<< std::setw(14) << "MemcpyKernel"
<< std::setw(8) << "Memset"
<< std::setw(14) << "MemsetKernel" << std::endl;
xlib::char_sequence('-', 80);
for (size_t i = 0; i < H2D_time.size(); i++) {
std::cout << std::setw(8) << xlib::human_readable(size)
<< std::setw(11) << H2D_time[i]
<< std::setw(14) << H2D_pinned_time[i]
<< std::setw(11) << D2D_time[i]
<< std::setw(14) << memcpy_kernel_time[i]
<< std::setw(8) << memset_time[i]
<< std::setw(14) << memset_kernel_time[i] << "\n";
size *= 2;
}
}
| 5020a7e00387819daca452dc208854488807aef7.cu | #include <Support/Device/SafeCudaAPI.cuh>
#include <Support/Device/SimpleKernels.cuh>
#include <Support/Device/Timer.cuh>
#include <cmath>
#include <limits>
#include <iomanip>
#include <iostream>
#include <vector>
using namespace timer;
using xlib::byte_t;
int main() {
auto seed = std::chrono::high_resolution_clock::now().time_since_epoch()
.count();
std::mt19937_64 gen(seed);
//std::generate(v.begin(), v.end(), std::rand);
std::uniform_int_distribution<unsigned char>
distribution(0, std::numeric_limits<unsigned char>::max());
size_t size = 1024;
Timer<DEVICE> TM;
std::vector<float> H2D_time;
std::vector<float> H2D_pinned_time;
std::vector<float> D2D_time;
std::vector<float> memcpy_kernel_time;
std::vector<float> memset_time;
std::vector<float> memset_kernel_time;
std::cout << "Computing";
while (true) {
std::cout << "." << std::flush;
//======================================================================
byte_t* d_array;
if (cudaMalloc(&d_array, size) != cudaSuccess)
break;
auto h_array = new byte_t[size];
TM.start();
cuMemcpyToDevice(h_array, size, d_array);
TM.stop();
delete[] h_array;
H2D_time.push_back(TM.duration());
//----------------------------------------------------------------------
byte_t* h_array_pinned;
cudaMallocHost(&h_array_pinned, size);
TM.start();
cuMemcpyToDevice(h_array_pinned, size, d_array);
TM.stop();
cudaFreeHost(h_array_pinned);
H2D_pinned_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
cudaMemset(d_array, 0x00, size);
TM.stop();
memset_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
cu::memset(reinterpret_cast<unsigned char*>(d_array), size,
(unsigned char) 0);
TM.stop();
CHECK_CUDA_ERROR
memset_kernel_time.push_back(TM.duration());
//----------------------------------------------------------------------
byte_t* d_array2;
if (cudaMalloc(&d_array2, size) == cudaSuccess) {
TM.start();
cudaMemcpy(d_array2, d_array, size, cudaMemcpyDeviceToDevice);
TM.stop();
D2D_time.push_back(TM.duration());
//----------------------------------------------------------------------
TM.start();
cu::memcpy(d_array, size, d_array2);
TM.stop();
memcpy_kernel_time.push_back(TM.duration());
cuFree(d_array2);
}
else {
D2D_time.push_back(std::nan(""));
memcpy_kernel_time.push_back(std::nan(""));
}
cuFree(d_array);
//----------------------------------------------------------------------
size *= 2;
}
size = 1024;
std::cout << "\n\n" << std::setprecision(2) << std::right << std::fixed
<< std::setw(8) << "SIZE"
<< std::setw(11) << "MemcpyHtD"
<< std::setw(14) << "MemcpyHtDPin"
<< std::setw(11) << "MemcpyDtD"
<< std::setw(14) << "MemcpyKernel"
<< std::setw(8) << "Memset"
<< std::setw(14) << "MemsetKernel" << std::endl;
xlib::char_sequence('-', 80);
for (size_t i = 0; i < H2D_time.size(); i++) {
std::cout << std::setw(8) << xlib::human_readable(size)
<< std::setw(11) << H2D_time[i]
<< std::setw(14) << H2D_pinned_time[i]
<< std::setw(11) << D2D_time[i]
<< std::setw(14) << memcpy_kernel_time[i]
<< std::setw(8) << memset_time[i]
<< std::setw(14) << memset_kernel_time[i] << "\n";
size *= 2;
}
}
|
3444db3e30461134c55dbb73ee97fba6aba81a38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "inv_roi_align_op.h"
#include <stdio.h>
namespace caffe2 {
namespace {
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__
float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <typename T>
__device__ void bilinear_interpolate_forward(
const int height, const int width,
T y, T x,
T & w1, T & w2, T & w3, T & w4,
int & x_low, int & x_high, int & y_low, int & y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
y_low = (int) y;
x_low = (int) x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__device__ T bilinear_interpolate_backward(const T* bottom_data,
const int height, const int width,
T y, T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int) y;
int x_low = (int) x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void InvRoIAlignForward(const int nthreads,
const T* bottom_data, const float* bottom_rois,
const float spatial_scale,
const int batch, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const float* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = static_cast<int>(offset_bottom_rois[0]);
if (roi_batch_ind < 0 || roi_batch_ind >= batch) {
continue;
}
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1 (should not happen as I have removed those)
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(1.));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(1.));
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_top_data = top_data + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_bottom_data = bottom_data + top_offset;
const T bottom_data_this_bin = offset_bottom_data[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy ++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++) {
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_forward(height, width, y, x,
w1, w2, w3, w4,
x_low, x_high, y_low, y_high,
index);
T g1 = bottom_data_this_bin * w1 / count;
T g2 = bottom_data_this_bin * w2 / count;
T g3 = bottom_data_this_bin * w3 / count;
T g4 = bottom_data_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
gpu_atomic_add(static_cast<T>(g1), offset_top_data + y_low * width + x_low);
gpu_atomic_add(static_cast<T>(g2), offset_top_data + y_low * width + x_high);
gpu_atomic_add(static_cast<T>(g3), offset_top_data + y_high * width + x_low);
gpu_atomic_add(static_cast<T>(g4), offset_top_data + y_high * width + x_high);
} // if
} // ix
} // iy
}
}
template <typename T>
__global__ void InvRoIAlignBackward(const int nthreads,
const T* input_grad, const float* bottom_rois,
const float spatial_scale,
const int batch, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
T* output_grad) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const float* offset_bottom_rois = bottom_rois + n * 5;
const int roi_batch_ind = static_cast<int>(offset_bottom_rois[0]);
if (roi_batch_ind < 0 || roi_batch_ind >= batch) {
output_grad[index] = 0.;
continue;
}
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(1.));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(1.));
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input_grad = input_grad + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy ++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++) {
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate_backward(offset_input_grad,
height,
width,
y,
x,
index);
output_val += val;
}
}
output_val /= count;
output_grad[index] = output_val;
} // CUDA_1D_KERNEL_LOOP
} // InvRoIAlignBackward
} // namespace
template<>
bool InvRoIAlignOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data
auto& R = Input(1); // RoIs
auto& RX = Input(2); // RoI features
auto* Y = Output(0); // RoI pooled data
Y->ResizeLike(X);
math::Set<float, CUDAContext>(
Y->size(), 0.f, Y->mutable_data<float>(), &context_);
// if R is empty, then just return zero
if (R.size() == 0)
return true;
// get dimensions
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
DCHECK_EQ(C, RX.dim32(1));
const int pH = RX.dim32(2);
const int pW = RX.dim32(3);
hipLaunchKernelGGL(( InvRoIAlignForward<float>)
, dim3(CAFFE_GET_BLOCKS(RX.size())),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
RX.size(),
RX.data<float>(),
R.data<float>(),
spatial_scale_,
N, C,
H, W,
pH, pW,
Y->mutable_data<float>());
return true;
}
template<>
bool InvRoIAlignGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data
auto& R = Input(1); // RoIs
auto& RX = Input(2); // RoI features
auto& dY = Input(3); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dRX = Output(0);// Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
// get dimensions
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
const int pH = RX.dim32(2);
const int pW = RX.dim32(3);
dRX->ResizeLike(RX);
if (R.size() == 0) {
// The following mutable_data calls are needed to allocate the tensors
dRX->mutable_data<float>();
return true;
}
hipLaunchKernelGGL(( InvRoIAlignBackward<float>)
, dim3(CAFFE_GET_BLOCKS(RX.size())),
dim3(CAFFE_CUDA_NUM_THREADS), 0,
context_.cuda_stream(),
RX.size(),
dY.data<float>(),
R.data<float>(),
spatial_scale_,
N, C,
H, W,
pH, pW,
dRX->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(InvRoIAlign,
InvRoIAlignOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(InvRoIAlignGradient,
InvRoIAlignGradientOp<float, CUDAContext>);
} // namespace caffe2 | 3444db3e30461134c55dbb73ee97fba6aba81a38.cu | #include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "inv_roi_align_op.h"
#include <stdio.h>
namespace caffe2 {
namespace {
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__
float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <typename T>
__device__ void bilinear_interpolate_forward(
const int height, const int width,
T y, T x,
T & w1, T & w2, T & w3, T & w4,
int & x_low, int & x_high, int & y_low, int & y_high,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
y_low = (int) y;
x_low = (int) x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
return;
}
template <typename T>
__device__ T bilinear_interpolate_backward(const T* bottom_data,
const int height, const int width,
T y, T x,
const int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
//empty
return 0;
}
if (y <= 0) y = 0;
if (x <= 0) x = 0;
int y_low = (int) y;
int x_low = (int) x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T) y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T) x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = bottom_data[y_low * width + x_low];
T v2 = bottom_data[y_low * width + x_high];
T v3 = bottom_data[y_high * width + x_low];
T v4 = bottom_data[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void InvRoIAlignForward(const int nthreads,
const T* bottom_data, const float* bottom_rois,
const float spatial_scale,
const int batch, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
T* top_data) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const float* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = static_cast<int>(offset_bottom_rois[0]);
if (roi_batch_ind < 0 || roi_batch_ind >= batch) {
continue;
}
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1 (should not happen as I have removed those)
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(1.));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(1.));
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_top_data = top_data + (roi_batch_ind * channels + c) * height * width;
int top_offset = (n * channels + c) * pooled_height * pooled_width;
const T* offset_bottom_data = bottom_data + top_offset;
const T bottom_data_this_bin = offset_bottom_data[ph * pooled_width + pw];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy ++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++) {
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_forward(height, width, y, x,
w1, w2, w3, w4,
x_low, x_high, y_low, y_high,
index);
T g1 = bottom_data_this_bin * w1 / count;
T g2 = bottom_data_this_bin * w2 / count;
T g3 = bottom_data_this_bin * w3 / count;
T g4 = bottom_data_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
gpu_atomic_add(static_cast<T>(g1), offset_top_data + y_low * width + x_low);
gpu_atomic_add(static_cast<T>(g2), offset_top_data + y_low * width + x_high);
gpu_atomic_add(static_cast<T>(g3), offset_top_data + y_high * width + x_low);
gpu_atomic_add(static_cast<T>(g4), offset_top_data + y_high * width + x_high);
} // if
} // ix
} // iy
}
}
template <typename T>
__global__ void InvRoIAlignBackward(const int nthreads,
const T* input_grad, const float* bottom_rois,
const float spatial_scale,
const int batch, const int channels, const int height, const int width,
const int pooled_height, const int pooled_width,
T* output_grad) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
const int pw = index % pooled_width;
const int ph = (index / pooled_width) % pooled_height;
const int c = (index / pooled_width / pooled_height) % channels;
const int n = index / pooled_width / pooled_height / channels;
const float* offset_bottom_rois = bottom_rois + n * 5;
const int roi_batch_ind = static_cast<int>(offset_bottom_rois[0]);
if (roi_batch_ind < 0 || roi_batch_ind >= batch) {
output_grad[index] = 0.;
continue;
}
// Do not using rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[1] * spatial_scale;
T roi_start_h = offset_bottom_rois[2] * spatial_scale;
T roi_end_w = offset_bottom_rois[3] * spatial_scale;
T roi_end_h = offset_bottom_rois[4] * spatial_scale;
// Force malformed ROIs to be 1x1
T roi_width = max(roi_end_w - roi_start_w, static_cast<T>(1.));
T roi_height = max(roi_end_h - roi_start_h, static_cast<T>(1.));
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input_grad = input_grad + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w = ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy ++) { // e.g., iy = 0, 1
const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix ++) {
const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate_backward(offset_input_grad,
height,
width,
y,
x,
index);
output_val += val;
}
}
output_val /= count;
output_grad[index] = output_val;
} // CUDA_1D_KERNEL_LOOP
} // InvRoIAlignBackward
} // namespace
template<>
bool InvRoIAlignOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data
auto& R = Input(1); // RoIs
auto& RX = Input(2); // RoI features
auto* Y = Output(0); // RoI pooled data
Y->ResizeLike(X);
math::Set<float, CUDAContext>(
Y->size(), 0.f, Y->mutable_data<float>(), &context_);
// if R is empty, then just return zero
if (R.size() == 0)
return true;
// get dimensions
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
DCHECK_EQ(C, RX.dim32(1));
const int pH = RX.dim32(2);
const int pW = RX.dim32(3);
InvRoIAlignForward<float>
<<<CAFFE_GET_BLOCKS(RX.size()),
CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
RX.size(),
RX.data<float>(),
R.data<float>(),
spatial_scale_,
N, C,
H, W,
pH, pW,
Y->mutable_data<float>());
return true;
}
template<>
bool InvRoIAlignGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data
auto& R = Input(1); // RoIs
auto& RX = Input(2); // RoI features
auto& dY = Input(3); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dRX = Output(0);// Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
// get dimensions
const int N = X.dim32(0);
const int C = X.dim32(1);
const int H = X.dim32(2);
const int W = X.dim32(3);
const int pH = RX.dim32(2);
const int pW = RX.dim32(3);
dRX->ResizeLike(RX);
if (R.size() == 0) {
// The following mutable_data calls are needed to allocate the tensors
dRX->mutable_data<float>();
return true;
}
InvRoIAlignBackward<float>
<<<CAFFE_GET_BLOCKS(RX.size()),
CAFFE_CUDA_NUM_THREADS, 0,
context_.cuda_stream()>>>(
RX.size(),
dY.data<float>(),
R.data<float>(),
spatial_scale_,
N, C,
H, W,
pH, pW,
dRX->mutable_data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(InvRoIAlign,
InvRoIAlignOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(InvRoIAlignGradient,
InvRoIAlignGradientOp<float, CUDAContext>);
} // namespace caffe2 |
9d4a2a5b4733c01a978d26df0f5b6935088a0bfa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <cstdlib>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
/*
* Host function to initialize vector elements. This function
* simply initializes each element to equal its index in the
* vector.
*/
void initWith(float num, float *a, int N)
{
for (int i = 0; i < N; ++i)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
/*
* Host function to confirm values in `vector`. This function
* assumes all values are the same `target` value.
*/
void checkElementsAre(float target, float *vector, int N)
{
for (int i = 0; i < N; i++)
{
if (vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
const int N = 2 << 24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c, size);
initWith(3, a, N);
initWith(4, b, N);
initWith(0, c, N);
int threadsPerBlock;
int numberOfBlocks;
/*
* nvprof should register performance changes when execution configuration
* is updated.
*/
threadsPerBlock = 1;
numberOfBlocks = 1;
hipError_t addVectorsErr;
hipError_t asyncErr;
addVectorsInto << <numberOfBlocks, threadsPerBlock >> > (c, a, b, N);
addVectorsErr = hipGetLastError();
if (addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr));
asyncErr = hipDeviceSynchronize();
if (asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
checkElementsAre(7, c, N);
hipFree(a);
hipFree(b);
hipFree(c);
}
| 9d4a2a5b4733c01a978d26df0f5b6935088a0bfa.cu | #include <stdio.h>
#include <cstdlib>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
/*
* Host function to initialize vector elements. This function
* simply initializes each element to equal its index in the
* vector.
*/
void initWith(float num, float *a, int N)
{
for (int i = 0; i < N; ++i)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
/*
* Host function to confirm values in `vector`. This function
* assumes all values are the same `target` value.
*/
void checkElementsAre(float target, float *vector, int N)
{
for (int i = 0; i < N; i++)
{
if (vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
const int N = 2 << 24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c, size);
initWith(3, a, N);
initWith(4, b, N);
initWith(0, c, N);
int threadsPerBlock;
int numberOfBlocks;
/*
* nvprof should register performance changes when execution configuration
* is updated.
*/
threadsPerBlock = 1;
numberOfBlocks = 1;
cudaError_t addVectorsErr;
cudaError_t asyncErr;
addVectorsInto << <numberOfBlocks, threadsPerBlock >> > (c, a, b, N);
addVectorsErr = cudaGetLastError();
if (addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
asyncErr = cudaDeviceSynchronize();
if (asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
checkElementsAre(7, c, N);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
|
10c32c76c116035b0d63787f87b341fa2960285b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void MakeMerges (int size, int *mergeWith, int *offsets, int *mis) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size)
{
int currentAgg = mis[idx];
int newAgg = mergeWith[currentAgg];
// If the aggregate is not merging just apply offset
if (newAgg == -1)
{
mis[idx] = currentAgg - offsets[currentAgg];
}
// The aggregate is merging find offset of aggregate merging with
else
{
mis[idx] = newAgg - offsets[newAgg];
}
}
} | 10c32c76c116035b0d63787f87b341fa2960285b.cu | #include "includes.h"
__global__ void MakeMerges (int size, int *mergeWith, int *offsets, int *mis) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size)
{
int currentAgg = mis[idx];
int newAgg = mergeWith[currentAgg];
// If the aggregate is not merging just apply offset
if (newAgg == -1)
{
mis[idx] = currentAgg - offsets[currentAgg];
}
// The aggregate is merging find offset of aggregate merging with
else
{
mis[idx] = newAgg - offsets[newAgg];
}
}
} |
aba64f7c4609785085ede36051e5adf45e3fd827.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <string.h>
#define TOTAL_SIZE 1024
//#define TOTAL_SIZE (1024*1024*1024)
#define block_dim 1024
#define chk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
double *h_c, *h_a, *h_b;
double **d_c, **d_a, **d_b;
hipStream_t *streams;
hipEvent_t start, finish;
void allocate(int devices, int multi_gpu) {
int i = 0, parts, rem;
//h_c = (double *) malloc(sizeof(double) * TOTAL_SIZE);
//h_a = (double *) malloc(sizeof(double) * TOTAL_SIZE);
//h_b = (double *) malloc(sizeof(double) * TOTAL_SIZE);
d_c = (double **) malloc(sizeof(double *) * devices);
d_a = (double **) malloc(sizeof(double *) * devices);
d_b = (double **) malloc(sizeof(double *) * devices);
hipHostMalloc((void **) &h_c, sizeof(double) * TOTAL_SIZE);
hipHostMalloc((void **) &h_a, sizeof(double) * TOTAL_SIZE);
hipHostMalloc((void **) &h_b, sizeof(double) * TOTAL_SIZE);
//hipHostMalloc((void **)d_c, sizeof(double *) * devices);
//hipHostMalloc((void **)d_a, sizeof(double *) * devices);
//hipHostMalloc((void **)d_b, sizeof(double *) * devices);
streams = (hipStream_t *) malloc(sizeof(hipStream_t) * devices);
for (i=0; i<devices; ++i) {
hipStreamCreate(&streams[i]);
}
parts = TOTAL_SIZE / devices;
rem = TOTAL_SIZE % devices;
i = 0;
if (multi_gpu) {
for (i=0; i<devices-1; ++i) {
hipSetDevice(i);
printf("\nS%d", streams[i]);
chk(hipMallocAsync((void **) &d_c[i], sizeof(double) * parts, streams[i]));
chk(hipMallocAsync((void **) &d_a[i], sizeof(double) * parts, streams[i]));
chk(hipMallocAsync((void **) &d_b[i], sizeof(double) * parts, streams[i]));
}
}
hipSetDevice(i);
chk(hipMallocAsync((void **) &d_c[i], sizeof(double) * (parts + rem), streams[i]));
chk(hipMallocAsync((void **) &d_a[i], sizeof(double) * (parts + rem), streams[i]));
chk(hipMallocAsync((void **) &d_b[i], sizeof(double) * (parts + rem), streams[i]));
hipEventCreate(&start);
hipEventCreate(&finish);
}
extern "C" __global__ void vec_add(double *c, double *a, double *b, int PART_SIZE) {
int t = threadIdx.x + blockIdx.x * blockDim.x;
if (t < TOTAL_SIZE && t < PART_SIZE) {
c[t] = a[t] + b[t];
if (t % 100)
printf("\n%f", c[t]);
}
}
void kernels_launch(int devices, int multi_gpu) {
int parts = TOTAL_SIZE / devices;
int rem = TOTAL_SIZE % devices;
int i = 0;
if (multi_gpu) {
for (i=0; i<devices-1; ++i) {
hipSetDevice(i);
hipLaunchKernelGGL(( vec_add), dim3(parts/block_dim + 1), dim3(block_dim), 0, streams[i], d_c[i], d_a[i], d_b[i], parts);
}
}
hipSetDevice(i);
hipLaunchKernelGGL(( vec_add), dim3((parts + rem)/block_dim + 1), dim3(block_dim), 0, streams[i], d_c[i], d_a[i], d_b[i], parts + rem);
}
void data_transferHtoD(int devices, int multi_gpu) {
int parts = TOTAL_SIZE / devices;
int rem = TOTAL_SIZE % devices;
int i = 0;
if (multi_gpu) {
for (i=0; i<devices-1; ++i) {
hipSetDevice(i);
printf("\nS%d", streams[i]);
chk(hipMemcpyAsync(d_a[i], h_a + (parts * i), sizeof(double) * parts, hipMemcpyHostToDevice, streams[i]));
chk(hipMemcpyAsync(d_b[i], h_b + (parts * i), sizeof(double) * parts, hipMemcpyHostToDevice, streams[i]));
}
}
hipSetDevice(i);
chk(hipMemcpyAsync(d_a[i], h_a + (parts * i), sizeof(double) * (parts + rem), hipMemcpyHostToDevice, streams[i]));
chk(hipMemcpyAsync(d_b[i], h_b + (parts * i), sizeof(double) * (parts + rem), hipMemcpyHostToDevice, streams[i]));
}
void data_transferDtoH(int devices, int multi_gpu) {
int parts = TOTAL_SIZE / devices;
int rem = TOTAL_SIZE % devices;
int i = 0;
if (multi_gpu) {
//Data trnsfer back
for (i=0; i<devices-1; ++i) {
hipSetDevice(i);
chk(hipMemcpyAsync(h_c + (parts * i), d_c[i], sizeof(double) * parts, hipMemcpyDeviceToHost, streams[i]));
}
hipSetDevice(i);
chk(hipMemcpyAsync(h_c + (parts * i), d_c[i], sizeof(double) * (parts + rem), hipMemcpyDeviceToHost, streams[i]));
}
}
void deallocate(int devices) {
for (int i=0; i<devices; ++i) {
hipSetDevice(i);
hipFreeAsync(d_c[i], streams[i]);
hipFreeAsync(d_a[i], streams[i]);
hipFreeAsync(d_b[i], streams[i]);
}
for (int i=0; i<devices; ++i) {
hipStreamDestroy(streams[i]);
}
free(d_c);
free(d_a);
free(d_b);
//free(h_a);
//free(h_b);
//free(h_c);
//hipHostFree(d_c);
//hipHostFree(d_a);
//hipHostFree(d_b);
hipHostFree(h_a);
hipHostFree(h_b);
hipHostFree(h_c);
hipEventDestroy(start);
hipEventDestroy(finish);
}
void verify() {
double diff_sq = 0.0;
double sum_sq = 0.0;
for (int i=0; i<TOTAL_SIZE; ++i) {
sum_sq += h_c[i] * h_c[i];
diff_sq += (h_c[i] - (h_a[i] + h_b[i])) * (h_c[i] - (h_a[i] + h_b[i]));
}
printf("\n%f\t%f\n", h_c[0], h_c[5]);
printf("\n\nError Rate: %e\n", diff_sq / sum_sq);
}
int main(int argc, char **argv) {
int i, parts, rem, devices = 1;
float exec_time;
int multi_gpu = 0;
if (argc > 1 && strcmp(argv[1], "-m") == 0) {
multi_gpu = 1;
}
chk(hipGetDeviceCount(&devices));
printf("\nNum devices available = %d\n", devices);
if (devices == 0) {
printf("\nError: No devices found\n");
exit(1);
}
if (devices ==1)
multi_gpu = 0;
allocate(devices, multi_gpu);
//Initialize data
for (i=0; i<TOTAL_SIZE; ++i) {
h_a[i] = i + 1;
h_b[i] = i + 2;
}
data_transferHtoD(devices, multi_gpu);
hipEventRecord(start);
kernels_launch(devices, multi_gpu);
hipEventRecord(finish);
for (i=0; i<devices; ++i)
hipStreamWaitEvent(streams[i], finish);
data_transferDtoH(devices, multi_gpu);
for (i=0; i<devices; ++i)
hipStreamSynchronize(streams[i]);
if (TOTAL_SIZE <= 2048) {
verify();
}
hipEventElapsedTime(&exec_time, start, finish);
printf("MultiGPU Time = %f", exec_time / 1000);
deallocate(devices);
printf("\nFinished.\n");
return 0;
}
| aba64f7c4609785085ede36051e5adf45e3fd827.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <string.h>
#define TOTAL_SIZE 1024
//#define TOTAL_SIZE (1024*1024*1024)
#define block_dim 1024
#define chk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
double *h_c, *h_a, *h_b;
double **d_c, **d_a, **d_b;
cudaStream_t *streams;
cudaEvent_t start, finish;
void allocate(int devices, int multi_gpu) {
int i = 0, parts, rem;
//h_c = (double *) malloc(sizeof(double) * TOTAL_SIZE);
//h_a = (double *) malloc(sizeof(double) * TOTAL_SIZE);
//h_b = (double *) malloc(sizeof(double) * TOTAL_SIZE);
d_c = (double **) malloc(sizeof(double *) * devices);
d_a = (double **) malloc(sizeof(double *) * devices);
d_b = (double **) malloc(sizeof(double *) * devices);
cudaMallocHost((void **) &h_c, sizeof(double) * TOTAL_SIZE);
cudaMallocHost((void **) &h_a, sizeof(double) * TOTAL_SIZE);
cudaMallocHost((void **) &h_b, sizeof(double) * TOTAL_SIZE);
//cudaMallocHost((void **)d_c, sizeof(double *) * devices);
//cudaMallocHost((void **)d_a, sizeof(double *) * devices);
//cudaMallocHost((void **)d_b, sizeof(double *) * devices);
streams = (cudaStream_t *) malloc(sizeof(cudaStream_t) * devices);
for (i=0; i<devices; ++i) {
cudaStreamCreate(&streams[i]);
}
parts = TOTAL_SIZE / devices;
rem = TOTAL_SIZE % devices;
i = 0;
if (multi_gpu) {
for (i=0; i<devices-1; ++i) {
cudaSetDevice(i);
printf("\nS%d", streams[i]);
chk(cudaMallocAsync((void **) &d_c[i], sizeof(double) * parts, streams[i]));
chk(cudaMallocAsync((void **) &d_a[i], sizeof(double) * parts, streams[i]));
chk(cudaMallocAsync((void **) &d_b[i], sizeof(double) * parts, streams[i]));
}
}
cudaSetDevice(i);
chk(cudaMallocAsync((void **) &d_c[i], sizeof(double) * (parts + rem), streams[i]));
chk(cudaMallocAsync((void **) &d_a[i], sizeof(double) * (parts + rem), streams[i]));
chk(cudaMallocAsync((void **) &d_b[i], sizeof(double) * (parts + rem), streams[i]));
cudaEventCreate(&start);
cudaEventCreate(&finish);
}
extern "C" __global__ void vec_add(double *c, double *a, double *b, int PART_SIZE) {
int t = threadIdx.x + blockIdx.x * blockDim.x;
if (t < TOTAL_SIZE && t < PART_SIZE) {
c[t] = a[t] + b[t];
if (t % 100)
printf("\n%f", c[t]);
}
}
void kernels_launch(int devices, int multi_gpu) {
int parts = TOTAL_SIZE / devices;
int rem = TOTAL_SIZE % devices;
int i = 0;
if (multi_gpu) {
for (i=0; i<devices-1; ++i) {
cudaSetDevice(i);
vec_add<<<parts/block_dim + 1, block_dim, 0, streams[i]>>>(d_c[i], d_a[i], d_b[i], parts);
}
}
cudaSetDevice(i);
vec_add<<<(parts + rem)/block_dim + 1, block_dim, 0, streams[i]>>>(d_c[i], d_a[i], d_b[i], parts + rem);
}
void data_transferHtoD(int devices, int multi_gpu) {
int parts = TOTAL_SIZE / devices;
int rem = TOTAL_SIZE % devices;
int i = 0;
if (multi_gpu) {
for (i=0; i<devices-1; ++i) {
cudaSetDevice(i);
printf("\nS%d", streams[i]);
chk(cudaMemcpyAsync(d_a[i], h_a + (parts * i), sizeof(double) * parts, cudaMemcpyHostToDevice, streams[i]));
chk(cudaMemcpyAsync(d_b[i], h_b + (parts * i), sizeof(double) * parts, cudaMemcpyHostToDevice, streams[i]));
}
}
cudaSetDevice(i);
chk(cudaMemcpyAsync(d_a[i], h_a + (parts * i), sizeof(double) * (parts + rem), cudaMemcpyHostToDevice, streams[i]));
chk(cudaMemcpyAsync(d_b[i], h_b + (parts * i), sizeof(double) * (parts + rem), cudaMemcpyHostToDevice, streams[i]));
}
void data_transferDtoH(int devices, int multi_gpu) {
int parts = TOTAL_SIZE / devices;
int rem = TOTAL_SIZE % devices;
int i = 0;
if (multi_gpu) {
//Data trnsfer back
for (i=0; i<devices-1; ++i) {
cudaSetDevice(i);
chk(cudaMemcpyAsync(h_c + (parts * i), d_c[i], sizeof(double) * parts, cudaMemcpyDeviceToHost, streams[i]));
}
cudaSetDevice(i);
chk(cudaMemcpyAsync(h_c + (parts * i), d_c[i], sizeof(double) * (parts + rem), cudaMemcpyDeviceToHost, streams[i]));
}
}
void deallocate(int devices) {
for (int i=0; i<devices; ++i) {
cudaSetDevice(i);
cudaFreeAsync(d_c[i], streams[i]);
cudaFreeAsync(d_a[i], streams[i]);
cudaFreeAsync(d_b[i], streams[i]);
}
for (int i=0; i<devices; ++i) {
cudaStreamDestroy(streams[i]);
}
free(d_c);
free(d_a);
free(d_b);
//free(h_a);
//free(h_b);
//free(h_c);
//cudaFreeHost(d_c);
//cudaFreeHost(d_a);
//cudaFreeHost(d_b);
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c);
cudaEventDestroy(start);
cudaEventDestroy(finish);
}
void verify() {
double diff_sq = 0.0;
double sum_sq = 0.0;
for (int i=0; i<TOTAL_SIZE; ++i) {
sum_sq += h_c[i] * h_c[i];
diff_sq += (h_c[i] - (h_a[i] + h_b[i])) * (h_c[i] - (h_a[i] + h_b[i]));
}
printf("\n%f\t%f\n", h_c[0], h_c[5]);
printf("\n\nError Rate: %e\n", diff_sq / sum_sq);
}
int main(int argc, char **argv) {
int i, parts, rem, devices = 1;
float exec_time;
int multi_gpu = 0;
if (argc > 1 && strcmp(argv[1], "-m") == 0) {
multi_gpu = 1;
}
chk(cudaGetDeviceCount(&devices));
printf("\nNum devices available = %d\n", devices);
if (devices == 0) {
printf("\nError: No devices found\n");
exit(1);
}
if (devices ==1)
multi_gpu = 0;
allocate(devices, multi_gpu);
//Initialize data
for (i=0; i<TOTAL_SIZE; ++i) {
h_a[i] = i + 1;
h_b[i] = i + 2;
}
data_transferHtoD(devices, multi_gpu);
cudaEventRecord(start);
kernels_launch(devices, multi_gpu);
cudaEventRecord(finish);
for (i=0; i<devices; ++i)
cudaStreamWaitEvent(streams[i], finish);
data_transferDtoH(devices, multi_gpu);
for (i=0; i<devices; ++i)
cudaStreamSynchronize(streams[i]);
if (TOTAL_SIZE <= 2048) {
verify();
}
cudaEventElapsedTime(&exec_time, start, finish);
printf("MultiGPU Time = %f", exec_time / 1000);
deallocate(devices);
printf("\nFinished.\n");
return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.